Search Options

Results per page
Sort
Preferred Languages
Advanced

Results 1 - 6 of 6 for squared_difference (0.29 sec)

  1. tensorflow/compiler/mlir/lite/transforms/prepare_tf.cc

    // In above calculation, they are replaced by new values. These new mean and
    // variance are calculated as following:
    // new_mean = mean(x, axis=[0, 1, 2])
    // new_variance = mean(squared_difference(x, new_mean), axis=[0, 1, 2])
    //
    // The DRR rule for the is_training equals true case is as following:
    // def : Pattern<
    //     (TF_FusedBatchNormV3Op:$root
    //         $x, $scale, $offset, $mean, $variance,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 28 21:49:50 UTC 2024
    - 64.6K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/lite/transforms/optimize_patterns.td

         ActFun),
        [(HasOneUse $first_output),
         (HasRankAtMost<4> $input),
         (HasRankAtMost<4> $a),
         (HasRankAtMost<4> $b)]>;
    }
    
    // We can eliminate Relu from Relu(SquaredDifference(x, y)),
    // since the result of SquaredDifference is always non-negative.
    // TFLite interpreter doesn't support Relu+int32 for now, so the test cases
    // fail without the following pattern, which optimizes the Relu away and
    // fixes the problem.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 16 20:31:41 UTC 2024
    - 66.4K bytes
    - Viewed (0)
  3. tensorflow/cc/gradients/math_grad.cc

      auto gx_1 = Mul(scope, grad_inputs[0], Mul(scope, two, Sub(scope, x_1, x_2)));
      auto gx_2 = Neg(scope, gx_1);
      return BinaryGradCommon(scope, op, grad_outputs, gx_1, gx_2);
    }
    REGISTER_GRADIENT_OP("SquaredDifference", SquaredDifferenceGrad);
    
    Status AddNGrad(const Scope& scope, const Operation& op,
                    const std::vector<Output>& grad_inputs,
                    std::vector<Output>* grad_outputs) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Aug 25 18:20:20 UTC 2023
    - 50.7K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/tensorflow/tests/lower_tf.mlir

    func.func @squared_difference_real(%arg0: tensor<3xf32>, %arg1: tensor<3xf32>) -> tensor<3xf32> {
      // CHECK: [[R1:%.+]] = "tf.Sub"(%arg0, %arg1)
      // CHECK: "tf.Mul"([[R1]], [[R1]])
      %1 = "tf.SquaredDifference"(%arg0, %arg1) : (tensor<3xf32>, tensor<3xf32>) -> tensor<3xf32>
      func.return %1 : tensor<3xf32>
    }
    
    // CHECK-LABEL: func @squared_difference_complex
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Jan 05 18:35:42 UTC 2024
    - 92K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/lite/tests/prepare-tf.mlir

      // CHECK:  %[[MEAN:.*]] = "tf.Mean"(%arg0, %[[CST]]) <{keep_dims = false}> : (tensor<1x1x6x2xf32>, tensor<3xi32>) -> tensor<2xf32>
      // CHECK:  %[[SQ:.*]] = "tf.SquaredDifference"(%arg0, %[[MEAN]]) : (tensor<1x1x6x2xf32>, tensor<2xf32>) -> tensor<1x1x6x2xf32>
      // CHECK:  %[[MEAN0:.*]] = "tf.Mean"(%[[SQ]], %[[CST]]) <{keep_dims = false}> : (tensor<1x1x6x2xf32>, tensor<3xi32>) -> tensor<2xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 29 07:26:59 UTC 2024
    - 59.8K bytes
    - Viewed (0)
  6. tensorflow/compiler/jit/mark_for_compilation_pass.cc

                "TruncateMod", "Equal", "NotEqual", "Greater", "GreaterEqual",
                "Less", "LessEqual", "SigmoidGrad", "SoftplusGrad", "SoftsignGrad",
                "TanhGrad", "Pow", "SquaredDifference", "ApproximateEqual",
                // Others
                "AddN", "Bitcast", "Cast", "ClipByValue", "Const", "Empty",
                "Identity", "IdentityN", "Relu", "Relu6", "ReluGrad", "Relu6Grad",
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 21 12:19:41 UTC 2024
    - 85.3K bytes
    - Viewed (0)
Back to top