Results 1 - 6 of 6 for RSQRT (0.07 sec)

  1. tensorflow/compiler/mlir/lite/tests/prepare-tf.mlir

    //              rsqrt(variance + epsilon)
    // CHECK:  %[[RSQRT:.*]] = "tf.Rsqrt"(%[[ADD1]])
    //              scale * rsqrt(variance + epsilon)
    // CHECK:  %[[MUL1:.*]] = "tf.Mul"(%[[ARG1:.*]], %[[RSQRT]])
    //              x * scale * rsqrt(variance + epsilon)
    // CHECK:  %[[MUL2:.*]] = "tf.Mul"(%[[ARG0:.*]], %[[MUL1]])
    //              mean * scale * rsqrt(variance + epsilon)
    - Last Modified: Wed May 29 07:26:59 UTC 2024
    - 59.8K bytes
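
    The CHECK lines above trace the inference-time folding of tf.FusedBatchNormV3 into
    primitive ops built around tf.Rsqrt. As a rough illustration only, here is a NumPy
    sketch of that decomposition (function and variable names are illustrative, not
    taken from the test):

    import numpy as np

    def fused_batch_norm_inference(x, scale, offset, mean, variance, epsilon=1e-3):
        # rsqrt(variance + epsilon)
        rsqrt = 1.0 / np.sqrt(variance + epsilon)
        # scale * rsqrt(variance + epsilon)
        mul1 = scale * rsqrt
        # x * scale * rsqrt(variance + epsilon)
        mul2 = x * mul1
        # mean * scale * rsqrt(variance + epsilon), folded into the additive term
        mul3 = mean * mul1
        # y = (x - mean) * scale * rsqrt(variance + epsilon) + offset
        return mul2 - mul3 + offset
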
  2. tensorflow/compiler/mlir/lite/transforms/optimize_patterns.td

    // TODO(karimnosseir): Add constraints that the kernel code assumes.
    // constraint on axis and depth.
    multiclass L2NormalizePatterns<Op FirstOp, Op SecondOp> {
      // This pattern constructs L2NormalizationOp from
      // Mul->Rsqrt->Sum->Square Or
      // Div->sqrt->Sum->Square
      def L2NormalizePattern1#FirstOp#SecondOp : Pat<
                      (FirstOp $x,
                         (SecondOp
                            (TFL_SumOp
    - Last Modified: Thu May 16 20:31:41 UTC 2024
    - 66.4K bytes
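
    This multiclass fuses a Square -> Sum -> Rsqrt -> Mul chain (or the equivalent
    Square -> Sum -> Sqrt -> Div chain) into a single L2 normalization op. A minimal
    NumPy sketch of the two mathematically equivalent forms being matched (names are
    illustrative, not from the pattern file):

    import numpy as np

    def l2_normalize_rsqrt_form(x, axis=-1):
        # Mul -> Rsqrt -> Sum -> Square
        return x * (1.0 / np.sqrt(np.sum(np.square(x), axis=axis, keepdims=True)))

    def l2_normalize_sqrt_form(x, axis=-1):
        # Div -> Sqrt -> Sum -> Square
        return x / np.sqrt(np.sum(np.square(x), axis=axis, keepdims=True))
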
  3. tensorflow/compiler/mlir/tensorflow/tests/decompose_resource_ops.mlir

        // CHECK-DAG: %[[ACCUM_NEW:.*]] = "tf.AddV2"(%[[ACCUM]], %[[GRAD_SQ]]) : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
        // CHECK-DAG: %[[RSQRT_ACCUM:.*]] = "tf.Rsqrt"(%[[ACCUM_NEW]]) : (tensor<4xf32>) -> tensor<4xf32>
        // CHECK-DAG: %[[ADAGRAD_LR:.*]] = "tf.Mul"(%[[LR]], %[[RSQRT_ACCUM]]) : (tensor<f32>, tensor<4xf32>) -> tensor<4xf32>
    - Last Modified: Wed May 22 19:47:48 UTC 2024
    - 51.3K bytes
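
    The CHECK-DAG lines correspond to the decomposition of the resource Adagrad update:
    the accumulator grows by the squared gradient, and the learning rate is rescaled by
    tf.Rsqrt of the new accumulator. A hedged sketch of that update rule (the final
    variable update is inferred from the standard Adagrad formula and is not shown in
    the excerpt):

    import numpy as np

    def adagrad_step(var, accum, grad, lr):
        # ACCUM_NEW = accum + grad^2
        accum_new = accum + np.square(grad)
        # ADAGRAD_LR = lr * rsqrt(accum_new)
        adagrad_lr = lr * (1.0 / np.sqrt(accum_new))
        # var moves against the gradient, scaled by the adapted learning rate
        return var - adagrad_lr * grad, accum_new
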
  4. tensorflow/cc/gradients/math_grad.cc

      // Use the built-in operator.
      grad_outputs->push_back(
          internal::RsqrtGrad(scope, op.output(0), grad_inputs[0]));
      return scope.status();
    }
    REGISTER_GRADIENT_OP("Rsqrt", RsqrtGrad);
    
    Status ExpGrad(const Scope& scope, const Operation& op,
                   const std::vector<Output>& grad_inputs,
                   std::vector<Output>* grad_outputs) {
      // dy/dx = exp(x) = y
    - Last Modified: Fri Aug 25 18:20:20 UTC 2023
    - 50.7K bytes
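
    RsqrtGrad computes the gradient of y = rsqrt(x) from the op's output y and the
    incoming gradient: since dy/dx = -1/2 * x^(-3/2) = -1/2 * y^3, only op.output(0)
    and grad_inputs[0] are needed. A small standalone sketch of that formula (plain
    Python, not the C++ API):

    def rsqrt_grad(y, grad):
        # For y = x**-0.5, dy/dx = -0.5 * x**-1.5 = -0.5 * y**3,
        # so the backpropagated gradient is grad * (-0.5 * y**3).
        return -0.5 * (y ** 3) * grad
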
  5. tensorflow/compiler/mlir/lite/transforms/prepare_tf.cc

      explicit FusedBatchNormV3Pat(::mlir::MLIRContext *context)
          : ::mlir::RewritePattern(
                "tf.FusedBatchNormV3", 1, context,
                {"tf.Add", "tf.Const", "tf.Mul", "tf.Rsqrt", "tf.Sub"}) {}
    
      ::mlir::LogicalResult matchAndRewrite(
          ::mlir::Operation *fused_batch_norm,
          ::mlir::PatternRewriter &rewriter) const override {
    - Last Modified: Tue May 28 21:49:50 UTC 2024
    - 64.6K bytes
  6. tensorflow/compiler/jit/mark_for_compilation_pass.cc

                "Floor", "IsFinite", "IsInf", "IsNan", "Inv", "Reciprocal", "Log",
                "Log1p", "Invert", "LogicalNot", "Ndtri", "Neg", "Rint", "Round",
                "Rsqrt", "Sigmoid", "Sign", "Sinh", "Softplus", "Softsign", "Sqrt",
                "Square", "Tan", "Tanh", "Real", "Imag", "Erf", "Erfc", "Erfinv",
                "Lgamma", "Digamma",
                // Binary
    - Last Modified: Wed Feb 21 12:19:41 UTC 2024
    - 85.3K bytes