Results 11 - 20 of 25 for RSQRT (0.07 sec)

  1. tensorflow/compiler/mlir/tensorflow/tests/canonicalize.mlir

      %1 = "tf.Div"(%arg0, %0) {device = "/job:localhost/replica:0/task:0/device:GPU:0"} : (tensor<8x16xf32>, tensor<8x16xf32>) -> tensor<8x16xf32>
      func.return %1: tensor<8x16xf32>
    
    // CHECK: %0 = "tf.Rsqrt"(%arg1) {device = "/job:localhost/replica:0/task:0/device:GPU:0"} : (tensor<8x16xf32>) -> tensor<8x16xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 09 22:07:10 UTC 2024
    - 132.1K bytes
    - Viewed (0)
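
    The CHECK line above appears to exercise the canonicalization that rewrites a division by a square root into a multiplication by a reciprocal square root, i.e. x / sqrt(y) -> x * rsqrt(y). A minimal NumPy sketch of that algebraic identity (function and variable names are illustrative, not taken from the test):

      import numpy as np

      def div_by_sqrt(x, y):
          # original form: x / sqrt(y)
          return x / np.sqrt(y)

      def mul_by_rsqrt(x, y):
          # canonicalized form: x * rsqrt(y), where rsqrt(y) == 1 / sqrt(y)
          return x * (1.0 / np.sqrt(y))

      x = np.random.rand(8, 16).astype(np.float32)
      y = np.random.rand(8, 16).astype(np.float32) + 0.1  # keep the divisor away from zero
      assert np.allclose(div_by_sqrt(x, y), mul_by_rsqrt(x, y), rtol=1e-5)
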
  2. tensorflow/compiler/mlir/tf2xla/tests/legalize-tf-with-tf2xla-hlo-importer.mlir

        // CHECK-NEXT: %[[grad:.*]] = mhlo.convert %arg0 : tensor<8x8x8x8xf32>
        // CHECK-NEXT: %[[act:.*]] = mhlo.convert %arg1 : tensor<8x8x8x8xf32>
        // CHECK: %[[scr1:.*]] = mhlo.rsqrt
        // CHECK: %[[bcast_arg3:.+]] = "mhlo.dynamic_broadcast_in_dim"(%arg3, {{.*}}) <{broadcast_dimensions = dense<3> : tensor<1xi64>}> : (tensor<8xf32>, tensor<4xindex>) -> tensor<8x8x8x8xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Apr 06 15:32:52 UTC 2024
    - 38.6K bytes
    - Viewed (1)
  3. tensorflow/compiler/mlir/lite/schema/schema.fbs

      SLICE = 65,
      SIN = 66,
      TRANSPOSE_CONV = 67,
      SPARSE_TO_DENSE = 68,
      TILE = 69,
      EXPAND_DIMS = 70,
      EQUAL = 71,
      NOT_EQUAL = 72,
      LOG = 73,
      SUM = 74,
      SQRT = 75,
      RSQRT = 76,
      SHAPE = 77,
      POW = 78,
      ARG_MIN = 79,
      FAKE_QUANT = 80,
      REDUCE_PROD = 81,
      REDUCE_MAX = 82,
      PACK = 83,
      LOGICAL_OR = 84,
      ONE_HOT = 85,
      LOGICAL_AND = 86,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 03 18:01:23 UTC 2024
    - 41.7K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/tensorflow/tests/decompose_resource_ops.mlir

        // CHECK-DAG: %[[ACCUM_NEW:.*]] = "tf.AddV2"(%[[ACCUM]], %[[GRAD_SQ]]) : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
        // CHECK-DAG: %[[RSQRT_ACCUM:.*]] = "tf.Rsqrt"(%[[ACCUM_NEW]]) : (tensor<4xf32>) -> tensor<4xf32>
        // CHECK-DAG: %[[ADAGRAD_LR:.*]] = "tf.Mul"(%[[LR]], %[[RSQRT_ACCUM]]) : (tensor<f32>, tensor<4xf32>) -> tensor<4xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 22 19:47:48 UTC 2024
    - 51.3K bytes
    - Viewed (0)
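
    The tf.AddV2 / tf.Rsqrt / tf.Mul sequence in these CHECK-DAG lines matches the Adagrad update: accum_new = accum + grad^2, followed by var = var - lr * grad / sqrt(accum_new). A rough NumPy sketch of the same arithmetic (names and values are illustrative):

      import numpy as np

      def adagrad_step(var, accum, grad, lr):
          # ACCUM_NEW = accum + grad * grad
          accum_new = accum + grad * grad
          # ADAGRAD_LR = lr * rsqrt(accum_new)
          adagrad_lr = lr / np.sqrt(accum_new)
          # var update: var - adagrad_lr * grad
          return var - adagrad_lr * grad, accum_new

      var = np.ones(4, dtype=np.float32)
      accum = np.full(4, 0.1, dtype=np.float32)
      grad = np.array([0.1, -0.2, 0.3, -0.4], dtype=np.float32)
      var, accum = adagrad_step(var, accum, grad, lr=np.float32(0.01))
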
  5. tensorflow/cc/gradients/math_grad.cc

      // Use the built-in operator.
      grad_outputs->push_back(
          internal::RsqrtGrad(scope, op.output(0), grad_inputs[0]));
      return scope.status();
    }
    REGISTER_GRADIENT_OP("Rsqrt", RsqrtGrad);
    
    Status ExpGrad(const Scope& scope, const Operation& op,
                   const std::vector<Output>& grad_inputs,
                   std::vector<Output>* grad_outputs) {
      // dy/dx = exp(x) = y
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Aug 25 18:20:20 UTC 2023
    - 50.7K bytes
    - Viewed (0)
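
    The gradient registered here follows from y = x^(-1/2): dy/dx = -x^(-3/2) / 2 = -y^3 / 2, so the incoming gradient is scaled by that factor. A small NumPy check of the identity against a finite difference (illustrative only, not the TensorFlow C++ API):

      import numpy as np

      def rsqrt_grad(y, dy):
          # d/dx x^(-1/2) = -x^(-3/2) / 2 = -y^3 / 2, scaled by the incoming gradient
          return dy * (-0.5 * y ** 3)

      x = np.float64(4.0)
      y = x ** -0.5
      eps = 1e-6
      numeric = ((x + eps) ** -0.5 - (x - eps) ** -0.5) / (2 * eps)
      assert np.isclose(rsqrt_grad(y, np.float64(1.0)), numeric, rtol=1e-5)
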
  6. tensorflow/compiler/mlir/lite/stablehlo/tests/legalize_hlo.mlir

    }
    
    // CHECK-LABEL:   func @rsqrt(
    // CHECK-SAME:                %[[VAL_0:.*]]: tensor<2xf32>) -> tensor<2xf32> {
    // CHECK:           %[[VAL_1:.*]] = "tf.Rsqrt"(%[[VAL_0]]) : (tensor<2xf32>) -> tensor<2xf32>
    // CHECK:           return %[[VAL_1]] : tensor<2xf32>
    // CHECK:         }
    func.func @rsqrt(%arg0: tensor<2xf32>) -> tensor<2xf32> {
      %0 = "mhlo.rsqrt"(%arg0) : (tensor<2xf32>) -> tensor<2xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 29 07:26:59 UTC 2024
    - 340.2K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/lite/tests/ops.mlir

    ^bb0(%arg0: tensor<? x f32>):
      // CHECK: "tfl.rsqrt"(%arg0)
      %0 = "tfl.rsqrt"(%arg0): (tensor<? x f32>) -> tensor<? x f32>
      func.return %0 : tensor<? x f32>
    }
    
    // CHECK-LABEL: testRsqrtQuant
    func.func @testRsqrtQuant(%arg0: tensor<1x80x1x!quant.uniform<i8:f32, 0.048358432948589325:-128>>) -> tensor<1x80x1x!quant.uniform<i8:f32, 0.0066055487841367722:-128>> {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jun 06 19:09:08 UTC 2024
    - 189.2K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/lite/transforms/prepare_tf.cc

      explicit FusedBatchNormV3Pat(::mlir::MLIRContext *context)
          : ::mlir::RewritePattern(
                "tf.FusedBatchNormV3", 1, context,
                {"tf.Add", "tf.Const", "tf.Mul", "tf.Rsqrt", "tf.Sub"}) {}
    
      ::mlir::LogicalResult matchAndRewrite(
          ::mlir::Operation *fused_batch_norm,
          ::mlir::PatternRewriter &rewriter) const override {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 28 21:49:50 UTC 2024
    - 64.6K bytes
    - Viewed (0)
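
    The pattern's op list (tf.Add, tf.Const, tf.Mul, tf.Rsqrt, tf.Sub) corresponds to expanding inference-mode batch normalization into elementwise ops: y = (x - mean) * scale * rsqrt(variance + epsilon) + offset. A hedged NumPy sketch of that expansion (the epsilon value and shapes below are illustrative assumptions):

      import numpy as np

      def batch_norm_inference(x, scale, offset, mean, variance, epsilon=1e-3):
          # multiplier = scale * rsqrt(variance + epsilon)
          multiplier = scale / np.sqrt(variance + epsilon)
          # y = x * multiplier + (offset - mean * multiplier)
          return x * multiplier + (offset - mean * multiplier)

      x = np.random.rand(2, 4, 4, 8).astype(np.float32)
      scale = np.ones(8, np.float32)
      offset = np.zeros(8, np.float32)
      mean = x.mean(axis=(0, 1, 2))
      variance = x.var(axis=(0, 1, 2))
      y = batch_norm_inference(x, scale, offset, mean, variance)
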
  9. tensorflow/compiler/jit/mark_for_compilation_pass.cc

                "Floor", "IsFinite", "IsInf", "IsNan", "Inv", "Reciprocal", "Log",
                "Log1p", "Invert", "LogicalNot", "Ndtri", "Neg", "Rint", "Round",
                "Rsqrt", "Sigmoid", "Sign", "Sinh", "Softplus", "Softsign", "Sqrt",
                "Square", "Tan", "Tanh", "Real", "Imag", "Erf", "Erfc", "Erfinv",
                "Lgamma", "Digamma",
                // Binary
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 21 12:19:41 UTC 2024
    - 85.3K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/lite/tests/optimize.mlir

      %cst = arith.constant dense<[0]> : tensor<1xi32>
      %0 = "tfl.square"(%arg0) : (tensor<2xf32>) -> tensor<2xf32>
      %1 = "tfl.sum"(%0, %cst) {keep_dims = false} : (tensor<2xf32>, tensor<1xi32>) -> tensor<f32>
      %2 = "tfl.rsqrt"(%1) : (tensor<f32>) -> tensor<f32>
      %3 = "tfl.mul"(%arg0, %2) {fused_activation_function = "NONE"} : (tensor<2xf32>, tensor<f32>) -> tensor<2xf32>
      func.return %3: tensor<2xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 16 20:31:41 UTC 2024
    - 284.1K bytes
    - Viewed (0)
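
    The tfl.square -> tfl.sum -> tfl.rsqrt -> tfl.mul chain in this test computes x * rsqrt(sum(x^2)), i.e. L2 normalization, the kind of sequence the optimizer can recognize and fuse. A NumPy sketch of the same dataflow (names are illustrative):

      import numpy as np

      def l2_normalize(x, axis=0):
          # square -> sum -> rsqrt -> mul, mirroring the op sequence in the test
          sum_sq = np.sum(np.square(x), axis=axis)
          return x * (1.0 / np.sqrt(sum_sq))

      x = np.array([3.0, 4.0], dtype=np.float32)
      y = l2_normalize(x)  # [0.6, 0.8]
      assert np.isclose(np.linalg.norm(y), 1.0)
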