Results 11 - 20 of 29 for 2x2x4xf32 (0.29 sec)

  1. tensorflow/compiler/mlir/quantization/common/attrs_and_constraints_test.cc

      module {
        func.func @main(%arg0: tensor<2x2x2xf32>, %arg1: tensor<2x2x2xf32>) -> tensor<2x2x2xf32> attributes {_from_xla_call_module} {
          %0 = stablehlo.dot_general %arg0, %arg1,
            batching_dims = [0] x [0],
            contracting_dims = [2] x [1],
            precision = [DEFAULT, DEFAULT]
          : (tensor<2x2x2xf32>, tensor<2x2x2xf32>) -> tensor<2x2x2xf32>
          return %0 : tensor<2x2x2xf32>
        }
      }
    )mlir";
    
    - Last Modified: Tue May 14 17:10:32 UTC 2024
    - 22.9K bytes
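
    A hedged aside (not from the file above): with batching_dims = [0] x [0] and
    contracting_dims = [2] x [1], the stablehlo.dot_general in the excerpt above is a
    per-batch matrix multiply over the 2x2x2 operands. A minimal standalone C++ sketch
    of that contraction, with made-up input values:

      #include <cstdio>

      // out[b][i][j] = sum_k lhs[b][i][k] * rhs[b][k][j]
      // b is the shared batching dim (lhs dim 0 <-> rhs dim 0); k is the
      // contracting pair (lhs dim 2 <-> rhs dim 1).
      int main() {
        const float lhs[2][2][2] = {{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}};
        const float rhs[2][2][2] = {{{1, 0}, {0, 1}},   // batch 0: identity
                                    {{2, 0}, {0, 2}}};  // batch 1: 2 * identity
        float out[2][2][2] = {};
        for (int b = 0; b < 2; ++b)
          for (int i = 0; i < 2; ++i)
            for (int j = 0; j < 2; ++j)
              for (int k = 0; k < 2; ++k)
                out[b][i][j] += lhs[b][i][k] * rhs[b][k][j];
        std::printf("out[1][0][0] = %g\n", out[1][0][0]);  // 5*2 + 6*0 = 10
        return 0;
      }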
  2. tensorflow/compiler/mlir/quantization/stablehlo/ops/stablehlo_op_quant_spec_test.cc

          return %0 : tensor<1x1x4xf32>
        }
      )mlir";
    
      OwningOpRef<ModuleOp> module_op =
          ParseModuleOpString(kModuleXlaCallModuleOpWithDefaultQuantizationMethod);
      ASSERT_TRUE(module_op);
    
    - Last Modified: Thu Apr 04 07:19:09 UTC 2024
    - 14.8K bytes
  3. tensorflow/compiler/mlir/quantization/common/lift_as_function_call_test.cc

          return %0 : tensor<1x1x4xf32>
        }
      )mlir";
    
      const OwningOpRef<ModuleOp> module_op =
          ParseModuleOpString(kXlaCallModuleOpWithQuantizationMethodAttr);
      ASSERT_TRUE(module_op);
    
      func::FuncOp main_fn = FindMainFuncOp(*module_op);
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 26.2K bytes
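
    Results 2 and 3 follow the same test pattern: an MLIR module embedded as a C++
    raw string is parsed with ParseModuleOpString, and FindMainFuncOp then looks up
    @main for further assertions. A minimal standalone sketch of that pattern using
    only upstream MLIR APIs (parseSourceString and lookupSymbol stand in for the
    TensorFlow test helpers; the string name and module body are illustrative):

      #include "mlir/Dialect/Func/IR/FuncOps.h"
      #include "mlir/IR/BuiltinOps.h"
      #include "mlir/IR/MLIRContext.h"
      #include "mlir/IR/OwningOpRef.h"
      #include "mlir/Parser/Parser.h"

      // Placeholder module string; the real tests embed larger modules.
      constexpr char kModule[] = R"mlir(
        module {
          func.func @main(%arg0: tensor<1x1x4xf32>) -> tensor<1x1x4xf32> {
            return %arg0 : tensor<1x1x4xf32>
          }
        }
      )mlir";

      int main() {
        mlir::MLIRContext context;
        context.loadDialect<mlir::func::FuncDialect>();

        // Parse the embedded module string into an owning handle, roughly what
        // ParseModuleOpString does with the fixture-owned context.
        mlir::OwningOpRef<mlir::ModuleOp> module_op =
            mlir::parseSourceString<mlir::ModuleOp>(kModule, &context);
        if (!module_op) return 1;

        // Look up @main, as FindMainFuncOp does in the excerpts above.
        auto main_fn = module_op->lookupSymbol<mlir::func::FuncOp>("main");
        return main_fn ? 0 : 1;
      }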
  4. tensorflow/compiler/mlir/lite/tests/const-fold.mlir

      %1 = "tfl.add"(%cst_2, %cst_1) {fused_activation_function = "NONE"} : (tensor<2x2x2xf32>, tensor<  2x2xf32>) -> tensor<2x2x2xf32>
      %2 = "tfl.add"(%cst_0, %cst_2) {fused_activation_function = "NONE"} : (tensor<    2xf32>, tensor<2x2x2xf32>) -> tensor<2x2x2xf32>
    
      func.return %0, %1, %2 : tensor<2x2xf32>, tensor<2x2x2xf32>, tensor<2x2x2xf32>
    
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 45.8K bytes
  5. tensorflow/compiler/mlir/tensorflow/tests/shape_inference.mlir

        %unshaped = "tf.Cast"(%arg1) : (tensor<1x2x3xf32>) -> tensor<*xf32>
        // CHECK: <{is_stateless = true}>
        %0 = "tf.IfRegion"(%arg0) <{is_stateless = true}> ({
          // CHECK: "tf.Add"{{.+}}(tensor<1x2x3xf32>, tensor<1x2x3xf32>) -> tensor<1x2x3xf32>
          // CHECK: "tf.Yield"{{.+}}(tensor<1x2x3xf32>) -> ()
    - Last Modified: Tue Jan 23 17:24:10 UTC 2024
    - 167.4K bytes
  6. tensorflow/compiler/mlir/tensorflow/tests/tpu_sharding_identification.mlir

    }
    func.func @_func(%arg0: tensor<2x4xf32>, %arg1: tensor<4x2xf32>) -> tensor<2x2xf32> {
      %0 = "tf.MatMul"(%arg0, %arg1) {_XlaSharding = "\08\03\1A\02\02\01\22\02\00\01"} : (tensor<2x4xf32>, tensor<4x2xf32>) -> tensor<2x2xf32>
      %1 = "tf.Identity"(%0) : (tensor<2x2xf32>) -> tensor<2x2xf32>
      return %1 : tensor<2x2xf32>
    }
    
    // -----
    // The following op sharding is used in the following test case:
    - Last Modified: Tue Feb 20 19:07:52 UTC 2024
    - 47.5K bytes
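
    A hedged aside: the escaped value of the _XlaSharding attribute in the excerpt
    above is a serialized xla::OpSharding proto. A small sketch of decoding it with
    ordinary protobuf calls; the helper name and the include path are assumptions,
    not taken from the files listed here:

      #include <string>

      #include "xla/xla_data.pb.h"  // assumed location of the xla::OpSharding proto

      // Returns true if the serialized sharding is a tiled ("OTHER") sharding.
      // For "\08\03\1A\02\02\01\22\02\00\01" this decodes to type OTHER with
      // tile_assignment_dimensions = [2, 1] over devices [0, 1], i.e. the 2x2
      // MatMul result is split along its first dimension across two devices.
      bool IsTiledSharding(const std::string& serialized_sharding) {
        xla::OpSharding sharding;
        if (!sharding.ParseFromString(serialized_sharding)) return false;
        return sharding.type() == xla::OpSharding::OTHER;
      }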
  7. tensorflow/compiler/mlir/lite/tests/quantize.mlir

      %0 = "tfl.var_handle"() {container = "", shared_name = "states"} : () -> tensor<!tf_type.resource<tensor<1x2x3xf32>>>
      %1 = "tfl.read_variable"(%0) : (tensor<!tf_type.resource<tensor<1x2x3xf32>>>) -> tensor<1x2x3xf32>
      %2 = "tfl.quantize"(%1) {qtype = tensor<1x2x3x!quant.uniform<u8<1:255>:f32, 0.047244094488188976:128>>} : (tensor<1x2x3xf32>) -> tensor<1x2x3x!quant.uniform<u8<1:255>:f32, 0.047244094488188976:128>>
    - Last Modified: Tue May 28 23:10:13 UTC 2024
    - 39.7K bytes
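
    A hedged aside: the storage type !quant.uniform<u8<1:255>:f32, 0.047244094488188976:128>
    in the excerpt above dequantizes as real = scale * (q - zero_point), with scale
    0.047244094488188976 (about 6/127) and zero point 128, so the u8 range [1, 255]
    maps to roughly [-6, 6]. A tiny sketch of that arithmetic:

      #include <cstdio>

      int main() {
        const double scale = 0.047244094488188976;  // ~= 6.0 / 127
        const int zero_point = 128;
        const int qs[] = {1, 128, 255};
        for (int q : qs) {
          // Uniform dequantization: real = scale * (q - zero_point).
          std::printf("q = %3d -> %+.4f\n", q, scale * (q - zero_point));
        }
        return 0;  // prints approximately -6.0000, +0.0000, +6.0000
      }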
  8. tensorflow/compiler/mlir/quantization/stablehlo/tests/pipelines/process_nchw_tensor.mlir

    // CHECK: stablehlo.maximum
    // CHECK: (tensor<1x5x5x4xf32>, tensor<f32>) -> tensor<1x2x2x4xf32>
    // CHECK: %[[TRANSPOSE_1:.+]] = stablehlo.transpose %[[REDUCE_WINDOW_MAX]], dims = [0, 3, 1, 2] : (tensor<1x2x2x4xf32>) -> tensor<1x4x2x2xf32>
    // CHECK: return %[[TRANSPOSE_1]]
    
    // -----
    
    // Tests that a `maximum(add(convolution(%activation, %weight), %bias), %zero)`
    - Last Modified: Thu Apr 18 20:32:46 UTC 2024
    - 12.6K bytes
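
    A hedged aside: the dims = [0, 3, 1, 2] permutation in the stablehlo.transpose
    above takes result dimension i from operand dimension dims[i], which is how
    tensor<1x2x2x4xf32> (NHWC) becomes tensor<1x4x2x2xf32> (NCHW) in the CHECK lines.
    A tiny sketch of that shape permutation:

      #include <cstdio>

      int main() {
        const int operand_shape[4] = {1, 2, 2, 4};  // tensor<1x2x2x4xf32>
        const int perm[4] = {0, 3, 1, 2};           // dims = [0, 3, 1, 2]
        int result_shape[4];
        for (int i = 0; i < 4; ++i) result_shape[i] = operand_shape[perm[i]];
        // Prints 1 4 2 2, matching tensor<1x4x2x2xf32> above.
        std::printf("%d %d %d %d\n", result_shape[0], result_shape[1],
                    result_shape[2], result_shape[3]);
        return 0;
      }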
  9. tensorflow/compiler/mlir/tensorflow/tests/region-control-flow-to-functional.mlir

      // CHECK: [[Result:%.*]] = "tf.Case"(%arg0, %arg1)
      // CHECK-SAME: branches = [@tf.CaseRegion_branch0{{.*}}, @tf.CaseRegion_branch1{{.*}}]
      // CHECK-SAME: is_stateless = false
      %0 = "tf.CaseRegion"(%arg0) ({
        %1 = "tf.Cast"(%arg1) {Truncate = false} : (tensor<!tf_type.resource<tensor<1x2x3xf32>>>) -> tensor<!tf_type.resource>
    - Last Modified: Fri Feb 02 11:15:34 UTC 2024
    - 44.3K bytes
  10. tensorflow/compiler/mlir/lite/tests/optimize.mlir

    func.func @ReorderAddWithConstant(%arg0: tensor<2x2xf32>) -> tensor<2x2xf32> {
      %cst = arith.constant dense<1.0> : tensor<2x2xf32>
      %cst_1 = arith.constant dense<2.0> : tensor<2x2xf32>
      %0 = "tfl.add"(%arg0, %cst) {fused_activation_function = "NONE"} : (tensor<2x2xf32>, tensor<2x2xf32>) -> tensor<2x2xf32>
      %1 = "tfl.add"(%0, %cst_1) {fused_activation_function = "NONE"} : (tensor<2x2xf32>, tensor<2x2xf32>) -> tensor<2x2xf32>
    - Last Modified: Thu May 16 20:31:41 UTC 2024
    - 284.1K bytes