Results 1 - 10 of 20 for "2x1x3xf32" (0.13 sec)

  1. tensorflow/compiler/mlir/lite/stablehlo/tests/compose-uniform-quantized-type.mlir

        %11 = stablehlo.broadcast_in_dim %10, dims = [0, 1, 2] : (tensor<1x1x3xf32>) -> tensor<1x4x3xf32>  // Optional
        %12 = stablehlo.subtract %9, %11 : tensor<1x4x3xf32>  // Precalculated zp_neg.
        %13 = stablehlo.broadcast_in_dim %4, dims = [0, 1, 2] : (tensor<1x1x3xf32>) -> tensor<1x4x3xf32>  // Optional
        %14 = stablehlo.multiply %12, %13 : tensor<1x4x3xf32>  // s1 * s2
    - Last Modified: Tue May 14 17:10:32 UTC 2024
    - 37K bytes
  2. tensorflow/compiler/mlir/lite/tests/quantize-variables.mlir

      %50 = "tfl.read_variable"(%5) : (tensor<!tf_type.resource>) -> tensor<1x2x3xf32>
      %51 = "quantfork.stats"(%50) {layerStats = dense<[0.0, 1.0]> : tensor<2xf32>} : (tensor<1x2x3xf32>) -> tensor<1x2x3xf32>
      %52 = "tfl.concatenation"(%51, %0) {axis = 1 : i32, fused_activation_function = "NONE"} : (tensor<1x2x3xf32>, tensor<1x2x3xf32>) -> tensor<1x4x3xf32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 20.3K bytes
  3. tensorflow/compiler/mlir/lite/tests/prepare-quantize-post-training-16bits.mlir

    func.func @QuantizeReshapeOp(%arg0: tensor<1x1x3xf32>) -> (tensor<1x3xf32>) {
      %1 = "quantfork.stats"(%arg0) {layerStats = dense<[-1.0, 1.0]> : tensor<2xf32>} : (tensor<1x1x3xf32>) -> tensor<1x1x3xf32>
      %2 = "tfl.pseudo_const"() {value = dense<[-1, 3]> : tensor<2xi32>} : () -> tensor<2xi32>
      %3 = "tfl.reshape"(%1, %2) : (tensor<1x1x3xf32>, tensor<2xi32>) -> tensor<1x3xf32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 26.1K bytes
  4. tensorflow/compiler/mlir/quantization/stablehlo/ops/stablehlo_op_quant_spec_test.cc

          %1 = "quantfork.qcast"(%0) {volatile} : (tensor<1x3xf32>) -> tensor<1x3x!quant.uniform<i8:f32, 0.13170163023705575:-1>>
          %2 = "quantfork.dcast"(%1) : (tensor<1x3x!quant.uniform<i8:f32, 0.13170163023705575:-1>>) -> tensor<1x3xf32>
          %3 = stablehlo.reshape %2 : (tensor<1x3xf32>) -> tensor<3x1xf32>
    - Last Modified: Thu Apr 04 07:19:09 UTC 2024
    - 14.8K bytes
  5. tensorflow/compiler/mlir/quantization/common/lift_as_function_call_test.cc

          return %0 : tensor<1x1x4xf32>
        }
      )mlir";
    
      const OwningOpRef<ModuleOp> module_op =
          ParseModuleOpString(kXlaCallModuleOpWithQuantizationMethodAttr);
      ASSERT_TRUE(module_op);
    
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 26.2K bytes
  6. tensorflow/compiler/mlir/tensorflow/tests/einsum.mlir

      // CHECK: return %[[v2]] : tensor<3x4x6x2xf32>
    }
    
    func.func @einsum_reduceddim(%arg0: tensor<2x5x7xf32>, %arg1: tensor<2x5x7x3xf32>) -> tensor<2x5x3xf32> {
      %0 = "tf.Einsum"(%arg0, %arg1) {T = "tfdtype$DT_FLOAT", equation = "bin,binj->bij"}: (tensor<2x5x7xf32>, tensor<2x5x7x3xf32>) -> tensor<2x5x3xf32>
      func.return %0 : tensor<2x5x3xf32>
      // CHECK-LABEL: einsum_reduceddim
    - Last Modified: Fri Jan 05 18:35:42 UTC 2024
    - 25.9K bytes
  7. tensorflow/compiler/mlir/lite/stablehlo/tests/optimize.mlir

      %2 = "mhlo.dot_general"(%arg2, %arg3) {
        dot_dimension_numbers = #mhlo.dot<
          lhs_contracting_dimensions = [2],
          rhs_contracting_dimensions = [0]
      >} : (tensor<3x1x512xf32>, tensor<512x13xf32>) -> tensor<3x1x13xf32>
      %r = "mhlo.concatenate"(%0, %1, %2) <{dimension = 0 : i64}> : (tensor<1x1x13xf32>, tensor<2x1x13xf32>, tensor<3x1x13xf32>) -> tensor<6x1x13xf32>
    - Last Modified: Sat Apr 06 15:32:52 UTC 2024
    - 22.7K bytes
  8. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize/quantize_op_with_region.mlir

        %9 = "quantfork.qcast"(%8) {volatile} : (tensor<2x3x1x3xf32>) -> tensor<2x3x1x3x!quant.uniform<i8:f32, 3.000000e-01:1>>
        %10 = "quantfork.dcast"(%9) : (tensor<2x3x1x3x!quant.uniform<i8:f32, 3.000000e-01:1>>) -> tensor<2x3x1x3xf32>
        %11 = stablehlo.reshape %10 : (tensor<2x3x1x3xf32>) -> tensor<2x3x3xf32>
    - Last Modified: Thu Apr 18 20:32:46 UTC 2024
    - 18.9K bytes
  9. tensorflow/compiler/mlir/lite/experimental/tac/tests/pick-subgraphs.mlir

        func.return %2 : tensor<2x100xf32>
      }
      func.func @func_2_CPU_FLOAT(%arg0: tensor<100xf32>, %arg1: tensor<100xf32>) -> tensor<2x100xf32> attributes {tac.cost = 2.000000e+01 : f32, tac.device = "CPU", tac.inference_type = "FLOAT", tac.interface_name = "func_2"} {
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 24.3K bytes
  10. tensorflow/compiler/mlir/lite/tests/prepare-quantize-signed.mlir

      %prelu = "tfl.prelu"(%arg0, %cst) : (tensor<1x10x10x3xf32>, tensor<1x1x3xf32>) -> tensor<1x10x10x3xf32>
      func.return %prelu : tensor<1x10x10x3xf32>
    
    // CHECK: %[[cst:.*]] = arith.constant dense<[{{\[}}[1.66394591, 3.61694336, 2.0382936]]]> : tensor<1x1x3xf32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 18.4K bytes