Results 1 - 10 of 24 for 1x1x3xi32 (0.25 sec)

  1. tensorflow/compiler/mlir/lite/stablehlo/tests/compose-uniform-quantized-type.mlir

        %10 = stablehlo.convert %3 : (tensor<1x1x3xi32>) -> tensor<1x1x3xf32>
        %11 = stablehlo.broadcast_in_dim %10, dims = [0, 1, 2] : (tensor<1x1x3xf32>) -> tensor<1x4x3xf32>  // Optional
        %12 = stablehlo.subtract %9, %11 : tensor<1x4x3xf32>  // Precalculated zp_neg.
        %13 = stablehlo.broadcast_in_dim %4, dims = [0, 1, 2] : (tensor<1x1x3xf32>) -> tensor<1x4x3xf32>  // Optional
        %14 = stablehlo.multiply %12, %13 : tensor<1x4x3xf32>  // s1 * s2
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 14 17:10:32 UTC 2024
    - 37K bytes
  2. tensorflow/compiler/mlir/lite/tests/prepare-quantize-post-training-16bits.mlir

    // CHECK-LABEL: QuantizeReshapeOp
    func.func @QuantizeReshapeOp(%arg0: tensor<1x1x3xf32>) -> (tensor<1x3xf32>) {
      %1 = "quantfork.stats"(%arg0) {layerStats = dense<[-1.0, 1.0]> : tensor<2xf32>} : (tensor<1x1x3xf32>) -> tensor<1x1x3xf32>
      %2 = "tfl.pseudo_const"() {value = dense<[-1, 3]> : tensor<2xi32>} : () -> tensor<2xi32>
      %3 = "tfl.reshape"(%1, %2) : (tensor<1x1x3xf32>, tensor<2xi32>) -> tensor<1x3xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 26.1K bytes
  3. tensorflow/compiler/mlir/quantization/common/lift_as_function_call_test.cc

          return %0 : tensor<1x1x4xf32>
        }
      )mlir";
    
      const OwningOpRef<ModuleOp> module_op =
          ParseModuleOpString(kXlaCallModuleOpWithQuantizationMethodAttr);
      ASSERT_TRUE(module_op);
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 26.2K bytes
  4. tensorflow/compiler/mlir/lite/tests/quantize-variables.mlir

      %50 = "tfl.read_variable"(%5) : (tensor<!tf_type.resource>) -> tensor<1x2x3xf32>
      %51 = "quantfork.stats"(%50) {layerStats = dense<[0.0, 1.0]> : tensor<2xf32>} : (tensor<1x2x3xf32>) -> tensor<1x2x3xf32>
      %52 = "tfl.concatenation"(%51, %0) {axis = 1 : i32, fused_activation_function = "NONE"} : (tensor<1x2x3xf32>, tensor<1x2x3xf32>) -> tensor<1x4x3xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 20.3K bytes
  5. tensorflow/compiler/mlir/lite/tests/const-fold.mlir

      %cst_0 = arith.constant dense<0> : tensor<1x2x3xi32>
      %cst_1 = arith.constant dense<1> : tensor<1x2x3xi32>
      %0 = "tfl.concatenation"(%cst_0, %cst_1) {axis = 2 : i32, fused_activation_function = "NONE"} : (tensor<1x2x3xi32>, tensor<1x2x3xi32>) -> tensor<1x2x6xi32>
      func.return %0 : tensor<1x2x6xi32>
    
      // CHECK: %[[CST:.*]] = arith.constant dense<[{{\[}}{{\[}}0, 0, 0, 1, 1, 1], {{\[}}0, 0, 0, 1, 1, 1]]]> : tensor<1x2x6xi32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 45.8K bytes
  6. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/insert_calibration_statistics_saver_with_skipping.mlir

        %output_0, %min_1, %max_2, %histogram_3 = "tf.CustomAggregator"(%0) <{calibration_method = 1 : i32, id = "keeping_id", max_percentile = 0.000000e+00 : f32, min_percentile = 0.000000e+00 : f32, num_bins = 0 : i32}> : (tensor<10x1x3xf32>) -> (tensor<10x1x3xf32>, tensor<f32>, tensor<f32>, tensor<0xi64>)
        return %output_0 : tensor<10x1x3xf32>
      }
      // CHECK-LABEL: @main
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 14 06:31:57 UTC 2024
    - 6.3K bytes
  7. tensorflow/compiler/mlir/lite/stablehlo/tests/tf-tfl-translate-tf-quantize.mlir

    module {
    func.func @tfInplaceUpdate(%arg0: tensor<2x1x2xf32>) -> tensor<2x1x2xf32> {
      %1 = arith.constant dense<1> : tensor<1xi32>
      %2 = arith.constant dense<2.0> : tensor<1x1x2xf32>
      %3 = "tf.InplaceUpdate"(%arg0, %1, %2) {device = ""}
        : (tensor<2x1x2xf32>, tensor<1xi32>, tensor<1x1x2xf32>) -> tensor<2x1x2xf32>
      func.return %3 : tensor<2x1x2xf32>
    }
    }
    
    //CHECK: module {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sun Apr 14 18:33:43 UTC 2024
    - 1.1K bytes
  8. tensorflow/compiler/mlir/lite/tests/quantize-dynamic-range-float16.mlir

      func.return %17 : tensor<1x2x3xf32>
    
      // CHECK: %[[NONE:.*]] = "tfl.no_value"() <{value}> : () -> none
      // CHECK: %[[DQ_1:.*]] = "tfl.dequantize"({{.*}}) : (tensor<1x1xf16>) -> tensor<1x1xf32>
      // CHECK: %[[DQ_2:.*]] = "tfl.dequantize"({{.*}}) : (tensor<1x1xf16>) -> tensor<1x1xf32>
      // CHECK: %[[DQ_3:.*]] = "tfl.dequantize"({{.*}}) : (tensor<1x1xf16>) -> tensor<1x1xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 4.6K bytes
  9. tensorflow/compiler/mlir/lite/tests/end2end/unroll_batch_matmul.pbtxt

    # CHECK:           %[[VAL_9:.*]] = "tfl.transpose"(%[[VAL_1]], %[[VAL_2]]) : (tensor<3x7xf32>, tensor<2xi32>) -> tensor<7x3xf32>
    # CHECK:           %[[VAL_10:.*]] = "tfl.fully_connected"(%[[VAL_7]]#0, %[[VAL_9]], %[[VAL_3]]) <{fused_activation_function = "NONE", keep_num_dims = false, weights_format = "DEFAULT"}> : (tensor<1x5x3xf32>, tensor<7x3xf32>, none) -> tensor<5x7xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 2.6K bytes
  10. tensorflow/compiler/mlir/lite/tests/prepare-quantize-signed.mlir

      %prelu = "tfl.prelu"(%arg0, %cst) : (tensor<1x10x10x3xf32>, tensor<1x1x3xf32>) -> tensor<1x10x10x3xf32>
      func.return %prelu : tensor<1x10x10x3xf32>
    
    // CHECK: %[[cst:.*]] = arith.constant dense<[{{\[}}[1.66394591, 3.61694336, 2.0382936]]]> : tensor<1x1x3xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 18.4K bytes