Results 21 - 30 of 31 for 2x3x3x2xi8 (0.16 sec)

  1. tensorflow/compiler/mlir/quantization/stablehlo/tests/bridge/optimize.mlir

      return %2 : tensor<?x2x2x1xi32>
    }
    
    // -----
    
    // CHECK-LABEL: func @convolution_add_add_static
    func.func @convolution_add_add_static(
        %lhs: tensor<2x3x2x1xi8>, %rhs: tensor<2x1x1x1xi8>,
        %zp_offset: tensor<2x2x2x1xi32>, %bias: tensor<1xi32>
      ) -> tensor<2x2x2x1xi32> {
      // CHECK-DAG: %[[conv:.*]] = mhlo.convolution
    - Last Modified: Sat Feb 24 02:26:47 UTC 2024
    - 10.7K bytes
  2. tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composite_functions_xla.mlir

    // CHECK-SAME: (tensor<1x2x2x3xi8>, tensor<2x2x3x2xi8>, tensor<2xi32>, tensor<f32>, tensor<i32>, tensor<2xf32>, tensor<2xi32>, tensor<2xf32>, tensor<2xi32>, tensor<f32>, tensor<i32>) -> tensor<*xf32>
    // CHECK: return %[[conv_quant]]
    
    // CHECK-LABEL: func private @quantized_conv2d_with_bias_and_relu6_float_output_fn_0
    - Last Modified: Mon Jan 08 01:16:10 UTC 2024
    - 25.2K bytes
  3. tensorflow/compiler/mlir/quantization/tensorflow/tests/prepare_quantize_ptq_per_channel.mlir

    // CHECK: %[[dq0:.*]] = "quantfork.dcast"(%[[q0]])
    
    // CHECK: %[[q1:.*]] = "quantfork.qcast"(%[[cst_1]]) {volatile}
    // CHECK-SAME: tensor<2x3x3x2x!quant.uniform<i8<-127:127>:f32:3, {0.075176584439014829,0.072960192762960605}>>
    // CHECK: %[[dq1:.*]] = "quantfork.dcast"(%[[q1]])
    
    // CHECK: %[[q2:.*]] = "quantfork.qcast"(%arg0)
    - Last Modified: Wed Feb 01 10:21:29 UTC 2023
    - 4.2K bytes
  4. tensorflow/compiler/mlir/quantization/tensorflow/tests/fake_quant_e2e_xla.mlir

    // CHECK: %[[minimum2:.*]] = "tf.Minimum"(%[[maximum2]]
    // CHECK: %[[round2:.*]] = "tf.Round"(%[[minimum2]]
    // CHECK: %[[quant2:.*]] = "tf.Cast"(%[[round2]]) <{Truncate = false}> : (tensor<1x3x2x2xf32>) -> tensor<1x3x2x2xi8>
    
    // CHECK: %[[pad2:.*]] = "tf.PadV2"(%[[quant2]]
    // CHECK: %[[xlaconv2:.*]] = "tf.XlaConvV2"(%[[pad2]]
    // CHECK: %[[sub2:.*]] = "tf.Sub"(%[[xlaconv2]]
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 7.2K bytes
  5. tensorflow/compiler/mlir/lite/tests/decompose-hybrid-quantization.mlir

    func.func @test_depthwise_conv2d_replace_float(%arg0: tensor<1x224x224x3xf32>) -> tensor<1x112x112x32xf32> {
      // CHECK-DAG: %[[VAL0:.+]] = "tfl.pseudo_qconst"() <{qtype = tensor<32x3x3x3x!quant.uniform<{{.+}}>>, value = dense<42> : tensor<32x3x3x3xi8>}>
      // CHECK-DAG: %[[VAL1:.+]] = "tfl.pseudo_qconst"() <{qtype = tensor<32x!quant.uniform<{{.+}}>>, value = dense<0> : tensor<32xi32>}>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 13.1K bytes
  6. tensorflow/compiler/mlir/quantization/tensorflow/tests/prepare_lifting.mlir

      %0 = "quantfork.qcast"(%cst_1) : (tensor<2x3x3x2xf32>) -> tensor<2x3x3x2x!quant.uniform<i8<-127:127>:f32:3, {0.003937007874015748,0.003937007874015748}>>
      %1 = "quantfork.dcast"(%0) : (tensor<2x3x3x2x!quant.uniform<i8<-127:127>:f32:3, {0.003937007874015748,0.003937007874015748}>>) -> tensor<2x3x3x2xf32>
    - Last Modified: Wed Feb 14 03:24:59 UTC 2024
    - 33.3K bytes
  7. tensorflow/compiler/mlir/lite/tests/post-quantize.mlir

      %1 = "tfl.pseudo_qconst"() {qtype = tensor<32x3x3x3x!quant.uniform<u8<1:255>:f32, 0.021826678373682216:151>>, value = dense<-76> : tensor<32x3x3x3xi8>} : () -> tensor<32x3x3x3x!quant.uniform<u8<1:255>:f32, 0.021826678373682216:151>>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 19.9K bytes
  8. tensorflow/compiler/mlir/quantization/tensorflow/tests/prepare_quantize_ptq.mlir

    // CHECK-SAME: tensor<2x!quant.uniform<i32:f32, 0.044169864606680966>>
    // CHECK: %[[dq0:.*]] = "quantfork.dcast"(%[[q0]])
    
    // CHECK: %[[q1:.*]] = "quantfork.qcast"(%[[cst_1]]) {volatile}
    // CHECK-SAME: tensor<2x3x3x2x!quant.uniform<i8<-127:127>:f32, 0.075176584439014829>>
    // CHECK: %[[dq1:.*]] = "quantfork.dcast"(%[[q1]])
    
    // CHECK: %[[q2:.*]] = "quantfork.qcast"(%arg0)
    - Last Modified: Wed Feb 01 10:21:29 UTC 2023
    - 9.1K bytes
  9. tensorflow/compiler/mlir/lite/tests/quantize.mlir

    // CHECK: %[[cst1:.*]] = "tfl.pseudo_qconst"() <{qtype = tensor<32x3x3x3x!quant.uniform<u8<1:255>:f32, 1.000000e-01>>, value = dense<1> : tensor<32x3x3x3xi8>}>
    // CHECK: %[[conv:.*]] = "tfl.conv_2d"(%arg0, %[[cst1]], %[[cst0]])
    // CHECK: return %[[conv]] : tensor<1x112x112x32x!quant.uniform<u8:f32, 0.023528476789885875>>
    }
    
    // CHECK-LABEL: QuantizeConv2D4Bit
    - Last Modified: Tue May 28 23:10:13 UTC 2024
    - 39.7K bytes
  10. tensorflow/compiler/mlir/lite/tests/quantize-numeric-verify.mlir

      %3 = "tfl.pseudo_qconst"() {qtype = tensor<32x3x3x3x!quant.uniform<u8<1:255>:f32, 0.021826678373682216:151>>, value = dense<-76> : tensor<32x3x3x3xi8>} : () -> tensor<32x3x3x3x!quant.uniform<u8<1:255>:f32, 0.021826678373682216:151>>
      %4 = "tfl.dequantize"(%3) : (tensor<32x3x3x3x!quant.uniform<u8<1:255>:f32, 0.021826678373682216:151>>) -> tensor<32x3x3x3xf32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 15.1K bytes
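
Most of these hits match the same weight fake-quant pattern: a 2x3x3x2 float weight run through a per-channel qcast/dcast pair, as seen in the FileCheck lines of prepare_quantize_ptq_per_channel.mlir and the IR in prepare_lifting.mlir above. A minimal sketch of that IR, assuming the quantfork dialect used by these tests is registered and using a placeholder function name and argument, looks like:

    func.func @qdq_weight_sketch(%w: tensor<2x3x3x2xf32>) -> tensor<2x3x3x2xf32> {
      // Quantize the float weight to per-channel symmetric i8 (axis 3, range -127:127),
      // reusing the per-channel scales that appear in prepare_lifting.mlir above.
      %q = "quantfork.qcast"(%w) : (tensor<2x3x3x2xf32>) -> tensor<2x3x3x2x!quant.uniform<i8<-127:127>:f32:3, {0.003937007874015748,0.003937007874015748}>>
      // Immediately dequantize back to float; the quantize passes are expected to fold this pair away.
      %dq = "quantfork.dcast"(%q) : (tensor<2x3x3x2x!quant.uniform<i8<-127:127>:f32:3, {0.003937007874015748,0.003937007874015748}>>) -> tensor<2x3x3x2xf32>
      return %dq : tensor<2x3x3x2xf32>
    }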