Results 1 - 10 of 157 for I8 (0.02 sec)

  1. tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_xla.mlir

      %dq_input = "quantfork.dcast"(%q_input) : (tensor<1x3x4x3x!quant.uniform<i8:f32, 0.58810077742034317:-128>>) -> tensor<1x3x4x3xf32>
      %q_weight = "quantfork.qcast"(%weight) : (tensor<2x3x3x2xf32>) -> tensor<2x3x3x2x!quant.uniform<i8:f32, 0.074855112561992565:-1>>
      %dq_weight = "quantfork.dcast"(%q_weight) : (tensor<2x3x3x2x!quant.uniform<i8:f32, 0.074855112561992565:-1>>) -> tensor<2x3x3x2xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 08 19:32:28 UTC 2024
    - 11.4K bytes
  2. tensorflow/compiler/mlir/lite/tests/post-quantize.mlir

        :  ( tensor<1x28x28x!quant.uniform<i8:f32, 0.003:-128>>,
             tensor<20x28x!quant.uniform<i8:f32, 0.006:-34>>, tensor<20x28x!quant.uniform<i8:f32, 0.006:-34>>, tensor<20x28x!quant.uniform<i8:f32, 0.006:-34>>, tensor<20x28x!quant.uniform<i8:f32, 0.006:-34>>,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 19.9K bytes
  3. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize_composite_functions.mlir

    // CHECK-PER-TENSOR: %[[DOT_GENERAL_0:.+]] = stablehlo.dot_general %[[ARG_1]], %[[ARG_2]], contracting_dims = [1] x [0] : (tensor<1x2x!quant.uniform<i8:f32, {{.*}}>>, tensor<2x3x!quant.uniform<i8<-127:127>:f32, {{.*}}>>) -> tensor<1x3x!quant.uniform<i32:f32, {{.*}}>>...
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 09 05:56:10 UTC 2024
    - 91.6K bytes
  4. tensorflow/compiler/mlir/lite/experimental/tac/tests/device-transform-nnapi.mlir

       %0 = "tfl.pseudo_const"() {value = dense<[1, 2]> : tensor<2xi32>} : () -> tensor<2xi32>
       %1 = "tfl.mean"(%arg0, %0) {keep_dims = false} : (tensor<?x7x7x2048x!quant.uniform<i8:f32, 0.6:-128>>, tensor<2xi32>) -> tensor<?x2048x!quant.uniform<i8:f32, 0.6:-128>>
      func.return %1 : tensor<?x2048x!quant.uniform<i8:f32, 0.6:-128>>
    }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 4.9K bytes
  5. tensorflow/compiler/mlir/quantization/stablehlo/cc/report_test.cc

          %3 = stablehlo.uniform_dequantize %2 : (tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>) -> tensor<1x3xf32>
          return %3 : tensor<1x3xf32>
        }
    
        func.func private @quantized_dot_general_fn(%arg0: tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>, %arg1: tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>) -> tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>> {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 10:10:34 UTC 2024
    - 18.5K bytes
  6. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize/quantize.mlir

    // CHECK-SAME: {_quantization_method = "static_range_ptq { }"}
    // CHECK-SAME: : (tensor<1x4x!quant.uniform<i8:f32, 6.000000e-03:-128>>, tensor<4x3x!quant.uniform<i8<-127:127>:f32:1, {5.000000e-03,5.000000e-03,5.000000e-03}>>) -> tensor<1x3x!quant.uniform<i8:f32, 1.000000e-03:-3>>
    // CHECK: %[[DCAST_0:.+]] = "quantfork.dcast"(%[[CALL_0]]) :  (tensor<1x3x!quant.uniform<i8:f32, 1.000000e-03:-3>>) -> tensor<1x3xf32>
    // CHECK: return
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 18 01:38:40 UTC 2024
    - 6.3K bytes
  7. tensorflow/compiler/mlir/lite/tests/modify_io_nodes.mlir

      %4 = "tfl.reshape"(%3, %cst) : (tensor<1x112x112x32x!quant.uniform<i8:f32, 0.023528476789885875>>, tensor<2xi32>) -> tensor<1x401408x!quant.uniform<i8:f32, 0.023528476789885875>>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 19.9K bytes
  8. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/prepare_quantize/prepare_quantize.mlir

      // CHECK-SAME: quant.uniform<i8:f32, 0.023529411764705882:-128>
      // CHECK: %[[dq2:.*]] = "quantfork.dcast"(%[[q2]])
      // CHECK-SAME: quant.uniform<i8:f32, 0.023529411764705882:-128>
      // CHECK: %[[q3:.*]] = "quantfork.qcast"(%[[cst3]])
      // CHECK-SAME: quant.uniform<i8:f32, 3.9215686274509805E-9>
      // CHECK: %[[dq3:.*]] = "quantfork.dcast"(%[[q3]])
      // CHECK-SAME: quant.uniform<i8:f32, 3.9215686274509805E-9>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Feb 22 19:52:06 UTC 2024
    - 8.7K bytes
  9. tensorflow/compiler/mlir/lite/experimental/tac/tests/target-annotation.mlir

      %2 = "tfl.reshape"(%arg0, %0) : (tensor<4x384x32x!quant.uniform<i8:f32, 0.2:-3>>, tensor<4xi32>) -> tensor<1x4x384x32x!quant.uniform<i8:f32, 0.2:-3>>
      // CHECK-NOT: tac.device tac.inference_type
      %3 = "tfl.quantize"(%2) {qtype = tensor<1x4x384x32x!quant.uniform<i8:f32, 0.19:1>>} : (tensor<1x4x384x32x!quant.uniform<i8:f32, 0.2:-3>>) -> tensor<1x4x384x32x!quant.uniform<i8:f32, 0.19:1>>
      // CHECK: tac.device = "CPU", tac.inference_type = "QUANTIZED_INT8"
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 19 19:32:06 UTC 2023
    - 6.2K bytes
  10. tensorflow/compiler/mlir/lite/tests/decompose-hybrid-quantization.mlir

      func.return %2 : tensor<1x32x32x16x!quant.uniform<i8:f32, 1.0>>
    }
    
    // -----
    
    // CHECK-LABEL: @test_conv2d_qi16
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 13.1K bytes
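
The common thread in these results is MLIR's uniform quantized element type, written !quant.uniform<storage:expressed, scale:zero_point>. For i8 storage, a stored value q maps back to a real value as real ≈ scale * (q - zero_point); for example, the type quant.uniform<i8:f32, 0.023529411764705882:-128> in result 8 uses a scale of 6/255 with zero point -128, so the full i8 range [-128, 127] covers the real interval [0.0, 6.0]. The sketch below is not taken from any of the files above; the function name, shapes, and the scale/zero-point values are illustrative, but the quantfork.qcast/quantfork.dcast round trip mirrors the pattern these quantization tests exercise.

    // Hypothetical example: quantize an f32 tensor to i8 storage and immediately
    // dequantize it. With scale 0.5 and zero point -128, real ≈ 0.5 * (q + 128).
    func.func @qcast_dcast_round_trip(%arg0: tensor<1x3xf32>) -> tensor<1x3xf32> {
      %q = "quantfork.qcast"(%arg0) : (tensor<1x3xf32>) -> tensor<1x3x!quant.uniform<i8:f32, 5.000000e-01:-128>>
      %dq = "quantfork.dcast"(%q) : (tensor<1x3x!quant.uniform<i8:f32, 5.000000e-01:-128>>) -> tensor<1x3xf32>
      func.return %dq : tensor<1x3xf32>
    }

Per-axis variants such as !quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}> (results 3, 5, and 6) additionally restrict the storage range to [-127, 127] and attach one scale per slice along the quantized dimension (here dimension 1).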