Results 31 - 40 of 207 for UNIFORM (0.11 sec)

  1. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/optimize_graph.mlir

      %requant = stablehlo.uniform_quantize %conv : (tensor<1x3x4x2x!quant.uniform<i32:f32, 5.8949912267181218E-5>>) -> tensor<1x3x4x2x!quant.uniform<i8:f32, 0.045673100153605144:-62>>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Feb 08 22:40:14 UTC 2024
    - 2.6K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/lite/tests/prepare-quantize-signed.mlir

      %1 = "tfl.quantize"(%arg0) {qtype = tensor<2x2x!quant.uniform<u8:f32, 1.0:128>>} : (tensor<2x2xf32>) -> tensor<2x2x!quant.uniform<u8:f32, 1.0:128>>
      %2 = "tfl.dequantize"(%1) : (tensor<2x2x!quant.uniform<u8:f32, 1.0:128>>) -> tensor<2x2xf32>
      func.return %2 : tensor<2x2xf32>
    
    // CHECK-NEXT: %[[q:.*]] = "tfl.quantize"(%arg0) <{qtype = tensor<2x2x!quant.uniform<i8:f32, 1.000000e+00>>}> : (tensor<2x2xf32>)
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 18.4K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/lstm.mlir

    tensor<2048x!quant.uniform<i32:f32, 1.523437447303877E-7>>, %arg13: tensor<640x2048x!quant.uniform<i8<-127:127>:f32, 0.021174000576138496>>, %arg14: tensor<640x!quant.uniform<i32:f32, 1.601389680352559E-4>>, %arg15: tensor<2048x!quant.uniform<i16:f32, 4.3700000969693065E-4>>, %arg16: tensor<2048x!quant.uniform<i16:f32, 1.1000000085914508E-4>>, %arg17: tensor<2048x!quant.uniform<i16:f32, 1.6799999866634607E-4>>, %arg18: tensor<2048x!quant.uniform<i16:f32, 1.55999994603917E-4>>, %arg19: tensor<1x640x!quant.uniform<i8:f32,...
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 20.4K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization.td

        : QuantizedType<"Uniform",
                            [8, zero_pt, smantissa, sexp, -128, 127], 1>;
    
    // General uniform quantized types. The definitions can be used to specify
    // operand's tensor types.
    def QI4 : QuantizedType<"Uniform", [4], 1>;
    def QUI8 : QuantizedType<"Uniform", [8], 0>;
    def QI8 : QuantizedType<"Uniform", [8], 1>;
    def QUI16 : QuantizedType<"Uniform", [16], 0>;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 05 07:39:40 UTC 2024
    - 8.3K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/lite/experimental/tac/tests/get-alternative-subgraph.mlir

      %5 = "tfl.add"(%4, %arg3) {tac.device = "CPU", tac.inference_type = "QUANTIZED_INT8", fused_activation_function = "NONE"} : (tensor<1x384x128x!quant.uniform<i8:f32, 0.3:3>>, tensor<128x!quant.uniform<i8:f32, 0.2:-4>>) -> tensor<1x384x128x!quant.uniform<i8:f32, 0.3:-3>>
      func.return %5 : tensor<1x384x128x!quant.uniform<i8:f32, 0.3:-3>>
    }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 20.1K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/lite/tests/post-quantize-dynamic-range.mlir

      %q_w = "tfl.pseudo_qconst"() {qtype = tensor<1024x1x1x1x!quant.uniform<i8<-127:127>:f32, 1.000000e+00>>, value = dense<127> : tensor<1024x1x1x1xi8>} : () -> tensor<1024x1x1x1x!quant.uniform<i8<-127:127>:f32, 1.000000e+00>>
      %dq_w = "tfl.dequantize"(%q_w) : (tensor<1024x1x1x1x!quant.uniform<i8<-127:127>:f32, 1.000000e+00>>) -> tensor<1024x1x1x1xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 11.4K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/lite/tests/ops.mlir

    tensor<2048x!quant.uniform<i32:f32, 1.523437447303877E-7>>, %arg13: tensor<640x2048x!quant.uniform<i8<-127:127>:f32, 0.021174000576138496>>, %arg14: tensor<640x!quant.uniform<i32:f32, 1.601389680352559E-4>>, %arg15: tensor<2048x!quant.uniform<i16:f32, 4.3700000969693065E-4>>, %arg16: tensor<2048x!quant.uniform<i16:f32, 1.1000000085914508E-4>>, %arg17: tensor<2048x!quant.uniform<i16:f32, 1.6799999866634607E-4>>, %arg18: tensor<2048x!quant.uniform<i16:f32, 1.55999994603917E-4>>, %arg19: tensor<1x640x!quant.uniform<i8:f32,...
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jun 06 19:09:08 UTC 2024
    - 189.2K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/tfr/tests/rewrite_quantized_io.mlir

    func.func @remove_quantized_io(
      %arg0: tensor<1x10x!quant.uniform<i8:f32, 0.1:-128>>,
      %arg1: tensor<1x5xf32>) -> (tensor<1x10x!quant.uniform<i8:f32, 0.2:42>>, tensor<1x5xf32>) {
      %0 = "tf.MyRequantize"(%arg0) : (tensor<1x10x!quant.uniform<i8:f32, 0.1:-128>>) -> tensor<1x10x!quant.uniform<i8:f32, 0.2:42>>
      %1 = "tf.Intermediate"(%arg1) : (tensor<1x5xf32>) -> tensor<1x5xf32>
      func.return %0, %1 : tensor<1x10x!quant.uniform<i8:f32, 0.2:42>>, tensor<1x5xf32>
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 2.3K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/prepare_quantize/prepare_quantize.mlir

      // CHECK-SAME: quant.uniform<i8:f32, 0.023529411764705882:-128>
      // CHECK: %[[dq2:.*]] = "quantfork.dcast"(%[[q2]])
      // CHECK-SAME: quant.uniform<i8:f32, 0.023529411764705882:-128>
      // CHECK: %[[q3:.*]] = "quantfork.qcast"(%[[cst3]])
      // CHECK-SAME: quant.uniform<i8:f32, 3.9215686274509805E-9>
      // CHECK: %[[dq3:.*]] = "quantfork.dcast"(%[[q3]])
      // CHECK-SAME: quant.uniform<i8:f32, 3.9215686274509805E-9>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Feb 22 19:52:06 UTC 2024
    - 8.7K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/lite/experimental/tac/tests/target-annotation.mlir

      %2 = "tfl.reshape"(%arg0, %0) : (tensor<4x384x32x!quant.uniform<i8:f32, 0.2:-3>>, tensor<4xi32>) -> tensor<1x4x384x32x!quant.uniform<i8:f32, 0.2:-3>>
      // CHECK-NOT: tac.device tac.inference_type
      %3 = "tfl.quantize"(%2) {qtype = tensor<1x4x384x32x!quant.uniform<i8:f32, 0.19:1>>} : (tensor<1x4x384x32x!quant.uniform<i8:f32, 0.2:-3>>) -> tensor<1x4x384x32x!quant.uniform<i8:f32, 0.19:1>>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 19 19:32:06 UTC 2023
    - 6.2K bytes
    - Viewed (0)