Results 1 - 10 of 21 for dequantize_i8 (0.18 sec)

  1. tensorflow/compiler/mlir/quantization/tensorflow/tests/insert_quantized_functions.mlir

    // CHECK-SAME: tf_quant.quantized_ops = ["Conv3D", "BiasAdd"]
    // CHECK: func private @quantized_batch_matmul_with_bias_fn
    // CHECK-SAME: tf_quant.quantized_ops = ["BatchMatMul", "BiasAdd"]
    // CHECK: func private @quantize_i8
    // CHECK: func private @dequantize_i8
    
    // UQ-CHECK-NOT: func private @internal_conv2d_fn
    // UQ-CHECK-NOT: func private @internal_requantize_qi8_fn
    // UQ-CHECK-NOT: func private @internal_requantize_no_activation_fn
    - Last Modified: Tue Aug 29 01:13:58 UTC 2023
    - 3.3K bytes
  2. tensorflow/compiler/mlir/quantization/tensorflow/tests/fake_quant_e2e_flow.mlir

    // CHECK-NEXT: %[[V3:.*]] = "tf.PartitionedCall"(%[[V2]], %[[CST_4]], %[[CST_5]]) <{config = "", config_proto = "", executor_type = "", f = @dequantize_i8}> : (tensor<*xi8>, tensor<f32>, tensor<i32>) -> tensor<*xf32>
    // CHECK-NEXT: return %[[V3]] : tensor<*xf32>
    
    // CHECK: func private @quantize_i8(
    // CHECK: func private @dequantize_i8(
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 3.5K bytes
  3. tensorflow/compiler/mlir/quantization/tensorflow/tests/replace_cast_hacks_with_tf_xla_ops_large_constants.mlir

        %4 = "tf.PartitionedCall"(%3, %cst_2, %cst_3) {config = "", config_proto = "", executor_type = "", f = @dequantize_i8} : (tensor<1x2240x1120x512xi8>, tensor<f32>, tensor<i32>) -> tensor<1x2240x1120x512xf32>
        return %4 : tensor<1x2240x1120x512xf32>
      }
      func.func private @quantize_i8(%arg0: tensor<1x2240x2240x3xf32>, %arg1: tensor<f32>, %arg2: tensor<i32>) -> tensor<1x2240x2240x3xi8> {
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 5.9K bytes
  4. tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composit_functions_debugging.mlir

    // TF-DAG: %[[conv1_dequantized_1:.*]] = "tf.PartitionedCall"(%[[conv1_quantized]], %[[out_scale]], %[[in_out_zp]]) <{config = "", config_proto = "", executor_type = "", f = @dequantize_i8}>
    // TF-DAG: %[[identity:.*]] = "tf.Identity"(%[[conv1_dequantized_1]])
    - Last Modified: Mon Nov 06 01:23:21 UTC 2023
    - 80.5K bytes
  5. tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composite_functions.mlir

    // CHECK: %[[dequantize:.*]] = "tf.PartitionedCall"(%[[conv_quant]], %[[out_scale]], %[[out_zp]])
    // CHECK-SAME: f = @dequantize_i8
    
    // CHECK: %[[conv_float:.*]] = "tf.PartitionedCall"(%arg0, %[[w_float]], %[[b_float]])
    // CHECK-SAME: f = @composite_conv2d_with_bias_and_relu6_fn_1
    
    // CHECK: return %[[dequantize]], %[[conv_float]]
    
    - Last Modified: Mon Nov 06 01:23:21 UTC 2023
    - 15.2K bytes
  6. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library_xla_weight_only.mlir

          } : (tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32>
    
        func.return %out : tensor<*xf32>
      }
    
      // Used for legacy weight-only
      func.func @dequantize_i8(%input : tensor<*xi8>, %scale : tensor<*xf32>, %zp : tensor<*xi32>) -> tensor<*xf32> {
        // Use identity op to avoid the weight being constant-folded.
        %identity = "tf.Identity"(%input) : (tensor<*xi8>) -> tensor<*xi8>
    - Last Modified: Fri Mar 03 15:43:38 UTC 2023
    - 7K bytes
  7. tensorflow/compiler/mlir/quantization/tensorflow/passes/insert_quantized_functions.cc

                     METHOD_STATIC_RANGE_WEIGHT_ONLY_INT8) {
        // Uniform quantized opset is not supported for weight-only as inputs for
        // weight quantization are floats. And only dequantize_i8 is used from the
        // quantized function library.
        function_library_map = {
            {OpSet::TF, kQuantizedFunctionLibraryInMLIR},
            {OpSet::XLA, kQuantizedFunctionLibraryInMLIR_XLA_WEIGHT_ONLY}};
      } else {
    - Last Modified: Fri Mar 22 05:52:39 UTC 2024
    - 8.7K bytes
  8. tensorflow/compiler/mlir/quantization/tensorflow/tests/replace_cast_hacks_with_tf_xla_ops.mlir

        %2 = "tf.PartitionedCall"(%1, %cst_3, %cst_2) {config = "", config_proto = "", executor_type = "", f = @dequantize_i8} : (tensor<1x3x2x2xi8>, tensor<f32>, tensor<i32>) -> tensor<1x3x2x2xf32>
        return %2 : tensor<1x3x2x2xf32>
      }
      func.func private @quantize_i8(%arg0: tensor<1x3x4x3xf32>, %arg1: tensor<f32>, %arg2: tensor<i32>) -> tensor<1x3x4x3xi8> {
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 81K bytes
  9. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library_tf_drq.mlir

          } : (tensor<*xi32>, tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32>
    
        func.return %out : tensor<*xf32>
      }
    
      // For weight-only
      func.func @dequantize_i8(%input : tensor<*xi8>, %scale : tensor<*xf32>, %zp : tensor<*xi32>) -> tensor<*xf32> {
        // Use identity op to avoid the weight being constant-folded.
        %identity = "tf.Identity"(%input) : (tensor<*xi8>) -> tensor<*xi8>
    - Last Modified: Fri Mar 03 15:43:38 UTC 2023
    - 12.2K bytes
  10. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library_uniform_quantized.mlir

        func.return %quantize : tensor<*x!tf_type.qint32>
      }
    
      // Dequantize final graph output back to f32. Input is qint8.
      func.func @dequantize_i8(%input : tensor<*x!tf_type.qint8>, %input_scale : tensor<*xf32>, %input_zp : tensor<*xi32>) -> tensor<*xf32> {
        %dequantize = "tf.UniformDequantize"(%input, %input_scale, %input_zp) {
          Tin = "tfdtype$DT_QINT8",
    - Last Modified: Tue Aug 29 01:13:58 UTC 2023
    - 19.3K bytes
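
Taken together, the hits above show one pattern: the quantization passes bracket quantized computation with calls to @quantize_i8 and @dequantize_i8 from the quantized function library, and the weight-only variants (results 6 and 9) wrap the input in a tf.Identity solely to keep the weight from being constant-folded. As a rough guide to the arithmetic behind the (input, scale, zero_point) signatures in these snippets, here is a minimal NumPy sketch of the standard affine quantization scheme; the function names mirror the library's, but the exact rounding and clamping behavior of TensorFlow's implementation is an assumption here, not confirmed by the excerpts.

    import numpy as np

    def quantize_i8(x, scale, zero_point):
        # Standard affine quantization: q = round(x / scale) + zero_point,
        # clamped to the int8 range. (TF's exact rounding mode may differ.)
        q = np.round(x / scale) + zero_point
        return np.clip(q, -128, 127).astype(np.int8)

    def dequantize_i8(q, scale, zero_point):
        # Inverse affine mapping back to float32: x ~ (q - zero_point) * scale.
        # This matches the uniform dequantization pattern seen in result 10.
        return ((q.astype(np.int32) - zero_point) * scale).astype(np.float32)

    # Round trip: values survive up to one quantization step of error.
    x = np.array([-1.0, -0.5, 0.0, 0.5, 1.0], dtype=np.float32)
    q = quantize_i8(x, scale=np.float32(1.0 / 127.0), zero_point=0)
    print(dequantize_i8(q, scale=np.float32(1.0 / 127.0), zero_point=0))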