Results 31 - 40 of 40 for _tfr_quant (0.15 sec)

  1. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library.mlir

                               %bias_scale : tensor<*xf32>, %bias_zp : tensor<*xi32>,
                               %out_scale : tensor<*xf32>, %out_zp : tensor<*xi32>) -> tensor<*x${output_type}>
            attributes {tf_quant.quantized_ops = ${quantized_ops}} {
          %0 = "tf.PartitionedCall"(%input, %filter, %input_scale, %input_zp,
                                      %filter_scale, %filter_zp) {
    - Last Modified: Mon Jan 08 01:16:10 UTC 2024
    - 30.6K bytes
  2. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library_tf_drq.mlir

                             %input : tensor<*xf32>, %weight : tensor<*xi8>,
                             %weight_scale : tensor<*xf32>, %weight_zp : tensor<*xi32>) -> tensor<*xf32>
          attributes {tf_quant.quantized_ops = ${quantized_ops}} {
    
        %input_scale, %input_zp = "tf.PartitionedCall"(%input) {
            config = "", config_proto = "", executor_type = "", f=@internal_calculate_quant_params
    - Last Modified: Fri Mar 03 15:43:38 UTC 2023
    - 12.2K bytes
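    (A sketch of how such runtime quantization parameters can be derived from a tensor's min/max appears after the result list.)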
  3. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/insert_calibration_statistics_saver_with_skipping.mlir

      // CHECK: return
    
      func.func private @composite_dot_general_with_relu_fn_1(%arg0: tensor<10x1x1024xf32>, %arg1: tensor<10x1024x3xf32>) -> tensor<10x1x3xf32> attributes {_from_xla_call_module, tf_quant.composite_function} {
        %cst = stablehlo.constant dense<0.000000e+00> : tensor<10x1x3xf32>
    - Last Modified: Tue May 14 06:31:57 UTC 2024
    - 6.3K bytes
  4. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/prepare_quantize/prepare_quantize_per_channel.mlir

      // CHECK-LABEL: composite_conv2d_with_bias_and_relu6_fn_10
      func.func private @composite_conv2d_with_bias_and_relu6_fn_10(%arg0: tensor<1x3x2x3xf32>, %arg1: tensor<2x3x3x2xf32>, %arg2: tensor<2xf32>) -> tensor<1x2x2x2xf32> attributes {tf.tf_quant.composite_function} {
        %0 = "quantfork.stats"(%arg1) {layerStats = dense<[-3.54062747, 0.54742622]> : tensor<2xf32>} : (tensor<2x3x3x2xf32>) -> tensor<2x3x3x2xf32>
    - Last Modified: Tue Mar 26 07:48:15 UTC 2024
    - 8.6K bytes
  5. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library_uniform_quantized.mlir

                              %bias_scale : tensor<*xf32>, %bias_zp : tensor<*xi32>,
                              %out_scale : tensor<*xf32>, %out_zp : tensor<*xi32>) -> tensor<*x${output_type}>
        attributes {tf_quant.quantized_ops = ${quantized_ops}} {
          // Given the convolution takes 2 qint8 inputs and output a qint32.
          // The accumulation scale is (input_scale * filter_scale).
          // The accumulation zero point is 0.
    - Last Modified: Tue Aug 29 01:13:58 UTC 2023
    - 19.3K bytes
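    (A numeric sketch of why the accumulation scale equals input_scale * filter_scale appears after the result list.)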
  6. tensorflow/compiler/mlir/quantization/tensorflow/tests/prepare_quantize_ptq.mlir

        return %2 : tensor<*xf32>
      }
    
      func.func private @composite_conv2d_with_bias_and_relu6_fn_10(%arg0: tensor<1x3x4x3xf32>, %arg1: tensor<2x3x3x2xf32>, %arg2: tensor<2xf32>) -> tensor<*xf32> attributes {tf.tf_quant.composite_function} {
        %0 = "quantfork.stats"(%arg1) {layerStats = dense<[-9.54062747, 9.54742622]> : tensor<2xf32>} : (tensor<2x3x3x2xf32>) -> tensor<2x3x3x2xf32>
    - Last Modified: Wed Feb 01 10:21:29 UTC 2023
    - 9.1K bytes
  7. tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_weights.mlir

        %0 = "tf.PartitionedCall"(%arg0, %arg1) {config = "", config_proto = "", executor_type = "", f = @inner_fn} : (tensor<1x2x2x3xf32>, tensor<2x1024xf32>) -> tensor<*xf32>
        return %0 : tensor<*xf32>
      }
    
      func.func private @inner_fn(%arg0: tensor<1x2x2x3xf32>, %arg1: tensor<2x1024xf32>) -> tensor<*xf32> attributes {tf_quant.composite_function} {
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 42K bytes
  8. tensorflow/compiler/mlir/quantization/stablehlo/passes/replace_stablehlo_ops_in_main_function_with_xla_call_module_ops.cc

                                                              stablehlo_func_id);
      }
    
      // TODO - b/298966126: Currently quantizable functions are identified in TF
      // Quantizer via the tf_quant.composite_function UnitAttr attached to
      // func ops. We remove this attribute as this interferes with VHLO conversion.
      // Remove this temporary hack.
      for (auto func_op : module_op.getOps<func::FuncOp>()) {
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 21K bytes
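    (A simplified sketch of this attribute-removal cleanup appears after the result list.)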
  9. tensorflow/compiler/mlir/quantization/tensorflow/tests/lift_quantizable_spots_as_functions.mlir

    // CHECK: }
    
    // CHECK-LABEL: func private @composite_conv2d_with_bias_and_relu6_fn_1(%arg0: tensor<1x3x4x3xf32>, %arg1: tensor<2x3x3x2xf32>, %arg2: tensor<2xf32>) -> tensor<*xf32> attributes {tf_quant.composite_function} {
    // CHECK-NEXT: %[[CONV2D_0:.*]] = "tf.Conv2D"(%arg0, %arg1)
    // CHECK-SAME: data_format = "NHWC", dilations = [1, 1, 2, 1], explicit_paddings = [], padding = "SAME", strides = [1, 1, 2, 1], use_cudnn_on_gpu = true
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 26.5K bytes
  10. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize_composite_functions.cc

    constexpr StringRef kQuantizeFuncName = "quantize_i8";
    constexpr StringRef kDequantizeFuncName = "dequantize_i8";
    constexpr StringRef kAttrMapAttribute = "attr_map";
    constexpr StringRef kQuantizedOpsAttribute = "tf_quant.quantized_ops";
    constexpr StringRef kCompositeFuncPrefix = "composite_";
    constexpr StringRef kQuantizedFuncPrefix = "quantized_";
    constexpr StringRef kFloatOutputFuncSuffix = "_float_output_fn";
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 54.5K bytes
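Result 2 (quantized_function_library_tf_drq.mlir) computes the input's quantization parameters at runtime through a call to @internal_calculate_quant_params before the integer kernel runs. As a point of reference only, here is a minimal C++ sketch of the standard asymmetric int8 parameter calculation from a tensor's min/max; the function name CalculateQuantParams and the exact nudging and clamping details are illustrative assumptions, not the library's actual implementation.

    // Minimal sketch (not the library's implementation) of deriving per-tensor
    // asymmetric int8 quantization parameters from the runtime min/max of a
    // float input, which is conceptually what a function like
    // @internal_calculate_quant_params does in the dynamic-range scheme.
    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct QuantParams {
      float scale;
      int32_t zero_point;
    };

    QuantParams CalculateQuantParams(const std::vector<float>& values) {
      // Force the range to contain 0 so that 0.0f maps exactly to an integer
      // value (important for things like zero padding).
      float min_val = std::min(0.0f, *std::min_element(values.begin(), values.end()));
      float max_val = std::max(0.0f, *std::max_element(values.begin(), values.end()));
      constexpr int32_t kQmin = -128, kQmax = 127;  // int8 range
      float scale = (max_val - min_val) / static_cast<float>(kQmax - kQmin);
      if (scale == 0.0f) scale = 1.0f;  // degenerate all-zero input
      int32_t zero_point = static_cast<int32_t>(std::lround(kQmin - min_val / scale));
      zero_point = std::clamp(zero_point, kQmin, kQmax);
      return {scale, zero_point};
    }

    int main() {
      QuantParams p = CalculateQuantParams({-1.5f, 0.25f, 3.0f});
      std::printf("scale=%f zero_point=%d\n", p.scale, p.zero_point);
    }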
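Result 5 (quantized_function_library_uniform_quantized.mlir) notes that for a qint8 x qint8 convolution accumulating into qint32, the accumulation scale is (input_scale * filter_scale) and the accumulation zero point is 0. Since a quantized value represents r = scale * (q - zero_point), every product in the accumulator factors out input_scale * filter_scale, and with a symmetric (zero-point-0) filter no offset term remains. The C++ sketch below only demonstrates that identity numerically on a small dot product; the values are made up.

    // Numeric sketch of why the int32 accumulator of a qint8 x qint8 kernel
    // uses scale = input_scale * filter_scale and zero point 0.
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    int main() {
      const float input_scale = 0.05f, filter_scale = 0.02f;
      const int32_t input_zp = 10;  // filter zero point assumed 0 (symmetric)
      std::vector<int8_t> q_in = {12, -7, 33};
      std::vector<int8_t> q_filter = {5, -9, 14};

      // Integer-only accumulation, as the quantized kernel would do.
      int32_t acc = 0;
      for (size_t i = 0; i < q_in.size(); ++i)
        acc += (static_cast<int32_t>(q_in[i]) - input_zp) * static_cast<int32_t>(q_filter[i]);

      // Dequantizing the accumulator with scale = input_scale * filter_scale
      // and zero point 0 reproduces the float dot product of the dequantized
      // operands.
      float from_acc = (input_scale * filter_scale) * acc;
      float reference = 0.0f;
      for (size_t i = 0; i < q_in.size(); ++i)
        reference += input_scale * (q_in[i] - input_zp) * (filter_scale * q_filter[i]);
      std::printf("from_acc=%f reference=%f\n", from_acc, reference);
    }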
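Result 8's TODO explains that TF Quantizer marks quantizable functions with the tf_quant.composite_function unit attribute and that this pass strips the attribute from func ops because it interferes with VHLO conversion. The snippet cuts off before the loop body, so the following is only a simplified sketch of that kind of cleanup using the upstream MLIR API (Operation::removeAttr); it is not the file's actual code.

    #include "llvm/ADT/StringRef.h"
    #include "mlir/Dialect/Func/IR/FuncOps.h"
    #include "mlir/IR/BuiltinOps.h"

    // Drops the quantizer's marker attribute from every function in the module
    // so it cannot leak into later conversion stages (e.g. VHLO serialization).
    void RemoveCompositeFunctionMarkers(mlir::ModuleOp module_op) {
      constexpr llvm::StringLiteral kCompositeFuncAttr = "tf_quant.composite_function";
      for (auto func_op : module_op.getOps<mlir::func::FuncOp>()) {
        // removeAttr is a no-op when the attribute is not present.
        func_op->removeAttr(kCompositeFuncAttr);
      }
    }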