Results 1 - 8 of 8 for fake_quant_with_min_max_args (0.44 sec)

  1. tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test_base.py

        )
        # Insert fake quant to simulate a QAT model.
        weight = array_ops.fake_quant_with_min_max_args(
            weight, min=-0.1, max=0.2, num_bits=8, narrow_range=False
        )
    
        # shape: (2, 2)
        output_tensor = math_ops.matmul(matmul_input, weight)
        # Insert fake quant to simulate a QAT model.
        output_tensor = array_ops.fake_quant_with_min_max_args(
            output_tensor, min=-0.2, max=0.2, num_bits=8, narrow_range=False
    - Last Modified: Thu Mar 21 08:51:46 UTC 2024
    - 51.2K bytes
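    A minimal, self-contained sketch of the same QAT-simulation pattern shown in this excerpt, using the public `tf.quantization` endpoint; the input values and matmul shapes here are illustrative, not taken from the test:

    ```python
    import tensorflow as tf

    # Illustrative tensors; the quantization ranges mirror the excerpt above.
    matmul_input = tf.constant([[1.0, 2.0], [3.0, 4.0]])
    weight = tf.constant([[0.05, -0.08], [0.12, 0.01]])

    # Insert fake quant on the weights to simulate a QAT model.
    weight = tf.quantization.fake_quant_with_min_max_args(
        weight, min=-0.1, max=0.2, num_bits=8, narrow_range=False
    )

    # shape: (2, 2)
    output_tensor = tf.linalg.matmul(matmul_input, weight)

    # Insert fake quant on the activation as well.
    output_tensor = tf.quantization.fake_quant_with_min_max_args(
        output_tensor, min=-0.2, max=0.2, num_bits=8, narrow_range=False
    )
    print(output_tensor)
    ```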
  2. tensorflow/compiler/mlir/quantization/common/ir/QuantOps.td

      }];
    
      let description = [{
        Given a const min, max, num_bits and narrow_range attribute, applies the
        same uniform quantization simulation as is done by the TensorFlow
        fake_quant_with_min_max_args op. See the fakeQuantAttrsToType() utility
        method and the quant-convert-simulated-quantization pass for further details.
      }];
    
      let arguments = (ins
        F32Tensor:$inputs,
        F32Attr:$min,
    - Last Modified: Tue Jan 09 03:10:59 UTC 2024
    - 10.2K bytes
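    The description above refers to turning the `min`, `max`, `num_bits` and `narrow_range` attributes into a uniform quantized type. A rough sketch of that scale/zero-point derivation, following the usual asymmetric-quantization convention rather than the actual `fakeQuantAttrsToType()` code:

    ```python
    def attrs_to_scale_zero_point(min_val, max_val, num_bits, narrow_range):
        """Sketch: derive a uniform-quantization scale and zero point from
        fake-quant attributes (common convention, not the MLIR utility itself)."""
        qmin = 1 if narrow_range else 0
        qmax = (1 << num_bits) - 1
        scale = (max_val - min_val) / (qmax - qmin)
        zero_point = int(round(qmin - min_val / scale))
        return scale, zero_point

    # Example: an 8-bit range of [-0.1, 0.2].
    print(attrs_to_scale_zero_point(-0.1, 0.2, num_bits=8, narrow_range=False))
    ```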
  3. tensorflow/compiler/mlir/lite/quantization/ir/QuantOps.td

      }];
    
      let description = [{
        Given a const min, max, num_bits and narrow_range attribute, applies the
        same uniform quantization simulation as is done by the TensorFlow
        fake_quant_with_min_max_args op. See the fakeQuantAttrsToType() utility
        method and the quant-convert-simulated-quantization pass for further details.
      }];
    
      let arguments = (ins
        F32Tensor:$inputs,
        F32Attr:$min,
    - Last Modified: Thu Oct 13 12:46:08 UTC 2022
    - 10.2K bytes
  4. tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test.py

          def _matmul(self, x, y):
            x = array_ops.fake_quant_with_min_max_vars(
                x,
                min=ops.convert_to_tensor(self._min[0]),
                max=ops.convert_to_tensor(self._max[0]),
                num_bits=8,
                narrow_range=False,
            )
            y = array_ops.fake_quant_with_min_max_vars(
                y,
                min=ops.convert_to_tensor(self._min[1]),
    - Last Modified: Fri May 17 03:36:50 UTC 2024
    - 235.6K bytes
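    Unlike `fake_quant_with_min_max_args`, the `*_vars` variant used in this test takes `min` and `max` as tensors (typically variables updated during training) rather than as fixed attributes. A small illustrative sketch with made-up values:

    ```python
    import tensorflow as tf

    x = tf.constant([[0.3, -1.2], [0.8, 2.5]])

    # The quantization range is held in variables, as a QAT loop would do.
    min_var = tf.Variable(-1.0)
    max_var = tf.Variable(1.0)

    x_fq = tf.quantization.fake_quant_with_min_max_vars(
        x, min=min_var, max=max_var, num_bits=8, narrow_range=False
    )
    print(x_fq)
    ```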
  5. tensorflow/compiler/mlir/tensorflow/tests/lower_tf.mlir

      // CHECK: return [[VAL8]] : tensor<1x8x2xf32>
      func.return %2 : tensor<1x8x2xf32>
    }
    
    func.func @fake_quant_with_min_max_args(%arg0 : tensor<?x?xf32>) -> tensor<?x?xf32> {
      // CHECK-DAG: [[VAL0:%.+]] = "tf.Const"() <{value = dense<1.275000e+02> : tensor<f32>}>
      // CHECK-DAG: [[VAL1:%.+]] = "tf.Const"() <{value = dense<1.00392163> : tensor<f32>}>
    - Last Modified: Fri Jan 05 18:35:42 UTC 2024
    - 92K bytes
  6. tensorflow/compiler/mlir/lite/stablehlo/tests/legalize-skip-quantization-ops.mlir

    // RUN: odml-to-stablehlo-opt %s --tf-stablehlo=skip-quantization-ops=false | FileCheck %s --check-prefix=CHECK-NOSKIP
    
    func.func @fake_quant_with_min_max_vars(%arg0: tensor<1x1x28x48xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<1x1x28x48xf32> {
    - Last Modified: Wed Dec 14 07:38:29 UTC 2022
    - 676 bytes
  7. RELEASE.md

            `tf.math.zeta`.
        *   New endpoints in `tf.quantization` namespace:
            `tf.quantization.dequantize`,
            `tf.quantization.fake_quant_with_min_max_args`,
            `tf.quantization.fake_quant_with_min_max_args_gradient`,
            `tf.quantization.fake_quant_with_min_max_vars`,
            `tf.quantization.fake_quant_with_min_max_vars_gradient`,
            `tf.quantization.fake_quant_with_min_max_vars_per_channel`,
    - Last Modified: Tue Jun 11 23:24:08 UTC 2024
    - 730.3K bytes
  8. tensorflow/compiler/mlir/tensorflow/ir/tf_generated_ops.td

    `min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`.
    
    
    Examples
    
    ```python
    
    inp = tf.constant([10.03, -10.23, 3])
    out = tf.quantization.fake_quant_with_min_max_args(inp, min=-5, max=5,
                                                       num_bits=16)
    print(out)
    
    #  Output:
    #  tf.Tensor([ 4.9999237 -5.0000763  3.0000763], shape=(3,), dtype=float32)
    ```
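    The nudging described by `min_adj` and `max_adj` above can be reproduced outside of TensorFlow. A NumPy sketch of the documented formula (my reading of it, not the kernel implementation); the tie-breaking rule is an assumption chosen so the example reproduces the documented output:

    ```python
    import numpy as np

    def fake_quant_reference(x, min_val, max_val, num_bits=8, narrow_range=False):
        """Sketch of the documented min_adj/max_adj nudging; not the TF kernel."""
        qmin = 1 if narrow_range else 0
        qmax = (1 << num_bits) - 1
        scale = (max_val - min_val) / (qmax - qmin)

        # Assumption: ties round away from zero, which matches the example above.
        def round_half_away(v):
            return np.sign(v) * np.floor(np.abs(v) + 0.5)

        # min_adj = scale * round(min / scale); max_adj = max + min_adj - min.
        min_adj = scale * round_half_away(min_val / scale)
        max_adj = max_val + min_adj - min_val

        # Clamp to the nudged range, then snap to the quantization grid.
        clamped = np.clip(x, min_adj, max_adj)
        return round_half_away((clamped - min_adj) / scale) * scale + min_adj

    print(fake_quant_reference(np.array([10.03, -10.23, 3.0]), -5, 5, num_bits=16))
    # Approximately [ 4.9999237 -5.0000763  3.0000763], as in the example above.
    ```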
    
    - Last Modified: Tue Jun 11 23:24:08 UTC 2024
    - 793K bytes