Search Options

Results per page
Sort
Preferred Languages
Advanced

Results 1 - 6 of 6 for same_scale_op (0.18 sec)

  1. tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test_base.py

            Args:
              weight_shape: Shape of the weight tensor.
              same_scale_op: Name of the same-scale op to be tested. Raises error
                when an unknown name is given.
            """
            self.filters = np.random.uniform(low=-1.0, high=1.0, size=weight_shape)
            self.same_scale_op = same_scale_op
    
          @def_function.function
          def matmul_and_same_scale(
              self, input_tensor: core.Tensor
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 14 06:31:57 UTC 2024
    - 18.2K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test.py

          self,
          same_scale_op: str,
      ):
        input_shape = (2, 3, 1, 1024)
        filter_shape = (2, 3, 1024, 3)
        static_input_shape = [dim if dim is not None else 2 for dim in input_shape]
    
        model = self._create_matmul_and_same_scale_model(
            input_shape,
            filter_shape,
            self._input_saved_model_path,
            same_scale_op,
        )
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 14 06:31:57 UTC 2024
    - 51.4K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize.cc

      // routines need to be added around that op. When the assumption changes,
      // this policy might change as well.
      bool IsConnectedWithCompsiteFunction(Operation* same_scale_op) const {
        for (const auto& operand : same_scale_op->getOperands()) {
          auto dq_op = dyn_cast_or_null<quantfork::DequantizeCastOp>(
              operand.getDefiningOp());
          if (!dq_op) continue;
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Mar 22 05:52:39 UTC 2024
    - 23.6K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/quantization/stablehlo/passes/quantization_patterns.cc

            has_quantized_types = true;
          }
        }
      }
      return has_quantized_types;
    }
    
    bool IsConnectedWithQuantizedCompsiteFunction(Operation* same_scale_op) {
      for (const Value operand : same_scale_op->getOperands()) {
        auto dq_op =
            dyn_cast_or_null<quantfork::DequantizeCastOp>(operand.getDefiningOp());
        if (!dq_op) continue;
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 03 06:04:36 UTC 2024
    - 41.7K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/quantization/stablehlo/passes/quantization_patterns.h

    // could not beat the overhead of the quantize and dequantize routines need to
    // be added around that op. When the assumption changes, this policy might
    // change as well.
    bool IsConnectedWithQuantizedCompsiteFunction(Operation* same_scale_op);
    
    // A base rewrite pattern which matches any N-in-M-out operations with
    // quantization parameters propagated to at least one of its operands. The
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 10.9K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.cc

        stats_op.erase();
      }
    
      // Returns false if the steps finish without errors.
      return false;
    }
    
    LogicalResult VerifySameScales(Operation* op) {
      auto same_scale_op = cast<SameScalesOpInterface>(op);
    
      SmallVector<QuantizedType, 4> collected_quant_params;
      for (Value input : op->getOperands()) {
        QuantizedType quant_params =
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 08 02:10:16 UTC 2024
    - 43.2K bytes
    - Viewed (0)
Back to top