Results 91 - 100 of 200 for requantize (0.13 sec)

  1. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_config.h

      // weights but will dequantize them back at runtime, which is useful for
      // memory-bound cases when no kernel support is available in lower precisions.
      // Used in MLIR dynamic range quantizer.
      bool weight_only_quantization = false;
    
      // The minimum number of elements in a weights array required to apply
      // quantization. This is especially useful to avoid quantizing small tensors, as
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Mar 13 10:16:19 UTC 2024
    - 10.8K bytes
    - Viewed (0)
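
The comment in this header describes weight-only quantization: weights are stored in a lower precision and dequantized back to float at runtime, trading compute for memory when no low-precision kernels exist. In the public TFLite API the same behavior is exposed as dynamic-range quantization; a minimal sketch, assuming an arbitrary small Keras model (the architecture and shapes below are illustrative only):

    import tensorflow as tf

    # Any small Keras model serves; this one is purely illustrative.
    model = tf.keras.Sequential([
        tf.keras.Input(shape=(16,)),
        tf.keras.layers.Dense(64, activation="relu"),
        tf.keras.layers.Dense(1),
    ])

    converter = tf.lite.TFLiteConverter.from_keras_model(model)
    # Optimize.DEFAULT with no representative dataset selects dynamic-range
    # quantization: weights are stored as int8 and dequantized at runtime,
    # matching the memory-bound case the header comment describes.
    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    tflite_model = converter.convert()
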
  2. tensorflow/compiler/mlir/lite/transforms/optimize_op_order.cc

        };
    
      // If the op is a pass-through op with a (3x) smaller output, the dequantize
      // op can be pushed down to the single result of this op.
        if (!llvm::dyn_cast<mlir::SameScalesOpInterface>(passthrough_op) ||
            passthrough_op->getNumResults() != 1) {
          return failure();
        }
        // Only push down the dequantize op when the output is smaller, so that it
        // can have smaller memory usage.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 5.1K bytes
    - Viewed (0)
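
The comments in this pass explain the reordering: when a dequantize feeds a pass-through op that shrinks its input (the op must implement SameScalesOpInterface and have a single result), pushing the dequantize below it materializes the float tensor at the smaller size. A numpy sketch of that size argument, with illustrative scale and zero-point values (slicing stands in for the pass-through op):

    import numpy as np

    scale, zero_point = 0.05, 3  # illustrative quantization parameters
    q = np.random.randint(-128, 128, size=(1024, 1024), dtype=np.int8)

    # Dequantize first, then slice: materializes a 1024x1024 float32 buffer.
    a = (scale * (q.astype(np.float32) - zero_point))[:64, :64]

    # Slice first, then dequantize: the float32 buffer is only 64x64.
    b = scale * (q[:64, :64].astype(np.float32) - zero_point)

    assert np.array_equal(a, b)  # same values, far less transient memory
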
  3. tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/depthwise_conv2d_v2.mlir

      // CHECK:      {
      // CHECK-NEXT:  version: 3,
      // CHECK-NEXT:  operator_codes: [ {
      // CHECK-NEXT:    deprecated_builtin_code: 6,
      // CHECK-NEXT:    version: 1,
      // CHECK-NEXT:    builtin_code: DEQUANTIZE
      // CHECK-NEXT:  }, {
      // CHECK-NEXT:    deprecated_builtin_code: 4,
      // CHECK-NEXT:    version: 2,
      // CHECK-NEXT:    builtin_code: DEPTHWISE_CONV_2D
      // CHECK-NEXT:  } ],
      // CHECK-NEXT:  subgraphs: [ {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jul 14 16:41:28 UTC 2022
    - 9.1K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights_test.cc

        const auto float_graph = model_->subgraphs()->Get(subgraph_idx);
        // The output graph should have an extra tensor from the added dequantize
        // op.
        ASSERT_EQ(quantized_graph->tensors()->size(),
                  float_graph->tensors()->size() + 1);
        // Check that a dequantize op exists.
        int32_t dequant_input_idx = -1;
        int32_t dequant_output_idx = -1;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 32.3K bytes
    - Viewed (0)
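
The assertion counts tensors because weight quantization rewrites a float weight tensor into an int8 tensor plus a dequantize op, and the dequantize op's float output is one new tensor. A numpy sketch of that round trip under a standard symmetric per-tensor int8 scheme (the scheme and sizes are assumptions for illustration):

    import numpy as np

    w = np.random.randn(128, 128).astype(np.float32)  # float weights

    # Symmetric per-tensor int8 quantization: one scale for the tensor.
    scale = np.abs(w).max() / 127.0
    w_q = np.clip(np.round(w / scale), -127, 127).astype(np.int8)

    # The inserted dequantize op yields a second, float tensor at runtime,
    # which is why the quantized graph has tensors()->size() + 1.
    w_dq = scale * w_q.astype(np.float32)
    print("max abs error:", np.abs(w - w_dq).max())
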
  5. tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/depthwise_conv2d.mlir

      // CHECK:      {
      // CHECK-NEXT:  version: 3,
      // CHECK-NEXT:  operator_codes: [ {
      // CHECK-NEXT:    deprecated_builtin_code: 6,
      // CHECK-NEXT:    version: 1,
      // CHECK-NEXT:    builtin_code: DEQUANTIZE
      // CHECK-NEXT:  }, {
      // CHECK-NEXT:    deprecated_builtin_code: 4,
      // CHECK-NEXT:    version: 1,
      // CHECK-NEXT:    builtin_code: DEPTHWISE_CONV_2D
      // CHECK-NEXT:  } ],
      // CHECK-NEXT:  subgraphs: [ {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jul 14 16:41:28 UTC 2022
    - 9K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/lite/transforms/modify_io_nodes.cc

            returned_type = quant::ConvertSignedQuantizedToUnsigned(
                dequantize_input.getType(), dequantize_op.getLoc());
        // Replace the dequantize op with a quantize op.
            TypeAttr type_attr = TypeAttr::get(returned_type);
            auto quantize_op = builder.create<QuantizeOp>(
                dequantize_op.getLoc(), returned_type, dequantize_input, type_attr);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 8.9K bytes
    - Viewed (0)
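
Judging by its name and standard quantization math (an assumption, since the function body is not shown here), ConvertSignedQuantizedToUnsigned maps an i8 quantized type to a u8 type with the same scale and a zero point shifted by 128; shifting the stored values by the same offset leaves the represented real numbers unchanged. A numpy check with illustrative parameters:

    import numpy as np

    scale, zp_i8 = 0.02, -10  # illustrative signed quantization parameters
    q_i8 = np.array([-128, -10, 0, 127], dtype=np.int8)

    # Signed -> unsigned: same scale; zero point and values shift by +128.
    zp_u8 = zp_i8 + 128
    q_u8 = (q_i8.astype(np.int16) + 128).astype(np.uint8)

    real_i8 = scale * (q_i8.astype(np.float32) - zp_i8)
    real_u8 = scale * (q_u8.astype(np.float32) - zp_u8)
    assert np.allclose(real_i8, real_u8)  # identical represented values
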
  7. tensorflow/compiler/mlir/tfr/passes/decompose_patterns.td

    include "mlir/IR/PatternBase.td"
    include "mlir/Dialect/Arith/IR/ArithOps.td"
    include "mlir/Dialect/Func/IR/FuncOps.td"
    include "tensorflow/compiler/mlir/tfr/ir/tfr_ops.td"
    
    class Quantize<string value> : NativeCodeCall<"TFR::Quantize(" # value # ", $0, $1, $_builder)">;
    
    class HasStringAttr<string value> : AttrConstraint<
        CPred<"$_self.cast<StringAttr>().getValue() == \"" # value # "\"">>;
    
    def QuantActRangeNonePattern :
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Sep 29 21:02:21 UTC 2022
    - 2.4K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/lite/stablehlo/tests/uniform-quantized-stablehlo-to-tfl.mlir

    }
    // CHECK-LABEL: uniform_dequantize_op_ui16_storage_input
    // CHECK: stablehlo.uniform_dequantize
    // CHECK-NOT: tfl.dequantize
    
    // -----
    
    // Tests that the pattern doesn't match when the input quantized tensor's
    // storage type is i32. The i32 storage type is not compatible with
    // `tfl.dequantize`.
    
    func.func @uniform_dequantize_op_i32_storage_input(%arg: tensor<2x2x!quant.uniform<i32:f32, 1.000000e+0:8>>) -> tensor<2x2xf32> {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 14 17:10:32 UTC 2024
    - 106.2K bytes
    - Viewed (0)
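
The function argument's type, !quant.uniform<i32:f32, 1.000000e+0:8>, encodes scale 1.0 and zero point 8 with i32 storage; the pattern is rejected only because tfl.dequantize does not accept i32 storage, not because the math differs. The affine mapping itself is the usual real = scale * (stored - zero_point), shown here in numpy on the parameters from that type:

    import numpy as np

    scale, zero_point = 1.0, 8  # from !quant.uniform<i32:f32, 1.000000e+0:8>

    q = np.array([[8, 9], [10, 11]], dtype=np.int32)  # i32 storage values
    real = scale * (q.astype(np.float32) - zero_point)
    print(real)  # [[0. 1.] [2. 3.]]
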
  9. tensorflow/compiler/mlir/lite/tests/optimize_batch_matmul.mlir

      %0 = arith.constant dense<[[1.0], [2.0]]> : tensor<2x1xf32>
      %1 = "tfl.quantize"(%0) {qtype = tensor<2x1x!quant.uniform<i8:f32, 0.024986599940879671:92>>} : (tensor<2x1xf32>) -> tensor<2x1x!quant.uniform<i8:f32, 0.024986599940879671:92>>
      %2 = "tfl.dequantize"(%1) : (tensor<2x1x!quant.uniform<i8:f32, 0.024986599940879671:92>>) -> tensor<2x1xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 9K bytes
    - Viewed (0)
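
This quantize/dequantize pair uses scale 0.024986599940879671 and zero point 92, read straight from the !quant.uniform type in the excerpt. A numpy sketch of the affine round trip those two ops imply; note that with this zero point the representable range tops out near 0.87, so the constants 1.0 and 2.0 both saturate (rounding details of the real kernels may differ):

    import numpy as np

    # Parameters from the snippet's !quant.uniform<i8:f32, ...:92> type.
    scale, zero_point = 0.024986599940879671, 92
    x = np.array([[1.0], [2.0]], dtype=np.float32)

    # tfl.quantize-style mapping with saturation to the int8 range.
    q = np.clip(np.round(x / scale) + zero_point, -128, 127).astype(np.int8)

    # tfl.dequantize: real = scale * (stored - zero_point).
    x_dq = scale * (q.astype(np.float32) - zero_point)
    print(q.ravel(), x_dq.ravel())  # both clip to 127 -> ~0.8745
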
  10. tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test.py

        ):
          quantize_model.quantize(
              self._input_saved_model_path,
              self._output_saved_model_path,
              quantization_options=quantization_options,
              representative_dataset=representative_dataset,
          )
    
        converted_model = quantize_model.quantize(
            self._input_saved_model_path,
            self._output_saved_model_path,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 17 03:36:50 UTC 2024
    - 235.6K bytes
    - Viewed (0)
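
The quantize_model API exercised by this test is internal to TensorFlow's MLIR quantizer, but its representative_dataset argument plays the same role as in the public TFLite converter: the model is run on sample inputs to calibrate activation ranges. A sketch of the public-API analogue, assuming a SavedModel at a placeholder path with a single (1, 16) float input:

    import numpy as np
    import tensorflow as tf

    def representative_dataset():
        # Yield a handful of realistic batches; the converter runs the model
        # on them to calibrate activation min/max ranges.
        for _ in range(100):
            yield [np.random.rand(1, 16).astype(np.float32)]

    converter = tf.lite.TFLiteConverter.from_saved_model(
        "/path/to/saved_model")  # placeholder path
    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    converter.representative_dataset = representative_dataset
    tflite_model = converter.convert()
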