Results 51 - 60 of 294 for Quantized (0.18 sec)

  1. tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_quantize_drq.cc

                       QuantizationUnits& quantizable_ops) const {
        bool quantized = false;
    
        for (auto& quant_op : quantizable_ops) {
          if (quant_specs_.inference_type == tensorflow::DT_QINT8) {
            quantized |= quantizeOpAsInt8(rewriter, op, quant_op);
          }
        }
        return quantized;
      }
    
     protected:
      QuantizationSpecs quant_specs_;
      OpSet op_set_;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 11.5K bytes
    - Viewed (0)
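    As context for the excerpt above, here is a minimal standalone sketch of per-tensor symmetric int8 quantization, the scheme a dynamic-range pass like this applies per quantizable op. The helper name and signature are hypothetical, not the pass's actual API.

        // Symmetric per-tensor int8 quantization: scale = max|w| / 127, zero point 0.
        #include <algorithm>
        #include <cmath>
        #include <cstdint>
        #include <vector>

        std::vector<int8_t> QuantizeOpAsInt8Sketch(const std::vector<float>& weights,
                                                   float& scale) {
          float max_abs = 0.0f;
          for (float w : weights) max_abs = std::max(max_abs, std::fabs(w));
          scale = max_abs > 0.0f ? max_abs / 127.0f : 1.0f;
          std::vector<int8_t> quantized(weights.size());
          for (size_t i = 0; i < weights.size(); ++i) {
            float q = std::round(weights[i] / scale);
            quantized[i] = static_cast<int8_t>(std::clamp(q, -127.0f, 127.0f));
          }
          return quantized;
        }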
  2. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize_composite_functions_weight_only.mlir

    // RUN: stablehlo-quant-opt %s -split-input-file -verify-diagnostics \
    // RUN:     -stablehlo-quantize-composite-functions | FileCheck --check-prefix=CHECK %s
    
    // Test that a per-tensor weight-only quantized dot_general op is produced
    // when an empty `weight_only_ptq` is provided.
    
    module attributes {tf_saved_model.semantics} {
      func.func private @quantize_dot_general_per_tensor(%arg0: tensor<1x2xf32>) -> tensor<1x3xf32> attributes {tf._original_func_name = "main_0"} {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 09 05:56:10 UTC 2024
    - 9.4K bytes
    - Viewed (0)
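    A sketch of what a per-tensor weight-only quantized dot computes: activations stay float32 and the int8 weights are dequantized on the fly with a single per-tensor scale. Names are illustrative, not the StableHLO lowering.

        #include <cstdint>
        #include <vector>

        // Weight-only dot product: float activations, int8 weights with one
        // per-tensor scale; each weight is dequantized inside the loop.
        float WeightOnlyDot(const std::vector<float>& activations,
                            const std::vector<int8_t>& q_weights,
                            float weight_scale) {
          float acc = 0.0f;
          for (size_t i = 0; i < activations.size(); ++i)
            acc += activations[i] * static_cast<float>(q_weights[i]) * weight_scale;
          return acc;
        }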
  3. tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test.py

        if bias_fn:
          self.assertTrue(re.search('stablehlo.add.*xi32>', module_str))
        # Consider whether there is a way to check if activation fusion is
        # properly done at the MLIR level.
        # Tests that the quantized graph outputs similar values. The rtol and atol
        # values are arbitrary.
        self.assertAllClose(new_outputs, expected_outputs, rtol=0.3, atol=0.2)
    
        # Due to other metadata, the compression is not exactly 1/4.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 14 06:31:57 UTC 2024
    - 51.4K bytes
    - Viewed (0)
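    To illustrate the comment about compression not being exactly 1/4: int8 storage is a quarter of float32 per parameter, but scales, zero points, and other metadata add a fixed overhead. The numbers below are made up.

        #include <cstddef>
        #include <iostream>

        int main() {
          const std::size_t n_params = 1'000'000;
          const std::size_t f32_bytes = n_params * 4;
          // Hypothetical overhead: quantization parameters, schema, etc.
          const std::size_t metadata_bytes = 50'000;
          const std::size_t int8_bytes = n_params + metadata_bytes;
          std::cout << "size ratio = "
                    << static_cast<double>(int8_bytes) / f32_bytes << "\n";  // ~0.26
          return 0;
        }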
  4. tensorflow/compiler/mlir/lite/transforms/default_quant_params.cc

        }
        return false;
      }
    
      // Uses `quant_params` to quantize `value`, inserting a pair of
      // tfl.quantize and tfl.dequantize ops for this `value`.
      void QuantizeValue(OpBuilder builder, Value value,
                         quant::QuantParams quant_params);
    
      // If the value hasn't been quantized, the function adds it to `values`.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 9.4K bytes
    - Viewed (0)
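    A scalar sketch of the quantize/dequantize pair that `QuantizeValue` inserts: the value is rounded onto the int8 grid and immediately mapped back to float, so consumers see a float tensor constrained to representable quantized values. Parameter names are illustrative.

        #include <algorithm>
        #include <cmath>
        #include <cstdint>

        int8_t Quantize(float v, float scale, int32_t zero_point) {
          float q = std::round(v / scale) + static_cast<float>(zero_point);
          return static_cast<int8_t>(std::clamp(q, -128.0f, 127.0f));
        }

        float Dequantize(int8_t q, float scale, int32_t zero_point) {
          return static_cast<float>(static_cast<int32_t>(q) - zero_point) * scale;
        }

        // The inserted pair behaves like:
        // fake_quant(v) == Dequantize(Quantize(v, s, zp), s, zp)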
  5. tensorflow/compiler/mlir/tfr/passes/passes.h

    // Decompose ops.
    std::unique_ptr<OperationPass<func::FuncOp>> CreateDecomposeTFOpsPass(
        std::optional<ModuleOp> tfr_module = std::nullopt);
    
    // Rewrites quantized operands and results with their storage types.
    // This pass should be run at module level after decomposition, if there are
    // quantized operands or results.
    std::unique_ptr<OperationPass<ModuleOp>> CreateRewriteQuantizedIOPass();
    
    // Raise to TF ops.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jun 08 01:19:25 UTC 2023
    - 2K bytes
    - Viewed (0)
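    A toy illustration of the rewrite `CreateRewriteQuantizedIOPass` describes: a quantized element type such as `!quant.uniform<i8:f32, ...>` bundles a scale and zero point with an integer storage type, and the pass exposes only that storage type (e.g. `i8`) at function boundaries. The struct below is illustrative, not MLIR's type system.

        #include <cstdint>
        #include <string>

        // Stand-in for a uniform quantized type: parameters plus storage width.
        struct UniformQuantizedType {
          unsigned storage_width;  // e.g. 8 for !quant.uniform<i8:f32, ...>
          double scale;
          int64_t zero_point;
        };

        // Rewriting IO drops the parameters and keeps only the storage type.
        std::string StorageTypeName(const UniformQuantizedType& t) {
          return "i" + std::to_string(t.storage_width);
        }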
  6. tensorflow/compiler/mlir/lite/quantization/lite/quantize_model_test.cc

                  Eq(TensorType_INT8));
    
      // Verify that the FC bias is int32 quantized.
      ASSERT_THAT(float_graph->tensors()->Get(float_op->inputs()->Get(2))->type(),
                  Eq(TensorType_FLOAT32));
      EXPECT_THAT(subgraph->tensors[op->inputs[2]].get()->type,
                  Eq(TensorType_INT32));
    
      // The output tensor of FC should be int8 quantized.
      ASSERT_THAT(float_graph->tensors()->Get(float_op->outputs()->Get(0))->type(),
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 73.9K bytes
    - Viewed (0)
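    Why the FC bias ends up int32, as the test asserts: the bias is quantized with scale = input_scale * weight_scale and zero point 0, so it can be added directly to the int32 accumulator of the int8 matmul without rescaling. A sketch with illustrative names:

        #include <cmath>
        #include <cstdint>
        #include <vector>

        std::vector<int32_t> QuantizeBias(const std::vector<float>& bias,
                                          float input_scale, float weight_scale) {
          const float bias_scale = input_scale * weight_scale;  // zero point is 0
          std::vector<int32_t> q(bias.size());
          for (size_t i = 0; i < bias.size(); ++i)
            q[i] = static_cast<int32_t>(std::lround(bias[i] / bias_scale));
          return q;
        }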
  7. tensorflow/compiler/mlir/quantization/tensorflow/passes/post_quantize.td

    include "tensorflow/compiler/mlir/quantization/common/attrs_and_constraints.td"
    
    // Re-orders the Identity op following a quantized composite function. This
    // allows the QuantizeCompositeFunctionsPass to merge the DequantizeCast with
    // the quantized composite function to optimize the requantization part.
    def ReorderIdentityFollowingQuantizedFunction : Pat<
      (quantfork_DequantizeCastOp:$output
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sun Dec 10 05:52:02 UTC 2023
    - 1.5K bytes
    - Viewed (0)
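    The requantization this pattern helps optimize can be pictured as a DequantizeCast followed by a QuantizeCast fused into one rescale from the source (scale, zero point) pair to the target pair. A scalar sketch, with illustrative names:

        #include <algorithm>
        #include <cmath>
        #include <cstdint>

        int8_t Requantize(int8_t q_in, float in_scale, int32_t in_zp,
                          float out_scale, int32_t out_zp) {
          // Dequantize, then quantize to the new parameters, in one step.
          float real =
              static_cast<float>(static_cast<int32_t>(q_in) - in_zp) * in_scale;
          float q = std::round(real / out_scale) + static_cast<float>(out_zp);
          return static_cast<int8_t>(std::clamp(q, -128.0f, 127.0f));
        }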
  8. tensorflow/compiler/mlir/quantization/stablehlo/cc/post_calibration.h

    namespace mlir::quant::stablehlo {
    
    // Performs post-calibration graph transformation as part of post-training
    // static-range quantization.
    //
    // The resulting `ModuleOp` contains quantized StableHLO ops serialized in
    // `TF::XlaCallModuleOp`s. They are quantized using the statistics collected
    // after the calibration step, corresponding to each `TF::CustomAggregatorOp`
    // in the input module op.
    class PostCalibrationComponent : public Component {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Apr 15 12:53:33 UTC 2024
    - 2.4K bytes
    - Viewed (0)
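    A sketch of how calibration statistics become quantization parameters in static-range quantization: each observed (min, max) range, e.g. from a `TF::CustomAggregatorOp`, yields an asymmetric int8 scale and zero point. Illustrative, not the component's actual API.

        #include <algorithm>
        #include <cmath>
        #include <cstdint>

        struct QuantParams { float scale; int32_t zero_point; };

        QuantParams ParamsFromMinMax(float min, float max) {
          min = std::min(min, 0.0f);  // zero must be exactly representable
          max = std::max(max, 0.0f);
          if (max == min) return {1.0f, 0};
          const float scale = (max - min) / 255.0f;  // int8 spans 256 values
          const int32_t zero_point =
              static_cast<int32_t>(std::lround(-128.0f - min / scale));
          return {scale, zero_point};
        }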
  9. tensorflow/compiler/mlir/lite/quantization/ir/QuantizeUtils.cc

      }
    
      return nullptr;
    }
    
    /// Converts a real expressed DenseFPElementsAttr to a corresponding
    /// DenseElementsAttr (typically DenseIntElementsAttr) containing quantized
    /// storage values assuming the given quantizedElementType and converter.
    static DenseElementsAttr convertDenseFPElementsAttr(
        DenseFPElementsAttr realFPElementsAttr,
        quant::QuantizedType quantizedElementType,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 6.1K bytes
    - Viewed (0)
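    Conceptually, the conversion described above maps each real float element through a converter to its quantized storage value. A standalone sketch with a callable converter standing in for MLIR's attribute machinery:

        #include <cstdint>
        #include <functional>
        #include <vector>

        std::vector<int8_t> ConvertDenseFPElements(
            const std::vector<float>& real_values,
            const std::function<int8_t(float)>& convert) {
          std::vector<int8_t> storage;
          storage.reserve(real_values.size());
          for (float v : real_values) storage.push_back(convert(v));
          return storage;
        }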
  10. tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test.py

            op_set=target_opset,
        )
    
        if target_opset != quant_opts_pb2.XLA:
          # Uniform quantized opset is not supported for weight-only
          with self.assertRaisesRegex(
              ValueError, 'TF/Uniform quantized opset does not support weight-only.'
          ):
            converted_model = quantize_model.quantize(
                input_saved_model_path,
                output_directory,
                quantization_options,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 17 03:36:50 UTC 2024
    - 235.6K bytes
    - Viewed (0)