Results 21 - 28 of 28 for quant_specs_ (0.18 sec)

  1. tensorflow/compiler/mlir/lite/common/tfl_pass_config.h

    namespace mlir {
    namespace TFL {
    
    // A config that controls which passes get run as part of the TFLite converter.
    struct PassConfig {
      explicit PassConfig(quant::QuantizationSpecs specs)
          : quant_specs(std::move(specs)) {}
    
      // If `emit_builtin_tflite_ops` is true, TF Lite legalization passes will be
      // added, which produce TF Lite ops.
      bool emit_builtin_tflite_ops = true;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 08 19:05:30 UTC 2024
    - 6.5K bytes
    - Viewed (0)
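
    Read on its own, the header above implies a simple construction pattern. A
    minimal sketch, assuming the fully qualified spec type from result 5
    (mlir::quant::QuantizationSpecs) and the weight_quantization field shown in
    result 3; the rest comes straight from the snippet:

    #include <utility>

    #include "tensorflow/compiler/mlir/lite/common/tfl_pass_config.h"

    // Build a PassConfig that requests weight quantization and keeps the
    // default TF Lite builtin-op legalization.
    mlir::TFL::PassConfig MakeWeightQuantPassConfig() {
      mlir::quant::QuantizationSpecs specs;
      specs.weight_quantization = true;            // field shown in result 3
      // The constructor takes the specs by value and moves them into quant_specs.
      mlir::TFL::PassConfig pass_config(std::move(specs));
      pass_config.emit_builtin_tflite_ops = true;  // default per the header above
      return pass_config;
    }
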
  2. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize_weights.cc

      // Initialize for tests.
      void initializeForTest() {
        if (!test_mode_) return;
    
        tensorflow::quantization::QuantizationComponentSpec quant_spec;
        quant_spec.set_quantization_component(
            tensorflow::quantization::QuantizationComponentSpec::COMPONENT_WEIGHT);
        quant_spec.set_tensor_type(
            tensorflow::quantization::QuantizationComponentSpec::TENSORTYPE_INT_8);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 05 07:39:40 UTC 2024
    - 11.3K bytes
    - Viewed (0)
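
    The test-only initializer above just fills in a weight/int8 component spec.
    The same construction, pulled out into a standalone helper as a sketch; only
    the setters and enum values visible in the snippet are used, and the proto
    header path is an assumption:

    // Header path is an assumption; the message type is the
    // tensorflow::quantization::QuantizationComponentSpec used in the snippet.
    #include "tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.pb.h"

    tensorflow::quantization::QuantizationComponentSpec MakeWeightInt8Spec() {
      tensorflow::quantization::QuantizationComponentSpec quant_spec;
      // Quantize the weight component of matching ops...
      quant_spec.set_quantization_component(
          tensorflow::quantization::QuantizationComponentSpec::COMPONENT_WEIGHT);
      // ...to 8-bit integer tensors.
      quant_spec.set_tensor_type(
          tensorflow::quantization::QuantizationComponentSpec::TENSORTYPE_INT_8);
      return quant_spec;
    }
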
  3. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize_composite_functions.cc

            CreatePrepareQuantizePass(quant_specs, quantization_method_));
        pm.addNestedPass<func::FuncOp>(
            CreateQuantizePass(quant_specs, target_opset_));
        pm.addNestedPass<func::FuncOp>(CreatePostQuantizePass());
      } else {
        // Apply weight quantization.
        quant_specs.minimum_elements_for_weights = min_num_elements_for_weights_;
        quant_specs.weight_quantization = true;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 54.5K bytes
    - Viewed (0)
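
    The else branch is cut off right after it switches the spec to weight
    quantization. A sketch of that branch as a standalone helper; the enclosing
    namespace is an assumption (chosen so the factories can be called
    unqualified, as in the snippet), and reusing the Quantize/PostQuantize
    factories from the visible if branch is likewise an assumption about what
    follows the truncation:

    #include <cstdint>

    #include "mlir/Dialect/Func/IR/FuncOps.h"
    #include "mlir/Pass/PassManager.h"
    #include "tensorflow/compiler/mlir/quantization/tensorflow/passes/passes.h"

    namespace mlir::quant {

    // Configure weight quantization and append the quantize passes to `pm`.
    void AddWeightOnlyQuantizePasses(PassManager& pm,
                                     QuantizationSpecs quant_specs,
                                     tensorflow::quantization::OpSet target_opset,
                                     int64_t min_num_elements_for_weights) {
      // Mirror the visible part of the else branch above.
      quant_specs.minimum_elements_for_weights = min_num_elements_for_weights;
      quant_specs.weight_quantization = true;
      pm.addNestedPass<func::FuncOp>(
          CreateQuantizePass(quant_specs, target_opset));
      pm.addNestedPass<func::FuncOp>(CreatePostQuantizePass());
    }

    }  // namespace mlir::quant
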
  4. tensorflow/compiler/mlir/quantization/tensorflow/passes/passes.h

    std::unique_ptr<OperationPass<func::FuncOp>> CreateQuantizePass(
        QuantizationSpecs quant_specs,
        tensorflow::quantization::OpSet target_opset);
    
    // Creates an instance of the PrepareQuantize pass, which will perform
    // transformations similar to those in TFL::PrepareQuantizePass.
    std::unique_ptr<OperationPass<func::FuncOp>> CreatePrepareQuantizePass(
        const QuantizationSpecs& quant_specs,
        tensorflow::quantization::QuantizationMethod::PresetMethod
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 12.3K bytes
    - Viewed (0)
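
    Result 3 above shows these two factories chained together with
    CreatePostQuantizePass for the static-range path. A sketch of that wiring,
    under the same namespace assumption as the previous sketch; the preset
    method is passed in rather than hard-coding any enum value:

    #include "mlir/Dialect/Func/IR/FuncOps.h"
    #include "mlir/Pass/PassManager.h"
    #include "tensorflow/compiler/mlir/quantization/tensorflow/passes/passes.h"

    namespace mlir::quant {

    // Append prepare/quantize/post-quantize passes at function scope.
    void AddStaticRangeQuantizePasses(
        PassManager& pm, const QuantizationSpecs& quant_specs,
        tensorflow::quantization::QuantizationMethod::PresetMethod method,
        tensorflow::quantization::OpSet target_opset) {
      pm.addNestedPass<func::FuncOp>(
          CreatePrepareQuantizePass(quant_specs, method));
      pm.addNestedPass<func::FuncOp>(
          CreateQuantizePass(quant_specs, target_opset));
      pm.addNestedPass<func::FuncOp>(CreatePostQuantizePass());
    }

    }  // namespace mlir::quant
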
  5. tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.h

    // input arrays.
    Status PopulateQuantizationSpecs(
        const toco::ModelFlags& model_flags, toco::TocoFlags& toco_flags,
        mlir::quant::QuantizationSpecs* quant_specs,
        std::vector<string>* node_names, std::vector<string>* node_dtypes,
        std::vector<std::optional<std::vector<int>>>* node_shapes,
        std::vector<std::optional<double>>* node_mins,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sun May 12 12:39:37 UTC 2024
    - 3.2K bytes
    - Viewed (0)
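
    The declaration above is truncated after node_mins, so the call itself is
    not reproduced here. A sketch of the caller-side output containers for the
    parameters that are visible, treating toco's string as std::string; the
    remaining arguments stay elided:

    #include <optional>
    #include <string>
    #include <vector>

    #include "tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_config.h"

    // Outputs for the visible parameters of PopulateQuantizationSpecs.
    mlir::quant::QuantizationSpecs quant_specs;
    std::vector<std::string> node_names;
    std::vector<std::string> node_dtypes;
    std::vector<std::optional<std::vector<int>>> node_shapes;
    std::vector<std::optional<double>> node_mins;
    // auto status = PopulateQuantizationSpecs(model_flags, toco_flags, &quant_specs,
    //     &node_names, &node_dtypes, &node_shapes, &node_mins, ...);
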
  6. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h

        absl::flat_hash_set<std::string> ops_blocklist =
            quant_params_.quant_spec.ops_blocklist;
        absl::flat_hash_set<std::string> nodes_blocklist =
            quant_params_.quant_spec.nodes_blocklist;
        CustomMap custom_map = quant_params_.quant_spec.custom_map;
    
        // Rewrite the floating-point ops to the quantized version by fusing
        // preceding dequantize ops and succeeding quantize ops.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Apr 24 20:30:06 UTC 2024
    - 41.7K bytes
    - Viewed (0)
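
    The pattern above reads its blocklists straight off the spec, so excluding
    particular ops or nodes from quantization amounts to populating those sets
    up front. A sketch, assuming quant_spec here is the same
    mlir::quant::QuantizationSpecs seen in the other results; the op and node
    names are placeholders:

    #include "tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_config.h"

    mlir::quant::QuantizationSpecs MakeSpecsWithBlocklists() {
      mlir::quant::QuantizationSpecs quant_specs;
      // Ops matched by name stay in floating point.
      quant_specs.ops_blocklist.insert("tfl.custom_op");        // placeholder op name
      // Individual nodes can also be pinned to floating point.
      quant_specs.nodes_blocklist.insert("model/output_head");  // placeholder node name
      return quant_specs;
    }
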
  7. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_config.h

                                  absl::string_view max_values,
                                  absl::string_view inference_type,
                                  QuantizationSpecs* quant_specs);
    
    // Gets the quantization specification for input arrays. The array names are not
    // stored in the spec, and will be matched by position. The min/max will be
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Mar 13 10:16:19 UTC 2024
    - 10.8K bytes
    - Viewed (0)
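
    The comment above says the per-array min/max values are matched to the input
    arrays by position rather than by name. A minimal sketch of that convention;
    the comma-separated format is an assumption, and the parsing function itself
    is truncated in the snippet, so the call is left as a comment:

    #include "absl/strings/string_view.h"

    // One entry per input array, matched by position: entry 0 applies to the
    // first input, entry 1 to the second, and so on.
    constexpr absl::string_view kMinValues = "-1.0,0.0";
    constexpr absl::string_view kMaxValues = "1.0,255.0";
    // mlir::quant::QuantizationSpecs quant_specs;
    // <parser shown above>(..., kMinValues, kMaxValues, inference_type, &quant_specs);
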
  8. tensorflow/compiler/mlir/lite/tests/prepare-quantize.mlir

    // RUN: tf-opt %s -tfl-prepare-quantize="is-qdq-conversion=true" | FileCheck --check-prefix=QDQ %s
    
    // CHECK-LABEL: main
    // Uses the `main` function to match the default target function of QuantSpecs and
    // execute the production code path.
    func.func @main(%arg0: tensor<2x1xf32>, %arg1: tensor<2x3xf32>) -> (tensor<2x4xf32>) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 67.5K bytes
    - Viewed (0)