Results 51 - 60 of 283 for quantize (0.39 sec)

  1. tensorflow/compiler/mlir/lite/experimental/tac/tests/pick-subgraphs.mlir

        %3 = "tfl.dequantize"(%arg2) {tac.device = "GPU", tac.inference_type = "FLOAT"} : (tensor<100x!quant.uniform<i8:f32, 2.000000e-01:-3>>) -> tensor<100xf32>
        %4 = tfl.mul %2, %3 {fused_activation_function = "RELU6", tac.device = "GPU", tac.inference_type = "FLOAT"} : tensor<100xf32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 24.3K bytes
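
The !quant.uniform<i8:f32, 2.000000e-01:-3> element type in this snippet encodes a scale of 0.2 and a zero point of -3. As a plain-C++ illustration (not TFLite's actual kernel), tfl.dequantize maps each stored i8 value back to float as real = scale * (q - zero_point):

    #include <cstdint>
    #include <vector>

    // Illustration only: affine dequantization for
    // !quant.uniform<i8:f32, 2.000000e-01:-3>, i.e. scale 0.2, zero point -3.
    std::vector<float> Dequantize(const std::vector<int8_t>& q,
                                  float scale = 0.2f, int32_t zero_point = -3) {
      std::vector<float> out;
      out.reserve(q.size());
      for (int8_t v : q) {
        out.push_back(scale * (static_cast<int32_t>(v) - zero_point));
      }
      return out;
    }
    // Example: the i8 value 7 dequantizes to 0.2f * (7 - (-3)) = 2.0f.
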
  2. tensorflow/compiler/mlir/quantization/stablehlo/passes/quantize_weight.cc

        // 1. Collect quantizable ops.
        QuantizationUnits quantizable_ops = GetQuantizableOps(op);
        if (quantizable_ops.empty()) {
          return failure();
        }
    
        // 2. Quantize collected ops.
        if (!QuantizeOps(rewriter, op, quantizable_ops)) {
          return failure();
        }
    
        // 3. Complete the Q-DQ pair for each inference type.
        if (!ConvertToFloat16Constant(rewriter, op)) {
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 9.9K bytes
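
The excerpt above is cut off mid-statement. A hedged reconstruction of the surrounding rewrite skeleton, keeping the three-step flow and helper names from the snippet and assuming the usual MLIR matchAndRewrite shape for everything else (the real pattern in quantize_weight.cc may differ):

    // Sketch, not the file's actual code: each step bails out with
    // failure() when nothing can be rewritten.
    LogicalResult matchAndRewrite(func::FuncOp op,
                                  PatternRewriter& rewriter) const override {
      // 1. Collect quantizable ops.
      QuantizationUnits quantizable_ops = GetQuantizableOps(op);
      if (quantizable_ops.empty()) return failure();

      // 2. Quantize collected ops.
      if (!QuantizeOps(rewriter, op, quantizable_ops)) return failure();

      // 3. Complete the Q-DQ pair for each inference type.
      if (!ConvertToFloat16Constant(rewriter, op)) return failure();

      return success();
    }
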
  3. tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights.h

    // Stores information about how to quantize a user-specified custom operation.
    // CustomOpInfo contains info of its corresponding CustomOp registered in the
    // CustomOpMap. 'quantizable_input_indices' is used to determine which indices
    of the CustomOp are quantizable. 'is_weight_only' is used to specify whether the
    // custom op is quantized only for storage and dequantized at runtime.
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 4.2K bytes
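
Based only on the fields the header comment names, a sketch of what such a custom-op registration might look like; the real CustomOpInfo in quantize_weights.h may carry more members, and the field types here are assumptions:

    #include <cstdint>
    #include <string>
    #include <unordered_map>
    #include <vector>

    // Sketch built from the comment's description alone.
    struct CustomOpInfoSketch {
      std::vector<int32_t> quantizable_input_indices;  // which inputs hold weights
      bool is_weight_only = false;  // quantize for storage, dequantize at runtime
    };
    using CustomOpMapSketch = std::unordered_map<std::string, CustomOpInfoSketch>;

    // Example: inputs 1 and 2 of a hypothetical "MyCustomOp" are quantizable,
    // in weight-only mode.
    CustomOpMapSketch BuildMap() {
      return {{"MyCustomOp", {{1, 2}, /*is_weight_only=*/true}}};
    }
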
  4. tensorflow/compiler/mlir/lite/common/tfl_pass_config.h

      bool reduce_type_precision = false;
      // Whether to consider this model a quantized model with quantize/dequantize
      // ops and to convert kernels to quantized kernels wherever appropriate.
      quant::QDQConversionMode qdq_conversion_mode =
          quant::QDQConversionMode::kQDQNone;
    
      // When set to true, StableHLO Quantizer is run. The full configuration for
      // the quantizer is at `TocoFlags::quantization_config`.
    - Last Modified: Wed May 08 19:05:30 UTC 2024
    - 6.5K bytes
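
What the qdq_conversion_mode comment describes, as plain arithmetic: a QDQ-annotated model keeps float tensors but routes them through quantize/dequantize pairs, so each value carries its quantization error and the kernels between pairs can later be lowered to quantized kernels. A minimal sketch, with arbitrary example scale and zero point:

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // "Fake quantization": quantize to i8 and immediately dequantize.
    // This is the computation a quantize/dequantize op pair represents.
    float FakeQuantize(float x, float scale = 0.2f, int32_t zero_point = -3) {
      int q = static_cast<int>(std::lround(x / scale)) + zero_point;
      q = std::clamp(q, -128, 127);     // i8 storage bounds
      return scale * (q - zero_point);  // back to float, now with quant error
    }
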
  5. tensorflow/compiler/mlir/lite/python/wrap_converter.py

        enable_whole_model_verify,
        denylisted_ops,
        denylisted_nodes,
        enable_variable_quantization,
        disable_per_channel_for_dense_layers,
        debug_options_str,
    ):
      """Wraps experimental mlir quantize model."""
      return _pywrap_converter_api.ExperimentalMlirQuantizeModel(
          input_data_str,
          disable_per_channel,
          fully_quantize,
          inference_type,
          input_data_type,
          output_data_type,
    - Last Modified: Fri May 31 18:18:30 UTC 2024
    - 3K bytes
  6. tensorflow/compiler/mlir/quantization/stablehlo/passes/quantize_composite_functions.cc

    };
    
    void QuantizeCompositeFunctionsPass::runOnOperation() {
      MLIRContext& ctx = getContext();
    
      PassManager pm(&ctx);
      // Intermediate output from QuantizePass will have quantized ops
      // (XlaCallModuleOps) with quantized input and output types, which are not
      // allowed in the TF dialect.
      pm.enableVerifier(false);
    
      PrepareQuantizePassOptions options;
      options.enable_per_channel_quantized_weight_ =
    - Last Modified: Fri May 03 02:59:01 UTC 2024
    - 4.6K bytes
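
The excerpt shows a common MLIR idiom: drive an inner pipeline with verification disabled because an intermediate step produces IR that is deliberately illegal in the enclosing dialect. A hedged sketch of that idiom, inside some Pass subclass; the create* pass names are placeholders, not this file's real pipeline:

    #include "mlir/IR/MLIRContext.h"
    #include "mlir/Pass/PassManager.h"

    void runOnOperation() /* override */ {
      mlir::MLIRContext& ctx = getContext();
      mlir::PassManager pm(&ctx);
      // The IR between the nested passes is deliberately not TF-legal,
      // so inter-pass verification is switched off.
      pm.enableVerifier(false);
      pm.addPass(CreatePrepareStepPass());   // placeholder
      pm.addPass(CreateQuantizeStepPass());  // placeholder
      if (failed(pm.run(getOperation()))) signalPassFailure();
    }
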
  7. tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_lifting.cc

            per_axis_type.getStorageTypeMin(), per_axis_type.getStorageTypeMax());
      }
    
      auto quantize = builder.create<quantfork::QuantizeCastOp>(
          q_op.getLoc(), new_value_type.clone(new_qtype), new_value);
      auto dequantize = builder.create<quantfork::DequantizeCastOp>(
          dq_op.getLoc(), new_value_type, quantize.getResult());
      return dequantize.getResult();
    }
    
    - Last Modified: Fri May 17 17:58:54 UTC 2024
    - 13.3K bytes
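
The per_axis_type in this excerpt refers to per-channel quantization: each slice along the quantized dimension has its own scale, while all values share the storage bounds (getStorageTypeMin/Max, e.g. -128/127 for i8). A small plain-C++ illustration, not the pass's code, assuming channel-major layout and zero points of 0 as is typical for symmetric weight quantization:

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <vector>

    // Per-channel (per-axis) quantization: channel c uses scales[c].
    std::vector<int8_t> QuantizePerChannel(const std::vector<float>& w,
                                           const std::vector<float>& scales) {
      const size_t per_channel = w.size() / scales.size();
      std::vector<int8_t> q(w.size());
      for (size_t c = 0; c < scales.size(); ++c) {
        for (size_t i = 0; i < per_channel; ++i) {
          const size_t idx = c * per_channel + i;
          int v = static_cast<int>(std::lround(w[idx] / scales[c]));
          q[idx] = static_cast<int8_t>(std::clamp(v, -128, 127));
        }
      }
      return q;
    }
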
  8. tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_quantize_op.cc

          func_name, rewriter, quant_type, val_to_dequantize, result_type,
          LogicsForUniformDequanization);
    
      return dequant_op;
    }
    }  // namespace
    
    // Generate quantize and dequantize functions with uniform quantization.
    std::optional<TF::PartitionedCallOp> ApplyUniformQuantization(
        PatternRewriter& rewriter, TF::ConstOp op,
        tensorflow::quantization::QuantizationComponentSpec& weight_spec) {
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 11K bytes
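
ApplyUniformQuantization generates a quantize function plus a matching dequantize function for a weight constant: the weight is stored as i8 and an approximation is reconstructed at runtime. The numerical effect, with made-up scale and zero point:

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    // Weight-only round trip; values are arbitrary examples.
    int main() {
      const float scale = 0.2f;
      const int32_t zero_point = -3;
      const float w = 1.37f;
      const int32_t q =
          static_cast<int32_t>(std::lround(w / scale)) + zero_point;  // 4
      const float w_approx = scale * (q - zero_point);  // 1.4, error <= scale/2
      std::printf("w=%.2f -> q=%d -> %.2f\n", w, q, w_approx);
      return 0;
    }
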
  9. tensorflow/compiler/aot/BUILD

    )
    
    filegroup(
        name = "quantize_header",
        srcs = ["quantize.h"],
        visibility = ["//visibility:public"],
    )
    
    cc_library(
        name = "tfcompile_lib",
        srcs = [
            "codegen.cc",
            "compile.cc",
            "flags.cc",
        ],
        hdrs = [
            "codegen.h",
            "compile.h",
            "flags.h",
            "quantize.h",
        ],
        compatible_with = [],
    - Last Modified: Thu Apr 11 16:13:05 UTC 2024
    - 11.7K bytes
  10. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize_composite_functions.cc

        lines.push_back(absl::StrFormat(
            "Number of quantized layers with quantized outputs: %d/%d",
            total_quantized_func_count - float_output_func_count,
            total_quantized_func_count));
        lines.push_back(absl::StrFormat("Number of quantize layers added: %d",
                                        quantize_func_count));
        lines.push_back(absl::StrFormat("Number of dequantize layers added: %d",
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 54.5K bytes
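
This excerpt, cut off mid-statement, is from the pass's post-run summary. The idiom it uses: collect formatted metric lines with absl::StrFormat, then join them into one report. A self-contained sketch with placeholder counts, not the pass's real bookkeeping:

    #include <string>
    #include <vector>
    #include "absl/strings/str_format.h"
    #include "absl/strings/str_join.h"

    std::string BuildReport(int total, int float_output,
                            int q_added, int dq_added) {
      std::vector<std::string> lines;
      lines.push_back(absl::StrFormat(
          "Number of quantized layers with quantized outputs: %d/%d",
          total - float_output, total));
      lines.push_back(
          absl::StrFormat("Number of quantize layers added: %d", q_added));
      lines.push_back(
          absl::StrFormat("Number of dequantize layers added: %d", dq_added));
      return absl::StrJoin(lines, "\n");
    }
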