Results 131 - 140 of 291 for Quantized (0.24 sec)
tensorflow/compiler/mlir/quantization/stablehlo/passes/unwrap_xla_call_module_op.cc
private:
  void runOnOperation() override;
};

void UnwrapXlaCallModuleOp(TF::XlaCallModuleOp call_op,
                           SymbolTable& symbol_table) {
  // Do not inline lifted quantized functions used for fusing patterns.
  // TODO - b/310539922: Remove reference to TF/TFL utils.
  if (call_op->hasAttr(kQuantTraitAttrName)) {
    return;
  }
  auto function_name = call_op
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 05 07:39:40 UTC 2024 - 4.8K bytes - Viewed (0)
tensorflow/compiler/mlir/quantization/stablehlo/cc/config.cc
"composite_conv.*"); // Enable per-channel quantization for convolution weights. QuantizedType conv_weight_quantized_type{}; // Assumes NHWC format, specifying the channel dimension (3) as the // quantized axis. conv_weight_quantized_type.mutable_dimension_specs()->set_dimension(3); // The index of weight operands passed to lifted functions for convolution // is 1. StaticRangePtq& static_range_ptq_spec =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 8.3K bytes - Viewed (0)
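The excerpt above breaks off mid-statement. A minimal sketch of how such a per-channel spec could be completed, assuming the proto-style setters shown in the excerpt and an `input_quantized_types` map field keyed by operand index (both inferred from the snippet, not verified against quantization_config.proto):

```cpp
// Sketch only: per-channel quantization spec for convolution weights.
// `StaticRangePtq` and `QuantizedType` are the messages named in the
// excerpt; `mutable_input_quantized_types()` is an assumed map accessor.
QuantizedType conv_weight_quantized_type{};
// NHWC layout: dimension 3 is the channel axis to quantize along.
conv_weight_quantized_type.mutable_dimension_specs()->set_dimension(3);

StaticRangePtq static_range_ptq_spec{};
// Per the excerpt, the weight operand of the lifted convolution
// function sits at index 1.
(*static_range_ptq_spec.mutable_input_quantized_types())[1] =
    conv_weight_quantized_type;
```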
tensorflow/compiler/mlir/quantization/stablehlo/passes/post_quantize.cc
      return success();
    }
    op.replaceAllUsesWith(q.getArg());
    return success();
  }
  return failure();
}
};

// Replaces constant and uniform_quantize ops with single quantized constant op.
class QuantizeConstPattern
    : public OpRewritePattern<mlir::stablehlo::UniformQuantizeOp> {
 public:
  explicit QuantizeConstPattern(MLIRContext* context)
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 05 07:39:40 UTC 2024 - 6.3K bytes - Viewed (0)
tensorflow/compiler/mlir/quantization/stablehlo/cc/static_range_ptq.cc
    PostCalibrationComponent::kName, *function_aliases, *ctx, *module));
// Remove the `tpu` tag for exporting because the output quantized model is
// essentially a CPU model.
tags.erase("tpu");
py_function_library.SaveExportedModel(
    dst_saved_model_path, post_calibrated_exported_model,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 24 12:49:45 UTC 2024 - 6K bytes - Viewed (0)
tensorflow/compiler/mlir/tfrt/tests/tf_to_corert/attributes.mlir
  // CHECK-SAME: T = !corert.variant
  %0 = "tf.ZerosLike"(%arg) {device = "/device:CPU:0", T = !tf_type.variant} : (tensor<!tf_type.variant>) -> tensor<!tf_type.variant>
  func.return
}

// Checks that TF quantized attrs are lowered to the corert types
// CHECK-LABEL: func @quantized_types
func.func @quantized_types(%arg0: tensor<!tf_type.resource<tensor<1x3x!tf_type.quint8>>>,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 08 00:18:59 UTC 2024 - 4.8K bytes - Viewed (0)
tensorflow/compiler/mlir/quantization/tensorflow/passes/propagate_quantize_type.cc
    // This is the argument used to refer to the pass in
    // the textual format (on the commandline for example).
    return "quant-propagate-quantize-type";
  }

  StringRef getDescription() const final {
    // This is a brief description of the pass.
    return "Propagate quantized type through allowed ops.";
  }

  void runOnOperation() override;
};

// Propagate dequantize op if the next op supports the data type.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 7K bytes - Viewed (0)
tensorflow/compiler/mlir/lite/experimental/tac/transforms/get_alternative_subgraph.cc
      GetInferenceString(device_inference_type.inference_type));
}

// For every device, we will do the following:
// If the inference type is quantized, we will try the float alternative.
// If it's float, we will just keep it as it is.
std::vector<InferenceDeviceType> GetAllAlternativeInferenceDeviceType(
    InferenceType inference_type, ArrayRef<std::string> devices) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 06 03:08:33 UTC 2023 - 12.3K bytes - Viewed (0)
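The comment in this excerpt states the policy outright: quantized inference types get a float alternative per device, while float stays unchanged. A sketch of that loop under assumed names (`InferenceDeviceType` as a `{hardware, inference_type}` pair, `QUANTIZED_INT8`/`FLOAT` enumerators; none verified beyond the excerpt):

```cpp
#include <string>
#include <vector>
#include "llvm/ADT/ArrayRef.h"

// Sketch of the described policy, not the actual TAC implementation.
std::vector<InferenceDeviceType> GetAllAlternativeInferenceDeviceType(
    InferenceType inference_type, llvm::ArrayRef<std::string> devices) {
  std::vector<InferenceDeviceType> device_types;
  for (const std::string& device : devices) {
    // Every device keeps the requested inference type.
    device_types.push_back({device, inference_type});
    if (inference_type == InferenceType::QUANTIZED_INT8) {
      // Quantized graphs also get a float fallback alternative.
      device_types.push_back({device, InferenceType::FLOAT});
    }
  }
  return device_types;
}
```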
tensorflow/compiler/mlir/quantization/stablehlo/cc/static_range_ptq.h
// `dst_saved_model_path`.
//
// `quantization_config` configures the quantization behavior for the
// static-range PTQ.
//
// `signature_keys` specify the signatures that correspond to functions to be
// quantized. `signature_def_map` connects the signature keys to
// `SignatureDef`s.
//
// Returns a non-OK status when the quantization is not successful.
// LINT.IfChange
absl::Status QuantizeStaticRangePtq(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 19 02:44:03 UTC 2024 - 4.5K bytes - Viewed (0)
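Going only by the parameter names in the doc comment above, a hypothetical call could look like the following; the exact signature (argument order, a source-path argument, and the `py_function_library` handle mentioned in the static_range_ptq.cc result earlier) is an assumption to be checked against the header:

```cpp
// Hypothetical invocation; every argument is taken from the doc comment
// or assumed, not from the actual declaration.
const absl::Status status = QuantizeStaticRangePtq(
    /*src_saved_model_path=*/"/tmp/float_model",  // assumed parameter
    /*dst_saved_model_path=*/"/tmp/int8_model",
    quantization_config,           // configures static-range PTQ behavior
    /*signature_keys=*/{"serving_default"},
    signature_def_map,             // signature key -> SignatureDef
    py_function_library);          // assumed, per the save/export excerpt
if (!status.ok()) {
  LOG(ERROR) << "Static-range PTQ failed: " << status;
}
```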
tensorflow/compiler/mlir/lite/quantization/lite/quantize_model.cc
pm.addPass(TFL::CreateModifyIONodesPass(input_mlir_type, output_mlir_type));
// If the first or final ops are not quantized, remove QDQ.
pm.addPass(TFL::CreatePostQuantizeRemoveQDQPass());
if (failed(pm.run(module.get()))) {
  const std::string err(statusHandler.ConsumeStatus().message());
  LOG(ERROR) << "Failed to quantize: " << err;
  return kTfLiteError;
}
// Export the results.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 6.3K bytes - Viewed (0)
tensorflow/compiler/mlir/tensorflow/ir/tf_generated_ops.td
let summary = [{
Perform quantized dot of quantized Tensor `lhs` and quantized Tensor `rhs`
to make quantized `output`.
}];

let description = [{
Given quantized `lhs` and quantized `rhs`, performs quantized dot on `lhs`
and `rhs` to make quantized `output`. `lhs` and `rhs` must be 2D Tensors
and the lhs.dim_size(1) must match rhs.dim_size(0).
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 11 23:24:08 UTC 2024 - 793K bytes - Viewed (0)
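The description fixes one concrete contract: both operands are 2-D and the contracting dimensions must agree. An illustrative check of that shape rule (not the op's actual verifier; `TensorShape` usage assumed for brevity):

```cpp
#include "tensorflow/core/framework/tensor_shape.h"

// Illustrative only: the 2-D contract from the description above,
// lhs.dim_size(1) == rhs.dim_size(0), i.e. [m, k] x [k, n] -> [m, n].
bool QuantizedDotShapesCompatible(const tensorflow::TensorShape& lhs,
                                  const tensorflow::TensorShape& rhs) {
  return lhs.dims() == 2 && rhs.dims() == 2 &&
         lhs.dim_size(1) == rhs.dim_size(0);
}
```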