Results 1 - 10 of 20 for Quantized (0.28 sec)
tensorflow/compiler/mlir/lite/quantization/lite/quantize_model.h
// and NumericVerify ops to compare output values from the quantized and float
// ops.
//
// When `legacy_float_scale` is true, the quantizer will use float scale instead
// of double, and call TOCO's quantization routines to maintain bit-exactness of
// the values with the TOCO quantizer.
TfLiteStatus QuantizeModel(
    absl::string_view model_buffer, const tflite::TensorType &input_type,
Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 2.8K bytes
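The `legacy_float_scale` behavior described above comes down to precision drift: a scale computed in float can differ in its low bits from one computed in double, and rounding with each can disagree. A minimal standalone sketch of that effect (the range value and the 255 quantization buckets are hypothetical, not taken from the quantizer):

#include <cstdio>

int main() {
  // Hypothetical tensor range; any value with a long binary expansion works.
  const double range = 6.2831853071795864769;
  const double scale_double = range / 255.0;
  const float scale_float = static_cast<float>(range) / 255.0f;
  std::printf("double scale: %.17g\n", scale_double);
  std::printf("float  scale: %.17g\n", static_cast<double>(scale_float));
  // The two scales differ in the low bits, so a value rounded with one can
  // land in a different quantization bucket than with the other.
  return 0;
}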
tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights_test.cc
                GetAsVector(expected_tensor->shape()));
}

// Finds the match of the quantized tensor from the possible tensors. Each
// possible tensor can be used only once. It checks shape and name if the
// tensor is quantized, and also checks buffer contents and tensor type if not
// quantized. For the quantized case, tensor type and quantization params are
// expected to be checked in the test body with the match.
Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 32.3K bytes
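A minimal sketch of the matching rule that comment describes, with a hypothetical `TensorInfo` struct standing in for the test's flatbuffer tensors (the buffer-content and type checks for the non-quantized case are elided):

#include <string>
#include <vector>

struct TensorInfo {
  std::string name;
  std::vector<int> shape;
  bool used = false;  // each possible tensor may match only once
};

// Returns the first unused candidate whose shape and name match, or nullptr.
TensorInfo* FindQuantizedMatch(std::vector<TensorInfo>& candidates,
                               const TensorInfo& quantized) {
  for (TensorInfo& c : candidates) {
    if (!c.used && c.shape == quantized.shape && c.name == quantized.name) {
      c.used = true;
      return &c;
    }
  }
  return nullptr;
}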
tensorflow/compiler/mlir/lite/quantization/lite/quantize_model_test.cc
              Eq(TensorType_INT8));
  // Verify that the FC bias is int32 quantized.
  ASSERT_THAT(float_graph->tensors()->Get(float_op->inputs()->Get(2))->type(),
              Eq(TensorType_FLOAT32));
  EXPECT_THAT(subgraph->tensors[op->inputs[2]].get()->type,
              Eq(TensorType_INT32));
  // The output tensor of the FC should be int8 quantized.
  ASSERT_THAT(float_graph->tensors()->Get(float_op->outputs()->Get(0))->type(),
Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 73.9K bytes
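The int32 bias asserted above follows from the arithmetic: int8 inputs and weights are multiplied into an int32 accumulator, so the bias must live in the accumulator's type and scale. A sketch of the usual scale relation (this convention is assumed here, not shown in the excerpt; all values hypothetical):

#include <cmath>
#include <cstdint>
#include <cstdio>

int main() {
  const float input_scale = 0.05f;    // hypothetical
  const float weight_scale = 0.002f;  // hypothetical
  // Bias scale matches the int8*int8 accumulator: input_scale * weight_scale.
  const float bias_scale = input_scale * weight_scale;
  const float bias_value = 0.37f;     // hypothetical float bias
  const int32_t q_bias =
      static_cast<int32_t>(std::lround(bias_value / bias_scale));
  std::printf("int32 bias: %d (scale %g)\n", q_bias, bias_scale);
  return 0;
}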
tensorflow/compiler/mlir/lite/tf_tfl_passes.cc
// The following two passes find specific uniform quantization patterns in
// StableHLO and convert them to TFLite ops that accept or produce uniform
// quantized types. They only target a specific set of models that contain
// "decomposed" quantized ops produced from the framework level. This is why
// they are placed right after the `LegalizeTFXlaCallModuleToStablehloPass`
Last Modified: Thu Jun 06 18:45:51 UTC 2024 - 25.5K bytes
tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights.h
// Stores information about how to quantize a user-specified custom operation.
// CustomOpInfo contains info of its corresponding CustomOp registered in the
// CustomOpMap. 'quantizable_input_indices' is used to determine which indices
// of the CustomOp are quantizable. 'is_weight_only' is used to specify whether
// the custom op is quantized only for storage and dequantized at runtime.
Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 4.2K bytes
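A minimal sketch of how such a registration might look; the struct layout, the `CustomOpMap` alias, and the op name "MyCustomConv" are assumptions based only on the header comment above:

#include <string>
#include <unordered_map>
#include <vector>

struct CustomOpInfo {
  std::vector<int> quantizable_input_indices;  // which inputs may be quantized
  bool is_weight_only;  // quantized for storage only, dequantized at runtime
};
using CustomOpMap = std::unordered_map<std::string, CustomOpInfo>;

int main() {
  CustomOpMap custom_ops;
  // Hypothetical custom op whose inputs 1 and 2 hold weights:
  custom_ops["MyCustomConv"] = CustomOpInfo{{1, 2}, /*is_weight_only=*/true};
  return 0;
}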
tensorflow/compiler/mlir/lite/ir/tfl_ops.td
}]>
  ];
}

def TFL_QuantizeOp: TFL_Op<"quantize", [
    FirstAttrDerivedResultType,
    SameOperandsAndResultShape,
    NoMemoryEffect]> {
  let summary = "Quantize operator";

  let description = [{
    Converts floating point tensors to quantized integer tensors according to
    the quantization parameters defined in the type attribute.
  }];
Last Modified: Thu Jun 06 19:09:08 UTC 2024 - 186K bytes
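For the common affine case, the quantization parameters carried in the type attribute are a scale and a zero point. A minimal sketch of the float-to-int8 mapping the description refers to (illustrative only, not the converter's kernel):

#include <algorithm>
#include <cmath>
#include <cstdint>

int8_t AffineQuantize(float x, float scale, int32_t zero_point) {
  // q = round(x / scale) + zero_point, saturated to the int8 range.
  const int32_t q = static_cast<int32_t>(std::round(x / scale)) + zero_point;
  return static_cast<int8_t>(std::clamp<int32_t>(q, -128, 127));
}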
tensorflow/compiler/mlir/tensorflow/ir/tf_generated_ops.td
let summary = [{
Perform quantized dot of quantized Tensor `lhs` and quantized Tensor `rhs` to
make quantized `output`.
}];

let description = [{
Given quantized `lhs` and quantized `rhs`, performs quantized dot on `lhs` and
`rhs` to make quantized `output`.

`lhs` and `rhs` must be 2D Tensors and the lhs.dim_size(1) must match
rhs.dim_size(0).
Last Modified: Tue Jun 11 23:24:08 UTC 2024 - 793K bytes
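A minimal sketch of the shape contract and the widening multiply-accumulate (row-major layout; the op's output rescaling and zero-point handling are omitted, and all names here are illustrative):

#include <cassert>
#include <cstdint>
#include <vector>

// lhs is [m, k], rhs is [k2, n]; lhs.dim_size(1) must match rhs.dim_size(0).
std::vector<int32_t> QuantizedDot(const std::vector<int8_t>& lhs, int m, int k,
                                  const std::vector<int8_t>& rhs, int k2,
                                  int n) {
  assert(k == k2);
  std::vector<int32_t> out(static_cast<size_t>(m) * n, 0);
  for (int i = 0; i < m; ++i)
    for (int j = 0; j < n; ++j)
      for (int p = 0; p < k; ++p)
        out[i * n + j] += static_cast<int32_t>(lhs[i * k + p]) *
                          static_cast<int32_t>(rhs[p * n + j]);
  return out;
}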
tensorflow/compiler/mlir/lite/quantization/lite/quantize_model.cc
pm.addPass(TFL::CreateModifyIONodesPass(input_mlir_type, output_mlir_type));
// If the first or final ops are not quantized, remove QDQ.
pm.addPass(TFL::CreatePostQuantizeRemoveQDQPass());

if (failed(pm.run(module.get()))) {
  const std::string err(statusHandler.ConsumeStatus().message());
  LOG(ERROR) << "Failed to quantize: " << err;
  return kTfLiteError;
}

// Export the results.
Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 6.3K bytes
tensorflow/compiler/mlir/tensorflow/transforms/passes.h
// TensorFlow.
std::unique_ptr<OperationPass<ModuleOp>>
CreatePrepareTpuComputationForTfExportPass();

// Rewrites ops that require quantized inputs or outputs to ops that allow
// non-quantized inputs and outputs.
std::unique_ptr<OperationPass<func::FuncOp>> CreateLowerQuantizedPass();

// Reorders ops so ops of the same dialect are next to each other.
Last Modified: Wed Jun 12 21:18:05 UTC 2024 - 31.8K bytes
tensorflow/compiler/mlir/tensorflow/transforms/tf_passes.td
}

def LowerQuantizedPass : Pass<"tf-lower-quantized", "mlir::func::FuncOp"> {
  let summary = "Lowers ops that require quantized input or output.";

  let description = [{
    This pass rewrites all ops that have at least one input or output that must
    be a quantized type to ops whose inputs and outputs allow non-quantized
    types. Examples of quantized types are TF_Qint8 or TF_Quint8.
Last Modified: Wed Jun 12 21:18:05 UTC 2024 - 99.6K bytes