- Sort Score
- Result 10 results
- Languages All
Results 31 - 40 of 93 for created (0.07 sec)
-
tensorflow/compiler/mlir/quantization/stablehlo/cc/config.cc
config.mutable_calibration_options() ->mutable_representative_datasets() ->Add(preset_datasets.begin(), preset_datasets.end()); } // Create a new `QuantizationSpecs` to replace the existing one. The // expansion from `StaticRangePtqPreset` gets populated first and then // user-provided explicit `QuantizationSpec`s will be appended. QuantizationSpecs new_specs{};
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 8.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/ir/UniformSupport.cc
return convert(attr); } return nullptr; } DenseElementsAttr UniformQuantizedPerAxisValueConverter::convert( DenseFPElementsAttr attr) { // Creates the converter for each chunk. Normally the size of the // quantization dim is 3, so we can cache all the converters. ShapedType type = attr.getType(); std::size_t dim_size = type.getDimSize(quantization_dim_);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 08 02:10:16 UTC 2024 - 4.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/reduce_type_precision.cc
dyn_cast<arith::ConstantOp>(op.getOperand(0).getDefiningOp()); if (!input_op) { return failure(); } Builder builder(op.getContext()); auto new_gather_op = rewriter.create<TFL::GatherOp>( op.getLoc(), /*result=*/ mlir::cast<TensorType>(op.getResult().getType()) .clone(builder.getI4Type()), /*operand=*/op.getOperands(), op->getAttrs());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/set_tpu_infeed_layout.cc
} else { /* If we're not running on a TPU node, we might not be able to * actually call the part of the TPU API that gives us layout. * This happens e.g. for unit tests. Below we just create a reasonable * layout. We sort by dimension size, which makes the layout agree with * the "correct" TPU layout in surprisingly many cases. * Note that the corresponding InfeedEnqueue op will be generated
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 6.1K bytes - Viewed (0) -
tensorflow/c/eager/c_api_unified_experimental.cc
using tensorflow::tracing::TracingTensorHandle; void TF_SetTracingImplementation(const char* name, TF_Status* s) { tsl::Set_TF_Status_from_Status(s, SetDefaultTracingEngine(name)); } // Creates a new TensorFlow function; it is an execution context attached to a // given tracing context. TF_ExecutionContext* TF_CreateFunction(const char* fn_name, TF_Status* s) { return wrap(CreateTracingExecutionContext(fn_name, s));
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 09 10:15:17 UTC 2024 - 9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/debugging/dump_tensor_op.cc
} // `DumpTensor` op saves the entire value of the input as a tensor proto into a // specified directory and filename. When enabled is set to false, the op is // disabled and won't save any value. It also creates a `QuantizationUnit` proto // with `func_name` and `node_name` to identify the op. REGISTER_OP("DumpTensor") .Input("tensor_data: T") .Attr("log_dir_path: string") .Attr("file_name: string") .Attr("T: type")
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 03:12:17 UTC 2024 - 4.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/convert_tpu_model_to_cpu.cc
module_op.emitError() << "quant-convert-tpu-model-to-cpu pattern " "conversion did not converge."; signalPassFailure(); return; } } } // namespace // Creates an instance of `ConvertTpuModelToCpuPass`. std::unique_ptr<OperationPass<ModuleOp>> CreateConvertTpuModelToCpuPass() { return std::make_unique<ConvertTpuModelToCpuPass>(); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.5K bytes - Viewed (0) -
tensorflow/compiler/jit/tests/device_compiler_test_helper.cc
#include "tensorflow/compiler/jit/xla_compilation_cache.pb.h" #include "xla/service/hlo.pb.h" #include "tensorflow/core/platform/path.h" #include "tensorflow/core/public/session.h" namespace tensorflow { namespace { // Creates a float tensor of linearly increasing values, starting from offset. Tensor CreateInputTensor(const TensorShape& shape, float offset) { Tensor tensor(DT_FLOAT, shape); for (int64 i = 0; i < tensor.flat<float>().size(); ++i) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Feb 09 08:24:16 UTC 2024 - 6.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/prepare_quantize.cc
ctx); if (failed(applyPatternsAndFoldGreedily(func_op, std::move(patterns_2)))) { signalPassFailure(); } } } } // namespace // Creates an instance of the TensorFlow dialect PrepareQuantize pass. std::unique_ptr<OperationPass<ModuleOp>> CreatePrepareQuantizePass( const bool enable_per_channel_quantized_weight, const int bit_width) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 03 05:11:03 UTC 2024 - 8.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/lift_quantizable_spots_as_functions.cc
if (failed(ApplyQuantizationSpec(spec, module_op))) { signalPassFailure(); return; } } } } // namespace // Creates `LiftQuantizableSpotsAsFunctionsPass` with user-defined // `QuantizationSpecs`. std::unique_ptr<OperationPass<ModuleOp>> CreateLiftQuantizableSpotsAsFunctionsPass( const QuantizationSpecs& quantization_specs) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 9.3K bytes - Viewed (0)