- Sort Score
- Result 10 results
- Languages All
Results 51 - 60 of 129 for created (0.07 sec)
-
tensorflow/compiler/mlir/quantization/tensorflow/debugging/dump_tensor_op.cc
} // `DumpTensor` op saves entire value of input as a tensor proto into a // specified directory and filename. When enabled is set to false, op is // disabled and won't save any value. It also creates `QuantizationUnit` proto // with `func_name` and `node_name` to identify the op. REGISTER_OP("DumpTensor") .Input("tensor_data: T") .Attr("log_dir_path: string") .Attr("file_name: string") .Attr("T: type")
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 03:12:17 UTC 2024 - 4.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/convert_tpu_model_to_cpu.cc
module_op.emitError() << "quant-convert-tpu-model-to-cpu pattern " "conversion did not converge."; signalPassFailure(); return; } } } // namespace // Creates an instance of `ConvertTpuModelToCpuPass`. std::unique_ptr<OperationPass<ModuleOp>> CreateConvertTpuModelToCpuPass() { return std::make_unique<ConvertTpuModelToCpuPass>(); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.5K bytes - Viewed (0) -
tensorflow/compiler/jit/tests/device_compiler_test_helper.cc
#include "tensorflow/compiler/jit/xla_compilation_cache.pb.h" #include "xla/service/hlo.pb.h" #include "tensorflow/core/platform/path.h" #include "tensorflow/core/public/session.h" namespace tensorflow { namespace { // Creates a float tensor of linearly increasing values, starting from offset. Tensor CreateInputTensor(const TensorShape& shape, float offset) { Tensor tensor(DT_FLOAT, shape); for (int64 i = 0; i < tensor.flat<float>().size(); ++i) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Feb 09 08:24:16 UTC 2024 - 6.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/prepare_quantize.cc
ctx); if (failed(applyPatternsAndFoldGreedily(func_op, std::move(patterns_2)))) { signalPassFailure(); } } } } // namespace // Creates an instance of the TensorFlow dialect PrepareQuantize pass. std::unique_ptr<OperationPass<ModuleOp>> CreatePrepareQuantizePass( const bool enable_per_channel_quantized_weight, const int bit_width) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 03 05:11:03 UTC 2024 - 8.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/lift_quantizable_spots_as_functions.cc
if (failed(ApplyQuantizationSpec(spec, module_op))) { signalPassFailure(); return; } } } } // namespace // Creates `LiftQuantizableSpotsAsFunctionsPass` with user-defined // `QuantizationSpecs`. std::unique_ptr<OperationPass<ModuleOp>> CreateLiftQuantizableSpotsAsFunctionsPass( const QuantizationSpecs& quantization_specs) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 9.3K bytes - Viewed (0) -
tensorflow/cc/framework/cc_ops_test.cc
test::GetTensors(root, {add}, &out); test::ExpectTensorNear<float>(out[0], test::AsTensor<float>({42.0f}, {}), 1e-5); out.clear(); // Note: GetTensors creates a new session, so 'v' is uninitialized. // sub should have no control deps, so it should not cause the assign to run. // Hence is_inited should be false. test::GetTensors(root, {sub, is_inited}, &out);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Apr 15 15:13:38 UTC 2023 - 8.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/legalize_tf_while.cc
new_operands.push_back(block->addArgument(t, loc)); auto call = builder.create<func::CallOp>(loc, func, new_operands); builder.create<YieldOp>(loc, call.getResults()); // Mark old function as private so that it can be DCE'd if not called. func.setPrivate(); } void RunOnWhile(TF::WhileOp while_op) { Operation* op = while_op.getOperation(); // Create new TFL While op that will be used to replace TF While op.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Nov 03 12:35:38 UTC 2022 - 3.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/cast_bf16_ops_to_f32.cc
if (failed(applyPatternsAndFoldGreedily(module_op, std::move(patterns)))) { module_op.emitError() << "quant-cast-bf16-ops-to-f32 failed."; signalPassFailure(); } } } // namespace // Creates an instance of the Cast BF16 ops to F32 pass. std::unique_ptr<OperationPass<ModuleOp>> CreateCastBf16OpsToF32Pass() { return std::make_unique<CastBf16OpsToF32Pass>(); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sun Dec 10 05:52:02 UTC 2023 - 4.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/insert_quantized_functions.cc
"attribute in the quantized composite function."; signalPassFailure(); } } } } // namespace // Creates an instance of the pass for inserting quantized functions. std::unique_ptr<OperationPass<ModuleOp>> CreateInsertQuantizedFunctionsPass( QuantMethod quantization_method, OpSet target_opset) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 22 05:52:39 UTC 2024 - 8.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/modify_io_nodes.cc
return; } auto new_func_type = builder.getFunctionType(new_input_types, new_output_types); func.setType(new_func_type); } } // namespace // Creates an instance of the TensorFlow Lite modify io nodes pass. std::unique_ptr<OperationPass<func::FuncOp>> CreateModifyIONodesPass( Type input_type, Type output_type) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 8.9K bytes - Viewed (0)