- Sort Score
- Result 10 results
- Languages All
Results 41 - 50 of 93 for created (0.16 sec)
-
tensorflow/compiler/mlir/quantization/tensorflow/passes/cast_bf16_ops_to_f32.cc
if (failed(applyPatternsAndFoldGreedily(module_op, std::move(patterns)))) { module_op.emitError() << "quant-cast-bf16-ops-to-f32 failed."; signalPassFailure(); } } } // namespace // Creates an instance of the Cast BF16 ops to F32 pass. std::unique_ptr<OperationPass<ModuleOp>> CreateCastBf16OpsToF32Pass() { return std::make_unique<CastBf16OpsToF32Pass>(); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sun Dec 10 05:52:02 UTC 2023 - 4.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/insert_quantized_functions.cc
"attribute in the quantized composite function."; signalPassFailure(); } } } } // namespace // Creates an instance of the pass for inserting quantized functions. std::unique_ptr<OperationPass<ModuleOp>> CreateInsertQuantizedFunctionsPass( QuantMethod quantization_method, OpSet target_opset) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 22 05:52:39 UTC 2024 - 8.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/modify_io_nodes.cc
return; } auto new_func_type = builder.getFunctionType(new_input_types, new_output_types); func.setType(new_func_type); } } // namespace // Creates an instance of the TensorFlow Lite modify io nodes pass. std::unique_ptr<OperationPass<func::FuncOp>> CreateModifyIONodesPass( Type input_type, Type output_type) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 8.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/xla_call_module_serialization.cc
return WalkResult::advance(); }); if (result.wasInterrupted()) { return failure(); } return builder.getArrayAttr(function_list); } // Creates a pruned module containing the XlaCallModule's entry function and // other functions transitively called by the entry function. FailureOr<OwningOpRef<ModuleOp>> PruneStablehloModule(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 9.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/default_quant_params.cc
} else { builder.setInsertionPointToStart(&block); } TypeAttr type_attr = TypeAttr::get(new_type); auto quantize = builder.create<TFL::QuantizeOp>(value.getLoc(), new_type, value, type_attr); auto dequantize = builder.create<TFL::DequantizeOp>( value.getLoc(), expressed_type, quantize.getOutput()); value.replaceAllUsesWith(dequantize);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 9.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/propagate_quantize_type.cc
if (failed(applyPatternsAndFoldGreedily(func, frozen_patterns))) { func.emitError() << "quant-propagate-quantize-type failed."; signalPassFailure(); } } } } // namespace // Creates an instance of the TensorFlow dialect PropagateQuantizeType pass. std::unique_ptr<OperationPass<ModuleOp>> CreatePropagateQuantizeTypePass() { return std::make_unique<PropagateQuantizeType>(); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfrt/tests/tfrt_fallback/batch_function_fallback_benchmark_test.cc
#include "tfrt/support/rc_array.h" // from @tf_runtime #include "tfrt/tensor/dense_host_tensor.h" // from @tf_runtime #include "tfrt/tensor/tensor.h" // from @tf_runtime namespace tensorflow { namespace { // Creates a BEF file with a program that runs // tfrt_fallback_async.batch_function with an empty function forwarding inputs or // outputs. std::pair<tfrt::BefBuffer, tfrt::RCReference<tfrt::BEFFile>> CreateBefFile(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Dec 08 08:08:48 UTC 2023 - 6.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/optimize_batch_matmul.cc
if (constant.getType().getRank() != 2) return failure(); // Create a tfl.transpose op that performs ZX transpose on `input`. auto create_z_x_transpose_op = [&](Value input) -> Value { RankedTensorType input_type = mlir::cast<RankedTensorType>(input.getType()); const int input_rank = input_type.getRank(); // Create a 1D I32 tensor for representing the dimension permutation.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 9.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/transforms/utils.cc
ConstantOp GetScalarConstOfType(Type ty, Location loc, int64_t raw_value, OpBuilder* builder) { return builder->create<ConstantOp>(loc, hlo::getScalarOfType(ty, raw_value)); } ConstantOp GetScalarNegZeroOfType(Type ty, Location loc, OpBuilder* builder) { return builder->create<ConstantOp>(loc, hlo::getScalarNegZeroOfType(ty)); } DenseIntElementsAttr GetI64ElementsAttr(ArrayAttr attr) { RankedTensorType ty =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Sep 06 19:12:29 UTC 2023 - 1.8K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_compile_util.cc
// _Arg nodes, and let CompileGraph walk it. This could be optimized. std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global())); // First create the actual node we care about computing. TF_ASSIGN_OR_RETURN(Node * main_node, graph->AddNode(node_def)); // Create dummy _Arg nodes. Link these to `node` and also via a control // dependency edge to the _SOURCE node. for (int64_t i = 0, end = args.size(); i < end; ++i) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 4.6K bytes - Viewed (0)