- Sort Score
- Results per page: 10
- Languages All
Results 91 - 100 of 324 for "created" (0.62 sec)
-
tensorflow/compiler/mlir/quantization/stablehlo/cc/config.cc
config.mutable_calibration_options() ->mutable_representative_datasets() ->Add(preset_datasets.begin(), preset_datasets.end()); } // Create a new `QuantizationSpecs` to replace the existing one. The // expansion from `StaticRangePtqPreset` gets populated first and then // user-provided explicit `QuantizationSpec`s will be appended. QuantizationSpecs new_specs{};
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 8.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/api/v1/compile_mlir_util.cc
/*allow_partial_conversion=*/false); auto pass_instrumentors = mlir::GetPassInstrumentors(); for (const auto& creator : pass_instrumentors) { tf2xla.addInstrumentation(creator()); } if (DEBUG_DATA_DUMPER()->ShouldDump(module_name.str(), kDebugGroupMain) || VLOG_IS_ON(1)) { tensorflow::DumpMlirOpToFile(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 21 17:24:39 UTC 2024 - 45.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/ir/UniformSupport.cc
return convert(attr); } return nullptr; } DenseElementsAttr UniformQuantizedPerAxisValueConverter::convert( DenseFPElementsAttr attr) { // Creates the converter for each chunk. Normally the size of the // quantization dim is 3, so we can cache all the converters. ShapedType type = attr.getType(); std::size_t dim_size = type.getDimSize(quantization_dim_);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 08 02:10:16 UTC 2024 - 4.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/utils/visitor.cc
ModuleOp module, llvm::ArrayRef<llvm::StringRef> function_names) { SymbolTableCollection symbol_table; OpBuilder builder(module.getContext()); OwningOpRef<ModuleOp> pruned = builder.create<ModuleOp>(module->getLoc()); (*pruned)->setAttrs(module->getAttrs()); builder.setInsertionPointToEnd(pruned->getBody()); llvm::SmallDenseSet<func::FuncOp> added;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 19 03:46:51 UTC 2023 - 4.1K bytes - Viewed (0) -
tensorflow/c/experimental/ops/resource_variable_ops.cc
#include "tensorflow/core/framework/types.h" #include "tensorflow/core/platform/errors.h" using tensorflow::tracing::MaybeSetOpName; namespace tensorflow { namespace ops { // Op: VarHandleOp() // Summary: Creates a handle to a Variable resource. // // Description: Status VarHandleOp(AbstractContext* ctx, AbstractTensorHandle** resource, DataType dtype, const PartialTensorShape shape,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 10 19:11:36 UTC 2022 - 5.1K bytes - Viewed (0) -
tensorflow/cc/framework/gradient_checker.cc
for (int i = 0; i < y_num; i++) { dy_datas[i] = Tensor(ys[i].type(), y_shapes[i]); auto dy_data_flat = dy_datas[i].flat<Y_T>(); dy_data_flat.setZero(); } // Create the feed list. ClientSession::FeedType feed_list; for (int i = 0; i < x_num; i++) { feed_list.insert({xs[i], x_datas[i]}); } for (int i = 0; i < y_num; i++) { feed_list.insert({dys[i], dy_datas[i]});
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Apr 13 05:57:22 UTC 2024 - 18.2K bytes - Viewed (0) -
tensorflow/compiler/jit/kernels/xla_ops.cc
compilation_successful.scalar<bool>()() = false; ctx->set_output(0, compilation_key); ctx->set_output(1, compilation_successful); return; } // Each execution of an XlaCompile op creates a new ExecutableClosure, even // if it didn't have to compile the cluster because of a compilation-cache // hit. This is because we at least need new snapshots of the resource // variables.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 22:46:36 UTC 2024 - 41.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/reduce_type_precision.cc
dyn_cast<arith::ConstantOp>(op.getOperand(0).getDefiningOp()); if (!input_op) { return failure(); } Builder builder(op.getContext()); auto new_gather_op = rewriter.create<TFL::GatherOp>( op.getLoc(), /*result=*/ mlir::cast<TensorType>(op.getResult().getType()) .clone(builder.getI4Type()), /*operand=*/op.getOperands(), op->getAttrs());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/hoist_replicate_invariant_resource_writes.cc
assign.getValue().getType()); } OpBuilder builder(replicate_op); // Clone this old replicate op but with new result types. auto new_replicate_op = builder.create<tf_device::ReplicateOp>( replicate_op->getLoc(), new_result_types, replicate_op->getOperands(), replicate_op->getAttrs()); // Move region to the new op.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Nov 03 12:35:38 UTC 2022 - 5.8K bytes - Viewed (0) -
tensorflow/cc/framework/gradients.cc
std::vector<Output>* grad_outputs); // Returns a list mapping whether each node in the graph is reachable // from outputs_. Keyed by node id. std::vector<bool> GetReachableNodes(); // Creates the gradient subgraph for a while loop (or just stores // `summed_grads` if not all incoming gradients are available yet). All exit // nodes (which are the first nodes of a loop encountered in the backwards
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Apr 13 05:57:22 UTC 2024 - 22K bytes - Viewed (0)