Results 91 - 100 of 313 for "created" (0.14 sec)
tensorflow/compiler/mlir/tensorflow/utils/visitor.cc
ModuleOp module, llvm::ArrayRef<llvm::StringRef> function_names) {
  SymbolTableCollection symbol_table;
  OpBuilder builder(module.getContext());
  OwningOpRef<ModuleOp> pruned = builder.create<ModuleOp>(module->getLoc());
  (*pruned)->setAttrs(module->getAttrs());
  builder.setInsertionPointToEnd(pruned->getBody());
  llvm::SmallDenseSet<func::FuncOp> added;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 19 03:46:51 UTC 2023 - 4.1K bytes - Viewed (0) -
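The excerpt above is the create-and-populate idiom for pruning: build an empty ModuleOp, copy the original module's attributes onto it, then clone the requested functions in. A minimal sketch of that pattern using only generic MLIR APIs (the helper name and the by-name lookup are illustrative, not the file's actual reachability logic):

#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/OwningOpRef.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"

// Clone the named functions from `module` into a fresh, empty module.
mlir::OwningOpRef<mlir::ModuleOp> CloneFunctionsInto(
    mlir::ModuleOp module, llvm::ArrayRef<llvm::StringRef> keep) {
  mlir::OpBuilder builder(module.getContext());
  mlir::OwningOpRef<mlir::ModuleOp> pruned =
      builder.create<mlir::ModuleOp>(module.getLoc());
  (*pruned)->setAttrs(module->getAttrs());
  builder.setInsertionPointToEnd(pruned->getBody());
  for (llvm::StringRef name : keep) {
    if (auto fn = module.lookupSymbol<mlir::func::FuncOp>(name))
      builder.clone(*fn.getOperation());  // deep copy into the new module
  }
  return pruned;
}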
tensorflow/cc/framework/gradient_checker.cc
for (int i = 0; i < y_num; i++) {
  dy_datas[i] = Tensor(ys[i].type(), y_shapes[i]);
  auto dy_data_flat = dy_datas[i].flat<Y_T>();
  dy_data_flat.setZero();
}
// Create the feed list.
ClientSession::FeedType feed_list;
for (int i = 0; i < x_num; i++) {
  feed_list.insert({xs[i], x_datas[i]});
}
for (int i = 0; i < y_num; i++) {
  feed_list.insert({dys[i], dy_datas[i]});
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Apr 13 05:57:22 UTC 2024 - 18.2K bytes - Viewed (0) -
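The excerpt above zero-initializes the dy tensors and then builds a ClientSession::FeedType map from graph outputs to concrete tensors. A self-contained sketch of that feed-and-run pattern, assuming a trivial graph rather than the gradient checker's:

#include <vector>

#include "tensorflow/cc/client/client_session.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/status.h"

int main() {
  using namespace tensorflow;  // NOLINT: brevity in a sketch
  Scope root = Scope::NewRootScope();
  auto x = ops::Placeholder(root, DT_FLOAT);
  auto y = ops::Square(root, x);

  Tensor x_data(DT_FLOAT, TensorShape({2}));
  x_data.flat<float>().setValues({3.f, 4.f});

  // Feed the placeholder with a concrete Tensor and fetch y.
  ClientSession::FeedType feed_list;
  feed_list.insert({x, Input::Initializer(x_data)});

  ClientSession session(root);
  std::vector<Tensor> outputs;
  TF_CHECK_OK(session.Run(feed_list, {y}, &outputs));
  return 0;
}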
tensorflow/compiler/jit/kernels/xla_ops.cc
compilation_successful.scalar<bool>()() = false;
    ctx->set_output(0, compilation_key);
    ctx->set_output(1, compilation_successful);
    return;
  }

  // Each execution of an XlaCompile op creates a new ExecutableClosure, even
  // if it didn't have to compile the cluster because of a compilation-cache
  // hit. This is because we at least need new snapshots of the resource
  // variables.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 22:46:36 UTC 2024 - 41.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/reduce_type_precision.cc
    dyn_cast<arith::ConstantOp>(op.getOperand(0).getDefiningOp());
if (!input_op) {
  return failure();
}
Builder builder(op.getContext());
auto new_gather_op = rewriter.create<TFL::GatherOp>(
    op.getLoc(),
    /*result=*/
    mlir::cast<TensorType>(op.getResult().getType())
        .clone(builder.getI4Type()),
    /*operand=*/op.getOperands(), op->getAttrs());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.4K bytes - Viewed (0) -
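The new gather op above takes the original result's tensor type and clones it with an i4 element type. That type-cloning idiom in isolation, assuming `value` is some tensor-typed mlir::Value:

#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Value.h"

// Same shape as `value`, element type narrowed to i4.
mlir::Type NarrowToI4(mlir::Value value) {
  mlir::Builder builder(value.getContext());
  auto old_type = mlir::cast<mlir::TensorType>(value.getType());
  // clone(Type) preserves the shape and swaps only the element type.
  return old_type.clone(builder.getI4Type());
}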
tensorflow/compiler/mlir/tensorflow/transforms/hoist_replicate_invariant_resource_writes.cc
                            assign.getValue().getType());
}

OpBuilder builder(replicate_op);
// Clone this old replicate op but with new result types.
auto new_replicate_op = builder.create<tf_device::ReplicateOp>(
    replicate_op->getLoc(), new_result_types, replicate_op->getOperands(),
    replicate_op->getAttrs());

// Move region to the new op.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Nov 03 12:35:38 UTC 2022 - 5.8K bytes - Viewed (0) -
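The excerpt above rebuilds a tf_device.ReplicateOp with a different result list while keeping its operands, attributes, and body region. With generic MLIR APIs, the same clone-with-new-result-types step can be sketched like this (the helper is illustrative, not the pass's code):

#include "mlir/IR/Builders.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/IR/TypeRange.h"

// Rebuild `op` with `new_result_types`, transferring its regions.
mlir::Operation* CloneWithNewResultTypes(mlir::OpBuilder& builder,
                                         mlir::Operation* op,
                                         mlir::TypeRange new_result_types) {
  mlir::OperationState state(op->getLoc(), op->getName());
  state.addOperands(op->getOperands());
  state.addTypes(new_result_types);
  state.addAttributes(op->getAttrs());
  for (mlir::Region& region : op->getRegions()) {
    // Move the body over instead of deep-cloning it.
    state.addRegion()->takeBody(region);
  }
  return builder.create(state);
}

The original op still exists afterwards (now with empty regions), so callers typically remap its results to the new op and erase it.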
tensorflow/cc/framework/gradients.cc
                 std::vector<Output>* grad_outputs);

// Returns a list mapping whether each node in the graph is reachable
// from outputs_. Keyed by node id.
std::vector<bool> GetReachableNodes();

// Creates the gradient subgraph for a while loop (or just stores
// `summed_grads` if not all incoming gradients are available yet). All exit
// nodes (which are the first nodes of a loop encountered in the backwards
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Apr 13 05:57:22 UTC 2024 - 22K bytes - Viewed (0) -
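gradients.cc implements the symbolic gradient builder behind AddSymbolicGradients, declared in tensorflow/cc/framework/gradients.h. A minimal usage sketch, assuming a toy graph rather than anything in this file:

#include <vector>

#include "tensorflow/cc/client/client_session.h"
#include "tensorflow/cc/framework/gradients.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/platform/status.h"

int main() {
  using namespace tensorflow;  // NOLINT: brevity in a sketch
  Scope root = Scope::NewRootScope();
  auto x = ops::Const(root, {{1.f, 2.f}});
  auto y = ops::Square(root, x);

  // Append the gradient subgraph for dy/dx to the same scope.
  std::vector<Output> grad_outputs;
  TF_CHECK_OK(AddSymbolicGradients(root, {y}, {x}, &grad_outputs));

  ClientSession session(root);
  std::vector<Tensor> out;
  TF_CHECK_OK(session.Run(grad_outputs, &out));  // out[0] holds 2*x elementwise
  return 0;
}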
tensorflow/compiler/jit/xla_platform_info.cc
const std::string& profiler_name =
    GetPjRtDeviceCompilationProfilerResourceName(device_type);
bool deleted_old_device_compiler = false;

// Lookup the DeviceCompiler, create one if not found.
Status s = rm->Lookup<PjRtDeviceCompiler>(
    rm->default_container(), compiler_name, pjrt_device_compiler);
if (s.ok() && device_type == DEVICE_TPU) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 17:23:27 UTC 2024 - 17.4K bytes - Viewed (0) -
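The Lookup call above checks the process-wide ResourceMgr for an existing compiler before deciding whether to build one. ResourceMgr also offers LookupOrCreate, which collapses that check into a single call; a toy sketch with a hypothetical resource class and names:

#include <cstdint>
#include <string>

#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/platform/status.h"

// Minimal ResourceBase subclass, for illustration only.
class DemoCounter : public tensorflow::ResourceBase {
 public:
  std::string DebugString() const override { return "DemoCounter"; }
  int64_t value = 0;
};

tensorflow::Status GetOrCreateCounter(tensorflow::ResourceMgr* rm,
                                      DemoCounter** counter) {
  // Returns the existing resource, or runs the creator on a miss.
  return rm->LookupOrCreate<DemoCounter>(
      rm->default_container(), "demo_counter", counter,
      [](DemoCounter** out) {
        *out = new DemoCounter();
        return tensorflow::OkStatus();
      });
}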
tensorflow/compiler/mlir/lite/utils/perception_ops_utils_test.cc
    const SmallVector<mlir::Type, NOutput>& output_types) {
  auto func_type = builder->getFunctionType(input_types, output_types);
  auto func = func::FuncOp::create(
      mlir::NameLoc::get(builder->getStringAttr("fused_func")), "fused_func",
      func_type, {});
  func.addEntryBlock();
  mlir::StringAttr attr_value = builder->getStringAttr("MaxUnpooling2D");
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Sep 29 21:02:21 UTC 2022 - 7.4K bytes - Viewed (0) -
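The test helper above assembles a FunctionType from the given input/output types, creates a detached func::FuncOp named "fused_func", and adds its entry block. A bare-bones sketch of that construction (the signature, attribute name, and return handling below are placeholders):

#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Builders.h"

// Build a detached f32 -> f32 identity-like function.
mlir::func::FuncOp MakeFusedFunc(mlir::OpBuilder& builder) {
  mlir::Type f32 = builder.getF32Type();
  auto func_type = builder.getFunctionType({f32}, {f32});
  auto func = mlir::func::FuncOp::create(builder.getUnknownLoc(), "fused_func",
                                         func_type);
  mlir::Block* entry = func.addEntryBlock();  // one block arg per input type
  builder.setInsertionPointToEnd(entry);
  builder.create<mlir::func::ReturnOp>(builder.getUnknownLoc(),
                                       entry->getArgument(0));
  // The attribute name is illustrative; the test attaches its own marker attr.
  func->setAttr("fused_op", builder.getStringAttr("MaxUnpooling2D"));
  return func;
}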
tensorflow/compiler/mlir/tensorflow/transforms/set_tpu_infeed_layout.cc
} else {
  /* If we're not running on a TPU node, we might not be able to
   * actually call the part of the TPU API that gives us layout.
   * This happens e.g. for unit tests. Below we just create a reasonable
   * layout. We sort by dimension size, which makes the layout agree with
   * the "correct" TPU layout in surprisingly many cases.
   * Note that the corresponding InfeedEnqueue op will be generated
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 6.1K bytes - Viewed (0) -
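The comment above describes a fallback: when the real TPU layout API is unavailable, pick a layout by sorting dimensions by size. A heavily hedged sketch of that heuristic in isolation (the orientation and tie-breaking are assumptions, not the file's exact logic):

#include <algorithm>
#include <cstdint>
#include <numeric>
#include <vector>

// Order dimension indices by their size, smallest first.
std::vector<int64_t> GuessLayout(const std::vector<int64_t>& dims) {
  std::vector<int64_t> layout(dims.size());
  std::iota(layout.begin(), layout.end(), 0);
  std::stable_sort(layout.begin(), layout.end(),
                   [&](int64_t a, int64_t b) { return dims[a] < dims[b]; });
  return layout;
}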
tensorflow/c/eager/c_api_unified_experimental.cc
using tensorflow::tracing::TracingTensorHandle;

void TF_SetTracingImplementation(const char* name, TF_Status* s) {
  tsl::Set_TF_Status_from_Status(s, SetDefaultTracingEngine(name));
}

// Creates a new TensorFlow function; it is an execution context attached to a
// given tracing context.
TF_ExecutionContext* TF_CreateFunction(const char* fn_name, TF_Status* s) {
  return wrap(CreateTracingExecutionContext(fn_name, s));
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 09 10:15:17 UTC 2024 - 9K bytes - Viewed (0)
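TF_CreateFunction above hands back a tracing execution context backed by whichever tracing engine was selected. A minimal call-sequence sketch; the engine name "graphdef" and the elided tracing steps are assumptions:

#include "tensorflow/c/eager/c_api_unified_experimental.h"
#include "tensorflow/c/tf_status.h"

void TraceEmptyFunction() {
  TF_Status* s = TF_NewStatus();
  // Choose a tracing backend before creating the function context.
  TF_SetTracingImplementation("graphdef", s);
  TF_ExecutionContext* ctx = TF_CreateFunction("my_fn", s);
  if (TF_GetCode(s) == TF_OK) {
    // ... add parameters, trace ops, and finalize the function here ...
    TF_DeleteExecutionContext(ctx);
  }
  TF_DeleteStatus(s);
}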