- Sort Score
- Results per page: 10
- Languages All
Results 1 - 10 of 93 for getUses (0.17 sec)
-
platforms/core-execution/workers/src/main/java/org/gradle/workers/internal/WorkerDaemonClient.java
workerClient.stop(); } public void kill() { workerClient.stopNow(); } DaemonForkOptions getForkOptions() { return forkOptions; } public int getUses() { return uses; } public KeepAliveMode getKeepAliveMode() { return forkOptions.getKeepAliveMode(); } public LogLevel getLogLevel() { return logLevel; }
Registered: Wed Jun 12 18:38:38 UTC 2024 - Last Modified: Thu May 30 19:54:37 UTC 2024 - 3.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/replicate_tensor_list_init_ops_pass.cc
template <typename T> void ReplicateTensorListForUses(T tensor_list_op) { Value tensor_list = tensor_list_op.getResult(); std::vector<OpOperand*> uses; for (auto& use : tensor_list.getUses()) { uses.emplace_back(&use); } OpBuilder builder(tensor_list_op.getOperation()); for (OpOperand* operand : uses) { auto new_op = builder.clone(*tensor_list_op.getOperation());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sun Jan 22 17:28:34 UTC 2023 - 2.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/constant_op_device_assignment.cc
} OpBuilder builder(op); llvm::StringMap<mlir::Operation *> cloned_op_by_device; bool all_uses_replaced = true; for (mlir::OpOperand &use : llvm::make_early_inc_range(op.getResult().getUses())) { mlir::Operation *user_op = use.getOwner(); StringAttr device_attr = user_op->getAttrOfType<StringAttr>(kDeviceAttr); if (!device_attr) { all_uses_replaced = false; continue; }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Oct 05 23:50:19 UTC 2022 - 3.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/utils/topological_sort.cc
while (!todo.empty()) { Value value = todo.front(); todo.pop(); // All operations that have all their inputs available are good to go. // Uses, not Users, in case getUsers ever dedups. for (OpOperand& operand : value.getUses()) { Operation* user = ancestor[operand.getOwner()]; remaining_incoming_data_edges[user]--; if (remaining_incoming_data_edges[user] == 0 &&
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Nov 08 17:01:11 UTC 2022 - 5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/propagate_quantize_type.cc
if (!function_name.starts_with(kDequantizeFunctionName)) return failure(); llvm::SmallVector<Operation*> users(op->getUsers().begin(), op->getUsers().end()); bool changed = false; for (auto& use : op->getUses()) { Operation* user_op = use.getOwner(); int user_idx = use.getOperandNumber(); if (!IsOpWithInt8TypeOperand(user_op)) continue;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/tpu_parallel_execute_sink_resource_write.cc
// before or above the parallel_execute. TF::AssignVariableOp GetSingleUseResourceWrite( tf_device::ParallelExecuteOp parallel_execute, Value result) { if (!result.hasOneUse()) return nullptr; OpOperand& use = *result.getUses().begin(); auto assign_var = dyn_cast<TF::AssignVariableOp>(use.getOwner()); if (!assign_var) return nullptr; if (use.get() != assign_var.getValue()) return nullptr;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Dec 06 04:46:18 UTC 2022 - 6.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/internal/passes/hoist_broadcast_read.cc
Value res = read.getResource(); Operation* scope = res.getParentBlock()->getParentOp(); if (!scope->isProperAncestor(replicate)) continue; bool has_conflicting_write = false; for (OpOperand& use : res.getUses()) { Operation* using_op = use.getOwner(); if (using_op == read) continue; if (!replicate->isProperAncestor(using_op)) continue; Operation* peer = GetAncestorBelow(using_op, replicate);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/cluster_formation.cc
for (const auto& p : llvm::zip(live_outs, launch_op.getResults())) { Value from = std::get<0>(p); // TODO(jingpu): move this to RegionUtils.h in MLIR core. for (auto& use : llvm::make_early_inc_range(from.getUses())) { if (launch_op_region->isAncestor(use.getOwner()->getParentRegion())) continue; use.set(std::get<1>(p)); } } } // Get all escaped live-out values of a region.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Oct 05 13:30:21 UTC 2023 - 6.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/fuse_convolution_pass.cc
}); } // For dynamic case, the result of conv should be used by shape_of and mul. if (is_dynamic_broadcast) { auto conv_uses = (*conv_op.getODSResults(0).begin()).getUses(); if (std::distance(conv_uses.begin(), conv_uses.end()) != 2 || quant::FindUserOfType<shape::ShapeOfOp>(conv_op) == nullptr || quant::FindUserOfType<mhlo::MulOp>(conv_op) == nullptr) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 22:21:19 UTC 2024 - 8.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/legalize_hashtables.cc
mlir::isa<IntegerType>(key_dtype) && mlir::cast<IntegerType>(key_dtype).getWidth() == 64))) { return false; } for (auto& use : hashtable->getUses()) { Operation* user = use.getOwner(); // Allow consuming hash table ops that can be covered by TensorFlow Lite // hash table kernels.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 7.6K bytes - Viewed (0)