- Sort: Score
- Results per page: 10
- Languages: All
Results 81 - 90 of 3,615 for Reserve (0.14 sec)
-
tensorflow/compiler/mlir/lite/transforms/optimize_functional_ops.cc
// If this is a terminator, identify the values to use to replace the // original If op. if (op_to_inline.hasTrait<OpTrait::IsTerminator>()) { updated_results.reserve(op_to_inline.getNumOperands()); for (Value operand : op_to_inline.getOperands()) updated_results.push_back(mapper.lookup(operand)); break; } // Otherwise, clone the op here.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 6.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/ir/tf_ops_tensor_helper.cc
if (!is_reduce_dim[dim]) { is_reduce_dim[dim] = true; num_reduce_dim++; } } ArrayRef<int64_t> shape = ranked_ty.getShape(); SmallVector<int64_t, 4> out_shape; out_shape.reserve(rank - (keep_dims.getValue() ? 0 : num_reduce_dim)); for (int64_t i = 0; i < rank; ++i) { if (!is_reduce_dim[i]) out_shape.push_back(shape[i]); else if (keep_dims.getValue()) out_shape.push_back(1);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 6.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/mlir_graph_optimization_pass.cc
MlirOptimizationPassState overall_state = MlirOptimizationPassState::Disabled; // Cache per pass state and reuse it during pass execution. std::vector<MlirOptimizationPassState> per_pass_state; per_pass_state.reserve(registry_->passes().size()); int num_passes_enabled = 0, num_passes_disabled = 0, num_passes_fallback_enabled = 0; for (const auto& pass_registration : registry_->passes()) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 22:19:26 UTC 2024 - 18.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/ir/tf_ops_n_z.cc
// Compress the operands, region arguments, and outputs. SmallVector<Value, 4> new_while_operands; SmallVector<Type, 4> new_result_types; new_while_operands.reserve(new_num_operands); new_result_types.reserve(new_num_operands); // Build new operands and result type. for (int op_idx : llvm::seq<int>(0, old_num_operands)) { if (removed_operand.test(op_idx)) continue;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 09 22:07:10 UTC 2024 - 170.8K bytes - Viewed (0) -
src/runtime/netpoll_wasip1.go
) func netpollinit() { // Unlike poll(2), WASI's poll_oneoff doesn't accept a timeout directly. To // prevent it from blocking indefinitely, a clock subscription with a // timeout field needs to be submitted. Reserve a slot here for the clock // subscription, and set fields that won't change between poll_oneoff calls. subs = make([]subscription, 1, 128) evts = make([]event, 0, 128) pds = make([]*pollDesc, 0, 128)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Mar 27 18:23:49 UTC 2024 - 6.1K bytes - Viewed (0) -
pkg/scheduler/schedule_one.go
return ScheduleResult{nominatingInfo: clearNominatedNode}, assumedPodInfo, framework.AsStatus(err) } // Run the Reserve method of reserve plugins. if sts := fwk.RunReservePluginsReserve(ctx, state, assumedPod, scheduleResult.SuggestedHost); !sts.IsSuccess() { // trigger un-reserve to clean up state associated with the reserved Pod fwk.RunReservePluginsUnreserve(ctx, state, assumedPod, scheduleResult.SuggestedHost)
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Thu Jun 06 13:28:08 UTC 2024 - 43.4K bytes - Viewed (0) -
tensorflow/cc/framework/while_gradients.cc
} std::vector<Output> ToOutputVector( const std::vector<OutputTensor>& output_tensors) { const int n = output_tensors.size(); std::vector<Output> result; result.reserve(n); for (int i = 0; i < n; ++i) result.push_back(ToOutput(output_tensors[i])); return result; } // The backprop loop counter and main backprop loop run in their own execution
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Apr 13 05:57:22 UTC 2024 - 8.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/experimental/tac/execution_metadata_exporter.cc
&hardware_map); // Populate the runtime metadata. std::vector<flatbuffers::Offset<SubgraphMetadata>> subgraphs_metadata; subgraphs_metadata.reserve(funcs.size()); for (auto& func : funcs) { subgraphs_metadata.push_back( CreateSubgraphMetadata(hardware_map, &func.getBody(), &fb_builder)); } auto runtime_metadata =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 11 06:11:34 UTC 2024 - 7.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfrt/runtime_fallback/runtime_fallback_executor.cc
<< "Wrong number of arguments for function " << function_name.str(); // Prepare function arguments from ready Chain and input Tensors. llvm::SmallVector<tfrt::AsyncValue*> exec_arguments; exec_arguments.reserve(compute->num_arguments()); exec_arguments.push_back(tfrt::GetReadyChain().release()); for (const Tensor& input_tensor : arguments) { auto av = MakeAvailableAsyncValueRef<FallbackTensor>(input_tensor);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 08 00:18:59 UTC 2024 - 9.1K bytes - Viewed (0) -
cmd/kube-apiserver/app/options/options.go
"overlap with any IP ranges assigned to nodes or pods. Max of two dual-stack CIDRs is allowed.") fs.Var(&s.ServiceNodePortRange, "service-node-port-range", ""+ "A port range to reserve for services with NodePort visibility. This must not overlap with the ephemeral port range on nodes. "+ "Example: '30000-32767'. Inclusive at both ends of the range.") // Kubelet related flags:
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Sat Apr 27 12:19:56 UTC 2024 - 6.5K bytes - Viewed (0)