- Sort Score
- Results per page: 10
- Languages All
Results 71 - 80 of 3,381 for Reserve (0.14 sec)
-
pkg/scheduler/framework/plugins/dynamicresources/dynamicresources.go
} for _, node := range nodes { if !slices.Contains(schedulingCtx.Spec.PotentialNodes, node.Node().Name) { return false } } return true } // Reserve reserves claims for the pod. func (pl *dynamicResources) Reserve(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, nodeName string) (status *framework.Status) { if !pl.enabled { return nil } state, err := getStateData(cs)
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Thu May 30 15:22:37 UTC 2024 - 75.9K bytes - Viewed (0) -
pkg/kubelet/apis/config/types.go
// This flag accepts a list of options. Acceptable options are `pods`, `system-reserved` & `kube-reserved`. // Refer to [Node Allocatable](https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/#node-allocatable) doc for more information. EnforceNodeAllocatable []string // This option specifies the cpu list reserved for the host level system threads and kubernetes related threads.
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Tue Mar 05 21:10:42 UTC 2024 - 35.1K bytes - Viewed (0) -
platforms/core-execution/persistent-cache/src/test/groovy/org/gradle/cache/internal/locklistener/DefaultFileLockContentionHandlerTest.groovy
} def "reserving port is safely reentrant"() { when: int port = handler.reservePort() then: handler.reservePort() == port } def "cannot reserve port when the handler was stopped"() { handler.stop() when: handler.reservePort() then: thrown(IllegalStateException) }
Registered: Wed Jun 12 18:38:38 UTC 2024 - Last Modified: Tue Apr 16 15:49:49 UTC 2024 - 6.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/attrs_and_constraints.cc
return failure(); } return static_cast<int32_t>(value); } FailureOr<SmallVector<int32_t>> CastI64ArrayToI32( const ArrayRef<int64_t> int64_array) { SmallVector<int32_t> int32_array{}; int32_array.reserve(int64_array.size()); for (const int64_t i64 : int64_array) { FailureOr<int32_t> cast_i32 = CastI64ToI32(i64); if (failed(cast_i32)) return failure(); int32_array.push_back(*cast_i32); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 6.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/lower_static_tensor_list.cc
return success(); } // Extract individual tensor list element and combine them using the tf.Pack // op. Location loc = op.getLoc(); llvm::SmallVector<Value, 4> values; values.reserve(tensors.size()); for (const tensorflow::Tensor &tensor : tensors) { auto attr_or = tensorflow::ConvertTensor(tensor, &rewriter); if (!attr_or.ok()) return failure();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 11 20:00:43 UTC 2024 - 70.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/cluster_outlining.cc
func::FuncOp BuildFunction(llvm::ArrayRef<Value> live_ins, ClusterOrLaunchOp op, SymbolTable* symbol_table, OpBuilder* builder) { llvm::SmallVector<Type, 4> operand_types; operand_types.reserve(live_ins.size()); for (Value v : live_ins) operand_types.emplace_back(v.getType()); auto func_type = builder->getFunctionType(operand_types, op.getResultTypes()); std::string func_name;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 21:25:12 UTC 2024 - 7.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/optimize_functional_ops.cc
// If this is a terminator, identify the values to use to replace the // original If op. if (op_to_inline.hasTrait<OpTrait::IsTerminator>()) { updated_results.reserve(op_to_inline.getNumOperands()); for (Value operand : op_to_inline.getOperands()) updated_results.push_back(mapper.lookup(operand)); break; } // Otherwise, clone the op here.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 6.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/ir/tf_ops_tensor_helper.cc
if (!is_reduce_dim[dim]) { is_reduce_dim[dim] = true; num_reduce_dim++; } } ArrayRef<int64_t> shape = ranked_ty.getShape(); SmallVector<int64_t, 4> out_shape; out_shape.reserve(rank - (keep_dims.getValue() ? 0 : num_reduce_dim)); for (int64_t i = 0; i < rank; ++i) { if (!is_reduce_dim[i]) out_shape.push_back(shape[i]); else if (keep_dims.getValue()) out_shape.push_back(1);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 6.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/mlir_graph_optimization_pass.cc
MlirOptimizationPassState overall_state = MlirOptimizationPassState::Disabled; // Cache per pass state and reuse it during pass execution. std::vector<MlirOptimizationPassState> per_pass_state; per_pass_state.reserve(registry_->passes().size()); int num_passes_enabled = 0, num_passes_disabled = 0, num_passes_fallback_enabled = 0; for (const auto& pass_registration : registry_->passes()) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 22:19:26 UTC 2024 - 18.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/ir/tf_ops_n_z.cc
// Compress the operands, region arguments, and outputs. SmallVector<Value, 4> new_while_operands; SmallVector<Type, 4> new_result_types; new_while_operands.reserve(new_num_operands); new_result_types.reserve(new_num_operands); // Build new operands and result type. for (int op_idx : llvm::seq<int>(0, old_num_operands)) { if (removed_operand.test(op_idx)) continue;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 09 22:07:10 UTC 2024 - 170.8K bytes - Viewed (0)