- Sort Score
- Results per page: 10
- Languages All
Results 1 - 10 of 52 for Reserve (0.14 sec)
-
pkg/scheduler/framework/runtime/framework_test.go
return tmpPl, nil }); err != nil { t.Fatalf("Unable to register pre bind plugins: %s", pl.name) } configPlugins.Reserve.Enabled = append( configPlugins.Reserve.Enabled, config.Plugin{Name: pl.name}, ) } profile := config.KubeSchedulerProfile{Plugins: configPlugins} ctx, cancel := context.WithCancel(context.Background())
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Fri May 17 09:07:27 UTC 2024 - 103K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/translate/import_model.cc
const FunctionBody& fbody, absl::InlinedVector<OutputTensor, 4>* arg_nodes, absl::InlinedVector<OutputTensor, 4>* ret_nodes, absl::InlinedVector<Node*, 4>* control_ret_nodes) { arg_nodes->reserve(fbody.arg_nodes.size()); ret_nodes->reserve(fbody.ret_nodes.size()); for (auto arg : fbody.arg_nodes) { arg_nodes->emplace_back(arg, 0); } for (auto ret : fbody.ret_nodes) { ret_nodes->emplace_back(ret, 0); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 01 11:17:36 UTC 2024 - 183.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/ir/tf_ops_n_z.cc
// Compress the operands, region arguments, and outputs. SmallVector<Value, 4> new_while_operands; SmallVector<Type, 4> new_result_types; new_while_operands.reserve(new_num_operands); new_result_types.reserve(new_num_operands); // Build new operands and result type. for (int op_idx : llvm::seq<int>(0, old_num_operands)) { if (removed_operand.test(op_idx)) continue;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 09 22:07:10 UTC 2024 - 170.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/ir/tfl_ops.cc
SmallVector<bool, 4> removed_operand(while_op.getNumOperands(), false); llvm::SmallVector<Type, 4> types; new_operands.reserve(while_op.getNumOperands()); new_body_yield.reserve(while_op.getNumOperands()); types.reserve(while_op.getNumOperands()); // Remove block arguments not used in either cond or body. This leaves the // block arguments of body and cond matching still.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 169.2K bytes - Viewed (0) -
tensorflow/c/c_api.cc
tensorflow::shape_inference::InferenceContext* ic, int num_dims, const int64_t* dims) { if (num_dims != -1) { std::vector<tensorflow::shape_inference::DimensionHandle> dim_vec; dim_vec.reserve(num_dims); for (int i = 0; i < num_dims; ++i) { dim_vec.push_back(ic->MakeDim(dims[i])); } return ic->MakeShape(dim_vec); } else { return ic->UnknownShape(); } }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Apr 15 03:35:10 UTC 2024 - 102.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/ir/tf_ops_a_m.cc
} // If branches have incompatible input types that means that no tensor can // serve as input to all the functions. Hence, the op is invalid. int expected_num_inputs = op->getNumOperands() - 1; for (int i = 0; i < expected_num_inputs; ++i) { SmallVector<Type, 2> branch_input_i_types; branch_input_i_types.reserve(branches.size()); llvm::transform( branch_types, std::back_inserter(branch_input_i_types),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 146.7K bytes - Viewed (0) -
pkg/scheduler/schedule_one_test.go
}{ { name: "error reserve pod", sendPod: podWithID("foo", ""), mockResult: mockScheduleResult{ScheduleResult{SuggestedHost: testNode.Name, EvaluatedNodes: 1, FeasibleNodes: 1}, nil}, registerPluginFuncs: []tf.RegisterPluginFunc{ tf.RegisterReservePlugin("FakeReserve", tf.NewFakeReservePlugin(framework.NewStatus(framework.Error, "reserve error"))), },
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Tue Jun 04 06:20:55 UTC 2024 - 128.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo.cc
auto input_shape = input_type.getShape(); llvm::SmallVector<int64_t, 4> start; llvm::SmallVector<int64_t, 4> size; start.reserve(explicit_padding.size() / 2); size.reserve(explicit_padding.size() / 2); for (int i = 0, e = explicit_padding.size() / 2; i < e; ++i) { int64_t pre_padding = explicit_padding[2 * i];
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 154.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/flatbuffer_export.cc
const std::string& summary_title) { std::string op_str; llvm::raw_string_ostream os(op_str); std::vector<std::string> keys; keys.reserve(ops.size()); std::vector<std::string> values; values.reserve(ops.size()); for (auto const& op_name_and_details : ops) { keys.push_back(op_name_and_details.first); for (auto const& op_detail : op_name_and_details.second) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 21:41:49 UTC 2024 - 164.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/shape_inference.cc
// because `op` and `loader` are using different MLIR contexts. See comments // on `xla_call_module_context_` for details. std::vector<xla::Shape> input_shapes; input_shapes.reserve(op.getArgs().size()); for (mlir::Type type : op.getArgs().getTypes()) { input_shapes.push_back(xla::TypeToShape(type)); } absl::Status status = loader->RefineDynamicShapes(input_shapes);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Jun 08 07:28:49 UTC 2024 - 134.1K bytes - Viewed (0)