- Sort Score
- Results per page: 10
- Languages All
Results 131 - 140 of 3,381 for Reserve (0.21 sec)
-
src/runtime/runtime_test.go
// We might get unlucky and the OS might have mapped one of these // addresses, but probably not: they're all in the first page, very high // addresses that normally an OS would reserve for itself, or malformed // addresses. Even so, we might have to remove one or two on different // systems. We will see. var faultAddrs = []uint64{ // low addresses 0, 1, 0xfff,
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 01:00:11 UTC 2024 - 11.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/dot_general.cc
template <typename ValueT, typename... RangeTs> llvm::SmallVector<ValueT, 4> Concat(RangeTs&&... ranges) { llvm::SmallVector<int64_t, 4> results; results.reserve(Size(std::forward<RangeTs>(ranges)...)); Append(results, std::forward<RangeTs>(ranges)...); return results; } // A struct to hold information about dimensions of dot_general operands. class DotDimensionsInfo {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 19.2K bytes - Viewed (0) -
tensorflow/compiler/jit/get_compiler_ir.cc
TF_RET_CHECK(fbody != nullptr); auto& input_args = fbody->record->fdef().signature().input_arg(); int input_arg_size = input_args.size(); std::vector<XlaCompiler::Argument> args; args.reserve(input_arg_size); for (auto& arg_info : flat_arg_shape_and_dtype) { XlaCompiler::Argument arg; arg.kind = XlaCompiler::Argument::kParameter; arg.type = arg_info.dtype; arg.shape = arg_info.shape;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 06:59:07 UTC 2024 - 19K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/ir/tf_ops_a_m.cc
} // If branches have incompatible input types that means that no tensor can // serve as input to all the functions. Hence, the op is invalid. int expected_num_inputs = op->getNumOperands() - 1; for (int i = 0; i < expected_num_inputs; ++i) { SmallVector<Type, 2> branch_input_i_types; branch_input_i_types.reserve(branches.size()); llvm::transform( branch_types, std::back_inserter(branch_input_i_types),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 146.7K bytes - Viewed (0) -
tensorflow/c/eager/gradients.cc
for (int i = 0; i < inputs.size(); i++) { input_ids[i] = ToId(inputs[i]); input_dtypes[i] = inputs[i]->DataType(); } std::vector<TapeTensor> tape_tensors; tape_tensors.reserve(outputs.size()); for (auto t : outputs) { tape_tensors.push_back(TapeTensor(t)); } GradientTape::RecordOperation( op_name, tape_tensors, input_ids, input_dtypes,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 15 09:49:45 UTC 2024 - 19.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_quantize_dynamic_range.cc
// Cast values std::vector<Eigen::half> new_values; DenseFPElementsAttr value_attr = mlir::cast<DenseFPElementsAttr>(op.getValue()); new_values.reserve(value_attr.getNumElements()); constexpr float kMaxFloat16Value = 65504.f; constexpr float kMinFloat16Value = -65504.f; for (auto value : value_attr.template getValues<float>()) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 20.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/analysis/side_effect_analysis.cc
llvm::SmallVector<Operation*, 4> result; auto it = sorted_control_predecessors_.find(op); if (it == sorted_control_predecessors_.end()) return result; result.reserve(it->getSecond().size()); for (auto predecessor : it->getSecond()) { if (!filter || filter(predecessor)) result.push_back(predecessor); } return result; } const llvm::SmallVector<Operation*, 4>&
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 15 09:04:13 UTC 2024 - 41.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/utils/tf_xla_mlir_translate.cc
argument_kinds.clear(); if (input_types_str.empty()) return absl::OkStatus(); std::vector<absl::string_view> argument_kind_strs = absl::StrSplit(input_types_str, ','); argument_kinds.reserve(argument_kind_strs.size()); for (const auto& argument_kind_str : llvm::enumerate(argument_kind_strs)) { const auto& value = argument_kind_str.value(); if (value == "parameter") {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 18.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/internal/passes/extract_head_tail_outside_compilation.cc
new_cluster_results); old_terminator->erase(); builder->setInsertionPoint(cluster); llvm::SmallVector<Type, 4> new_cluster_result_types; new_cluster_result_types.reserve(new_cluster_results.size()); for (const auto& new_cluster_result : new_cluster_results) new_cluster_result_types.push_back(new_cluster_result.getType());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 20.6K bytes - Viewed (0) -
src/runtime/malloc.go
} else { // On a 32-bit machine, we're much more concerned // about keeping the usable heap contiguous. // Hence: // // 1. We reserve space for all heapArenas up front so // they don't get interleaved with the heap. They're // ~258MB, so this isn't too bad. (We could reserve a // smaller amount of space up front if this is a // problem.) // // 2. We hint the heap to start right above the end of
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 59.6K bytes - Viewed (0)