- Sort Score
- Results per page: 10
- Languages All
Results 31 - 40 of 178 for pushBack (0.26 sec)
-
tensorflow/compiler/mlir/lite/experimental/tac/execution_metadata_exporter.cc
if (!main_fn) return std::string(""); flatbuffers::FlatBufferBuilder fb_builder; std::vector<mlir::func::FuncOp> funcs; funcs.push_back(main_fn); module.walk([&](mlir::func::FuncOp fn) { if (fn != main_fn) { funcs.push_back(fn); } }); // Populate the hardware metadata. // And collect the hardwares used. std::map<std::string, uint8_t> hardware_map;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 11 06:11:34 UTC 2024 - 7.5K bytes - Viewed (0) -
tensorflow/cc/client/client_session.cc
output_tensor_names.reserve(fetch_outputs.size()); for (auto const& output : fetch_outputs) { output_tensor_names.push_back(output.name()); } std::vector<string> target_node_names; target_node_names.reserve(run_outputs.size()); for (auto const& output : run_outputs) { target_node_names.push_back(output.node()->name()); } TF_RETURN_IF_ERROR(impl()->MaybeExtendGraph());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 28 09:04:10 UTC 2024 - 7.1K bytes - Viewed (0) -
tensorflow/cc/gradients/data_flow_grad.cc
// reshape back into a data-shaped tensor to propagate gradients for the data // input. grad_outputs->push_back(Reshape(scope, reconstructed, Shape(scope, data))); // Stop propagation along the partitions input grad_outputs->push_back(NoGradient()); return scope.status(); } REGISTER_GRADIENT_OP("DynamicPartition", DynamicPartitionGrad);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Jul 24 13:40:35 UTC 2021 - 5.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/executor_tpuv1_outline_tpu_island.cc
func_operand_types.reserve(operands.size()); for (Value operand : operands) func_operand_types.push_back(operand.getType()); // Function results are the yield operands SmallVector<Type, 16> func_result_types; for (Value operand : island_op.GetYield().getOperands()) func_result_types.push_back(operand.getType()); FunctionType func_type =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 8.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/xla_rewrite.cc
for (const Value &arg : cluster_func_op.getOperands()) { if (!mlir::isa<TF::ResourceType>(getElementTypeOrSelf(arg.getType()))) { non_resource_args.push_back(arg); if (has_resources) in_order = false; } else { resource_args.push_back(arg); has_resources = true; } } if (!in_order) { // Functions do not get reused in practice, so skip the check for if the
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 4.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/utils/low_bit_utils.cc
unpacked_buffer.reserve(num_elements); for (uint8_t value : src_buffer) { // Cast to signed before right-shifting to ensure correct sign extension unpacked_buffer.push_back(static_cast<int8_t>(value << 4) >> 4); unpacked_buffer.push_back(static_cast<int8_t>(value) >> 4); } // The last element might be a padded zero, so check and pop if needed if (unpacked_buffer.size() > num_elements) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 04 19:11:58 UTC 2023 - 2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/set_tpu_infeed_layout.cc
for (const mlir::Type &t : types) { if (mlir::isa<mhlo::TokenType>(t)) continue; auto layout = GetTPUInfeedLayout({t}, rewriter); if (failed(layout)) return failure(); v.push_back(layout.value()); } ArrayRef<Attribute> shape(v); return rewriter.getArrayAttr(shape); } else if (mlir::isa<TupleType>(types[0])) { auto tuple_type = mlir::dyn_cast<TupleType>(types[0]);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 6.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/cc/constant_fold.cc
inputs.push_back(preceding_const_op.getValue()); } SmallVector<Attribute> result_values; if (failed(TF::EvaluateOperation(op, inputs, result_values))) { return failure(); } results.clear(); builder.setInsertionPointAfter(op); for (const auto& result_value : result_values) { results.push_back(builder.create<TF::ConstOp>(op->getLoc(), result_value)); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 17:58:54 UTC 2024 - 5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/importer_test_min_max.cc
input_tensor->quantization->scale.clear(); input_tensor->quantization->zero_point.clear(); input_tensor->quantization->min.push_back(-1.0); input_tensor->quantization->max.push_back(1.0); auto& output_tensor = sub_graph->tensors[op->outputs[0]]; auto shape = output_tensor->shape; output_tensor->quantization->scale.clear();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 21 18:21:50 UTC 2024 - 6.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/tpu_parallel_execute_sink_resource_write.cc
for (auto result : llvm::enumerate(results)) { TF::AssignVariableOp assign_var = GetSingleUseResourceWrite(parallel_execute, result.value()); if (!assign_var) { results_to_remap.push_back(result.value()); continue; } // Move AssignVariableOp and update the value to be written to the // resource variable to be the non forwarded value from within the
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Dec 06 04:46:18 UTC 2022 - 6.6K bytes - Viewed (0)