- Sort Score
- Results per page: 10
- Languages All
Results 1 - 10 of 16 for output_node (0.18 sec)
-
tensorflow/compiler/jit/encapsulate_xla_computations_pass.cc
TF_RET_CHECK(le->src_output() < num_outputs); Node* output_node = le->dst(); if (add_edges_to_output_of_downstream_nodes) { TF_RET_CHECK(output_node->type_string() == kXlaClusterOutput) << le->DebugString(); nodes_to_remove.push_back(output_node); for (const Edge* oe : output_node->out_edges()) { TF_RET_CHECK(!oe->IsControlEdge());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 12 06:33:33 UTC 2024 - 15.1K bytes - Viewed (0) -
tensorflow/cc/framework/gradients.cc
} } } // Finally, we set stop_backprop_nodes to all output_nodes that aren't also // internal_outputs. std::unordered_set<int> stop_backprop_nodes; for (int output_node : output_nodes) { if (internal_outputs.find(output_node) == internal_outputs.end()) { stop_backprop_nodes.insert(output_node); } } return stop_backprop_nodes; }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Apr 13 05:57:22 UTC 2024 - 22K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights_test.cc
const uint8_t* buffer = builder.GetBufferPointer(); const Model* output_model = GetModel(buffer); ASSERT_TRUE(output_model); // Nothing should change. ASSERT_EQ(output_model->subgraphs()->size(), model_->subgraphs()->size()); for (size_t subgraph_idx = 0; subgraph_idx < model_->subgraphs()->size(); subgraph_idx++) { const auto quantized_graph = output_model->subgraphs()->Get(subgraph_idx);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 32.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/sparsity/sparsify_model_test.cc
output_builder.GetSize()); tflite::ModelT output_model; output_fbm->GetModel()->UnPackTo(&output_model); // Extract output metadata std::map<std::string, std::string> output_metadata; for (const auto& metadata : output_model.metadata) { const auto& data = output_model.buffers[metadata->buffer]->data; output_metadata[metadata->name] = std::string(data.begin(), data.end());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Jun 10 20:16:40 UTC 2024 - 2.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/insert_main_function.cc
for (StringRef output_name : fn_output_names) { if (output_name_set.contains(output_name)) { // Found a duplicated name, all output names will be prefixed by // their corresponding function names. need_prefix_for_output_name = true; } output_name_set.insert(output_name); fn_output_name_vec.push_back(function_name);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 16.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/utils/fake_session.cc
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Feb 26 03:47:51 UTC 2024 - 7.3K bytes - Viewed (0) -
hack/apidiff.sh
PATH="${GOBIN}:${PATH}" echo "Installing apidiff into ${GOBIN}." go install golang.org/x/exp/cmd/apidiff@latest cd "${KUBE_ROOT}" # output_name targets a target directory and prints the base name of # an output file for that target. output_name () { what="$1" echo "${what}" | sed -e 's/[^a-zA-Z0-9_-]/_/g' -e 's/$/.out/' } # run invokes apidiff once per target and stores the output
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Fri Apr 26 09:00:03 UTC 2024 - 5.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/translate/mlir_roundtrip_flags.cc
return absl::OkStatus(); } Status ParseOutputArrayInfo(const std::vector<string>& output_names, std::vector<string>* outputs) { for (auto& output_name : output_names) { if (output_name.empty()) continue; outputs->push_back(output_name); } return absl::OkStatus(); } Status ParseInputArrayInfo(absl::string_view array_names, absl::string_view data_types,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 01 11:17:36 UTC 2024 - 10.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/python/converter_python_api.cc
FromTocoDataTypeToTflitToTensorType(output_data_type); std::string output_model; const absl::string_view input_model_buffer(buf, length); auto status = mlir::lite::QuantizeModel( input_model_buffer, input_type, output_type, inference_tensor_type, /*operator_names=*/{}, disable_per_channel, fully_quantize, output_model, enable_numeric_verify, enable_whole_model_verify,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 19.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.py
quantization_options = tf.quantization.experimental.QuantizationOptions( signature_keys=['your_signature_key'], ) tf.quantization.experimental.quantize_saved_model( '/tmp/input_model', '/tmp/output_model', quantization_options=quantization_options, ) # When quantizing a model trained without QAT (Post-Training Quantization), # a representative dataset is required.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 34.2K bytes - Viewed (0)