- Sort Score
- Results per page: 10
- Languages All
Results 1 - 3 of 3 for DCHECK_GT (0.11 sec)
-
tensorflow/compiler/jit/xla_device_context.cc
->DoHostCallback([device_to_host_stream] {}) .IgnoreError(); } } }); } se::Stream* XlaDeviceContext::GetDeviceToDeviceStream() { DCHECK_GT(device_to_device_streams_.size(), 0); absl::MutexLock lock(&mu_); int stream = next_stream_; next_stream_ = (next_stream_ + 1) % device_to_device_streams_.size(); return device_to_device_stream(stream); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 16 00:36:08 UTC 2024 - 12.7K bytes - Viewed (0) -
tensorflow/c/while_loop_test.cc
~CApiWhileLoopTest() override { TF_DeleteGraph(graph_); TF_DeleteStatus(s_); } void Init(int ninputs) { DCHECK(inputs_.empty()); DCHECK_GT(ninputs, 0); for (int i = 0; i < ninputs; ++i) { TF_Operation* placeholder = Placeholder( graph_, s_, ::tensorflow::strings::StrCat("p", i).c_str());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 11 06:05:56 UTC 2024 - 15.3K bytes - Viewed (0) -
tensorflow/cc/framework/gradients.cc
// processed yet. This happens if not all outputs of a node are in 'inputs_'. std::unordered_map<Node*, int> requested_grads; for (const Output& nout : inputs_) { if (pending_[nout.node()->id()] > 0) { DCHECK_GT(nout.node()->num_outputs(), 1); int idx = input_nodes_[nout]; DCHECK(((*grad_outputs_)[idx].node() == nullptr)); TF_RETURN_IF_ERROR(SumGradients(nout, &(*grad_outputs_)[idx]));
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Apr 13 05:57:22 UTC 2024 - 22K bytes - Viewed (0)