- Sort Score
- Results 10 results
- Languages All
Results 11 - 20 of 32 for onStop (0.22 sec)
-
tensorflow/compiler/jit/encapsulate_subgraphs_pass.cc
edge->src()->name(), ":", edge->src_output()); } } Subgraph& dst_subgraph = subgraphs_[dst_func_id]; // Ignore control edges entering the subgraph. We will lift them onto // the enclosing call operators in BuildOutputGraph(). if (!edge->IsControlEdge()) { TF_RETURN_IF_ERROR( dst_subgraph.RecordArg(edge, node_images, src_arg_pairs)); } }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 08:47:20 UTC 2024 - 51K bytes - Viewed (0) -
src/regexp/syntax/parse.go
hsub := p.calcHeight(sub, false) if h < 1+hsub { h = 1 + hsub } } p.height[re] = h return h } // Parse stack manipulation. // push pushes the regexp re onto the parse stack and returns the regexp. func (p *parser) push(re *Regexp) *Regexp { p.numRunes += len(re.Rune) if re.Op == OpCharClass && len(re.Rune) == 2 && re.Rune[0] == re.Rune[1] { // Single rune.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Apr 02 13:59:01 UTC 2024 - 52.1K bytes - Viewed (0) -
tensorflow/c/kernels_test.cc
OpsTestBase::SetDevice(DEVICE_GPU, std::move(device)); #endif TF_ASSERT_OK(NodeDefBuilder(op_name, op_name).Finalize(node_def())); TF_ASSERT_OK(InitOp()); } #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM const char* device_name_ = tensorflow::DEVICE_GPU; #else const char* device_name_ = tensorflow::DEVICE_CPU; #endif };
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Sep 06 19:12:29 UTC 2023 - 50.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/sparsecore/embedding_pipelining.cc
func.getRegion().getOps<TF::GlobalIterIdOp>()) { auto loc = global_iter_id_op->getLoc(); builder.setInsertionPointAfter(global_iter_id_op); auto offset = builder.create<TF::ConstOp>( loc, builder.getI64IntegerAttr(offset_value)); auto new_global_iter_id = builder.create<TF::AddV2Op>( loc, global_iter_id_op->getResultTypes(),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 92.9K bytes - Viewed (0) -
operator/pkg/apis/istio/v1alpha1/values_types.proto
// The mode defines the action the controller will take when a pod is detected as broken. // If deletePods is true, the controller will delete the broken pod. The pod will then be rescheduled, hopefully onto a node that is fully ready. // Note this gives the DaemonSet a relatively high privilege, as it can delete any Pod. bool deletePods = 7;
Registered: Fri Jun 14 15:00:06 UTC 2024 - Last Modified: Mon Jun 03 01:55:05 UTC 2024 - 57.2K bytes - Viewed (0) -
platforms/software/dependency-management/src/main/java/org/gradle/api/internal/artifacts/ivyservice/resolveengine/graph/builder/NodeState.java
// variant and here we are in the case the "including" component said that transitive // should be false so we need to arbitrarily carry that onto the dependency metadata assert dependencies.size() == 1; dependencies = Collections.singletonList(makeNonTransitive(dependencies.get(0))); }
Registered: Wed Jun 12 18:38:38 UTC 2024 - Last Modified: Fri Jun 07 14:19:34 UTC 2024 - 58.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/lite/quantize_model_test.cc
/*allow_float=*/false, tensor_type_, output_buffer_); ASSERT_THAT(status, Eq(kTfLiteOk)); // Verify ConstOp is quantized. const auto& subgraph = model_.subgraphs[0]; auto op = subgraph->operators[0].get(); ASSERT_THAT(GetBuiltinCode(model_.operator_codes[op->opcode_index].get()), Eq(BuiltinOperator_ADD));
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 73.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/internal/passes/extract_outside_compilation.cc
device_cluster.getLoc(), RankedTensorType::get({}, builder.getI64Type()), builder.getI64IntegerAttr(core)); } else { device_ordinal_op = builder.create<mlir::TF::ConstOp>( device_cluster.getLoc(), DenseIntElementsAttr::get( RankedTensorType::get({}, builder.getI64Type()), static_cast<int64_t>(0))); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 21:25:12 UTC 2024 - 68.3K bytes - Viewed (0) -
src/runtime/asm_amd64.s
// Set m->sched.sp = SP, so that if a panic happens // during the function we are about to execute, it will // have a valid SP to run on the g0 stack. // The next few lines (after the havem label) // will save this SP onto the stack and then write // the same SP back to m->sched.sp. That seems redundant, // but if an unrecovered panic happens, unwindm will // restore the g->sched.sp from the stack location
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Sat May 11 20:38:24 UTC 2024 - 60.4K bytes - Viewed (0) -
tensorflow/compiler/jit/mark_for_compilation_pass_test.cc
TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(&graph)); auto clusters = GetClusters(*graph); EXPECT_TRUE(clusters.empty()); } TEST(XlaCompilationTest, ConstOp) { // valid data type { std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global())); Scope root = Scope::NewRootScope().ExitOnError(); auto c = ops::Const(root.WithOpName("const"), 0.5f);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 14 10:11:10 UTC 2024 - 79.6K bytes - Viewed (0)