Results 1 - 10 of 123 for getn (0.05 sec)
tensorflow/compiler/mlir/tensorflow/transforms/hoist_replicate_invariant_resource_writes.cc

        tf_device::ReplicateOp replicate_op,
        llvm::ArrayRef<TF::AssignVariableOp> tail_assign_variable_ops) {
      const auto num_replicas = replicate_op.getN();
      auto return_op = llvm::dyn_cast<tf_device::ReturnOp>(
          replicate_op.getRegion().front().getTerminator());

      // Get the new result types.
      // TODO(prakalps): Do not add a value to returned values if it is already
      // returned.

Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Nov 03 12:35:38 UTC 2022 - 5.8K bytes - Viewed (0)
tensorflow/compiler/mlir/tensorflow/transforms/rewrite_tpu_embedding_ops.cc

    // then update the OperandSegmentSize attribute.
    if (send_op) {
      int32_t operand_sizes[] = {static_cast<int32_t>(send_op.getN()),
                                 static_cast<int32_t>(send_op.getNN()), 1};
      auto operand_size_attr = builder.getDenseI32ArrayAttr(operand_sizes);
      NamedAttrList attrs(send_op->getAttrs());

Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 15 22:55:42 UTC 2024 - 4.5K bytes - Viewed (0)
tensorflow/compiler/mlir/tensorflow/transforms/replicate_invariant_op_hoisting.cc

    // invariant. Shape ops are rewritten to be invariant when possible, prior to
    // hoisting ops.
    void HoistReplicateInvariantOps(tf_device::ReplicateOp replicate_op) {
      const int num_replicas = replicate_op.getN();
      Block* replicate_block = &replicate_op.GetBody();

      // A `ShapeOp` that directly depends on a `tf_device.replicate` param and does
      // not have a virtual device is assumed to return the same shape across all

Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 7.5K bytes - Viewed (0)
platforms/core-configuration/model-core/src/integTest/groovy/org/gradle/model/managed/CyclicalManagedTypeIntegrationTest.groovy
Registered: Wed Jun 12 18:38:38 UTC 2024 - Last Modified: Thu Sep 28 09:51:04 UTC 2023 - 3.1K bytes - Viewed (0)
src/runtime/mwbbuf.go

    func (b *wbBuf) empty() bool {
        return b.next == uintptr(unsafe.Pointer(&b.buf[0]))
    }

    // getX returns space in the write barrier buffer to store X pointers.
    // getX will flush the buffer if necessary. Callers should use this as:
    //
    //    buf := &getg().m.p.ptr().wbBuf
    //    p := buf.get2()
    //    p[0], p[1] = old, new
    //    ... actual memory write ...
    //

Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 8.1K bytes - Viewed (0)
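The doc comment above spells out the caller-side pattern for getX: ask for slots, write into them, and let the buffer flush itself when full. As a rough self-contained illustration of that flush-on-full idea (a toy analogue, not the runtime's wbBuf; ptrBuf, bufLen, and the int index are invented here, and the real buffer hands its contents to the garbage collector on flush):

    package main

    import "fmt"

    const bufLen = 8 // invented capacity; the runtime sizes its buffer differently

    // ptrBuf is a toy stand-in for the runtime's wbBuf: a fixed-size pointer
    // buffer that hands out slots two at a time and flushes when full.
    type ptrBuf struct {
        next int // index of the next free slot in buf
        buf  [bufLen]uintptr
    }

    // get2 returns space for two pointers, flushing first if fewer than two
    // slots remain — mirroring the documented "p := buf.get2()" caller pattern.
    func (b *ptrBuf) get2() *[2]uintptr {
        if b.next+2 > len(b.buf) {
            b.flush()
        }
        p := (*[2]uintptr)(b.buf[b.next : b.next+2])
        b.next += 2
        return p
    }

    // flush drains the buffered pointers; the real write barrier buffer hands
    // them to the garbage collector at this point.
    func (b *ptrBuf) flush() {
        fmt.Println("flush:", b.buf[:b.next])
        b.next = 0
    }

    func main() {
        var b ptrBuf
        for i := uintptr(1); i <= 10; i += 2 {
            p := b.get2()
            p[0], p[1] = i, i+1 // stands in for "old, new"
        }
        b.flush()
    }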
tests/test_ambiguous_params.py

                " default value with `=` instead."
            ),
        ):

            @app.get("/")
            async def get(item_id: Annotated[int, Query(default=1)]):
                pass  # pragma: nocover


    def test_multiple_annotations():
        async def dep():
            pass  # pragma: nocover

        @app.get("/multi-query")
        async def get(foo: Annotated[int, Query(gt=2), Query(lt=10)]):
            return foo

        with pytest.raises(

Registered: Mon Jun 17 08:32:26 UTC 2024 - Last Modified: Tue Dec 12 00:22:47 UTC 2023 - 2.1K bytes - Viewed (0)
platforms/core-configuration/model-groovy/src/integTest/groovy/org/gradle/model/dsl/internal/transform/ModelDslRuleDetectionIntegrationSpec.groovy

    }

    @Managed
    interface A extends Item {
        B getB()
    }

    @Managed
    interface B extends Item {
        C getC()
    }

    @Managed
    interface C extends Item {
        D getD()
    }

    @Managed
    interface D extends Item {
    }

Registered: Wed Jun 12 18:38:38 UTC 2024 - Last Modified: Thu Sep 28 09:51:04 UTC 2023 - 4.3K bytes - Viewed (0)
src/runtime/net_plan9.go

    func runtime_ignoreHangup() {
        getg().m.ignoreHangup = true
    }

    //go:linkname runtime_unignoreHangup internal/poll.runtime_unignoreHangup
    func runtime_unignoreHangup(sig string) {
        getg().m.ignoreHangup = false
    }

    func ignoredNote(note *byte) bool {
        if note == nil {
            return false
        }
        if gostringnocopy(note) != "hangup" {
            return false
        }
        return getg().m.ignoreHangup

Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Feb 13 18:36:28 UTC 2017 - 645 bytes - Viewed (0)
src/runtime/debugcall.go

    // explaining why.
    //
    //go:nosplit
    func debugCallCheck(pc uintptr) string {
        // No user calls from the system stack.
        if getg() != getg().m.curg {
            return debugCallSystemStack
        }
        if sp := getcallersp(); !(getg().stack.lo < sp && sp <= getg().stack.hi) {
            // Fast syscalls (nanotime) and racecall switch to the
            // g0 stack without switching g. We can't safely make

Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Apr 05 20:50:21 UTC 2024 - 7.1K bytes - Viewed (0)
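The comments here explain the invariant being tested: a goroutine's stack occupies the range (stack.lo, stack.hi], so a caller SP outside that range means the code is running on some other stack (such as g0) even though getg() still reports the user g. A minimal sketch of just that bounds check, with the runtime's g and stack types replaced by invented stand-ins:

    package main

    import "fmt"

    // stack is an invented stand-in for the runtime's stack bounds: the
    // stack occupies the address range (lo, hi].
    type stack struct {
        lo, hi uintptr
    }

    // onStack mirrors the check in debugCallCheck: a stack pointer belongs
    // to this stack only if lo < sp && sp <= hi.
    func onStack(sp uintptr, s stack) bool {
        return s.lo < sp && sp <= s.hi
    }

    func main() {
        g := stack{lo: 0x1000, hi: 0x2000}
        fmt.Println(onStack(0x1800, g)) // true: SP inside the user stack
        fmt.Println(onStack(0x8000, g)) // false: e.g. running on another stack
    }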
src/runtime/proflabel.go

    //
    //go:linkname runtime_setProfLabel runtime/pprof.runtime_setProfLabel
    func runtime_setProfLabel(labels unsafe.Pointer) {
        // Introduce race edge for read-back via profile.
        // This would more properly use &getg().labels as the sync address,
        // but we do the read in a signal handler and can't call the race runtime then.
        //
        // This uses racereleasemerge rather than just racerelease so

Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 16:25:21 UTC 2024 - 2.1K bytes - Viewed (0)