- Sort Score
- Result 10 results
- Languages All
Results 11 - 20 of 828 for shake (0.05 sec)
-
src/vendor/golang.org/x/sys/cpu/cpu.go
HasSHA1 bool // K{I,L}MD-SHA-1 functions HasSHA256 bool // K{I,L}MD-SHA-256 functions HasSHA512 bool // K{I,L}MD-SHA-512 functions HasSHA3 bool // K{I,L}MD-SHA3-{224,256,384,512} and K{I,L}MD-SHAKE-{128,256} functions HasVX bool // vector facility HasVXE bool // vector-enhancements facility 1 _ CacheLinePad } func init() { archInit() initOptions() processOptions() }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 08 16:12:58 UTC 2024 - 12.1K bytes - Viewed (0) -
subprojects/core/src/integTest/groovy/org/gradle/configuration/ExecuteUserLifecycleListenerBuildOperationIntegrationTest.groovy
verifyHasChildren(whenReadyEvaluated, initScriptAppId, 'init', expectedGradleOps) } def 'no extra executions for composite builds'() { // This test does two things: // - shake out internal listener registration that isn't using InternalListener. // There are a lot of listeners registered through the methods that we've decorated in the composite build code
Registered: Wed Jun 12 18:38:38 UTC 2024 - Last Modified: Mon May 20 11:16:24 UTC 2024 - 40.2K bytes - Viewed (0) -
src/runtime/pprof/pprof_test.go
} } }() // Short-lived goroutines exercise different code paths (goroutines with // status _Gdead, for instance). This churn doesn't have behavior that // we can test directly, but does help to shake out data races. ready.Add(1) var churn func(i int) churn = func(i int) { SetGoroutineLabels(WithLabels(ctx, Labels(t.Name()+"-churn-i", fmt.Sprint(i)))) if i == 0 { ready.Done()
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 18:42:28 UTC 2024 - 68.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/shape-inference.mlir
// RUN: tf-opt -split-input-file -verify-diagnostics --tf-shape-inference %s | FileCheck %s module attributes {tf.versions = {producer = 888 : i32}} { // CHECK-LABEL: testConv2dShapeValidPadding func.func @testConv2dShapeValidPadding(%arg0: tensor<1x112x80x128xf32>, %arg1: tensor<128x3x3x128xf32>, %arg2: tensor<128xf32>) -> tensor<1x?x?x128xf32> {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 11.5K bytes - Viewed (0) -
src/runtime/mbarrier.go
// this will shade it. // // 3. Once a goroutine's stack is black, the shade(ptr) becomes // unnecessary. shade(ptr) prevents hiding an object by moving it from // the stack to the heap, but this requires first having a pointer // hidden on the stack. Immediately after a stack is scanned, it only // points to shaded objects, so it's not hiding anything, and the
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 15.7K bytes - Viewed (0) -
tensorflow/c/tf_tensor.h
// Copy the internal data representation of `from` to `to`. `new_dims` and // `num_new_dims` specify the new shape of the `to` tensor, `type` specifies its // data type. On success, *status is set to TF_OK and the two tensors share the // same data buffer. // // This call requires that the `from` tensor and the given type and shape (dims // and num_dims) are "compatible" (i.e. they occupy the same number of bytes).
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Mar 06 16:40:30 UTC 2024 - 6.3K bytes - Viewed (0) -
src/cmd/compile/internal/test/testdata/mergelocals/integration.go
// Copyright 2024 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package p // This type and the following one will share the same GC shape and size. type Pointery struct { p *Pointery x [1024]int } type Pointery2 struct { p *Pointery2 x [1024]int } // This type and the following one will have the same size. type Vanilla struct {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Apr 09 17:42:19 UTC 2024 - 1.3K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_device.h
namespace tensorflow { class XlaDevice : public LocalDevice { public: // Given a tensor, sets `xla::Shape*` the shape of tensor's representation // on device, fully padded. On error, the contents of `xla::Shape*` // are undefined. typedef std::function<Status(const Tensor&, xla::Shape*)> PaddedShapeFn; // Wrapper class to store metadata about the XlaDevice, where it can be
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 13.4K bytes - Viewed (0) -
src/runtime/slice.go
} } else { // Note: can't use rawmem (which avoids zeroing of memory), because then GC can scan uninitialized memory. to = mallocgc(tomem, et, true) if copymem > 0 && writeBarrier.enabled { // Only shade the pointers in old.array since we know the destination slice to // only contains nil pointers because it has been cleared during alloc. // // It's safe to pass a type to this function as an optimization because
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 16:25:21 UTC 2024 - 12.2K bytes - Viewed (0) -
src/runtime/proc.go
runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&pp.runnext))) if tail == atomic.Load(&pp.runqtail) { return head == tail && runnext == 0 } } } // To shake out latent assumptions about scheduling order, // we introduce some randomness into scheduling decisions // when running with the race detector. // The need for this was made obvious by changing the
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 207.5K bytes - Viewed (0)