- Sort Score
- Result 10 results
- Languages All
Results 1 - 10 of 593 for shake (0.06 sec)
-
platforms/software/dependency-management/src/test/groovy/org/gradle/api/internal/artifacts/ivyservice/resolveengine/excludes/factories/NormalizingExcludeFactoryTest.groovy
Registered: Wed Jun 12 18:38:38 UTC 2024 - Last Modified: Tue Oct 10 21:10:11 UTC 2023 - 17K bytes - Viewed (0) -
src/vendor/golang.org/x/sys/cpu/cpu.go
HasSHA1 bool // K{I,L}MD-SHA-1 functions HasSHA256 bool // K{I,L}MD-SHA-256 functions HasSHA512 bool // K{I,L}MD-SHA-512 functions HasSHA3 bool // K{I,L}MD-SHA3-{224,256,384,512} and K{I,L}MD-SHAKE-{128,256} functions HasVX bool // vector facility HasVXE bool // vector-enhancements facility 1 _ CacheLinePad } func init() { archInit() initOptions() processOptions() }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 08 16:12:58 UTC 2024 - 12.1K bytes - Viewed (0) -
platforms/documentation/docs/src/docs/userguide/optimizing-performance/build-cache/build_cache_debugging.adoc
[[finding_problems]] == Finding problems with task output caching Below we describe a step-by-step process that should help shake out any problems with caching in your build. === Ensure incremental build works First, make sure your build does the right thing without the cache. Run a build twice without enabling the Gradle build cache.
Registered: Wed Jun 12 18:38:38 UTC 2024 - Last Modified: Thu Dec 07 01:37:51 UTC 2023 - 15K bytes - Viewed (0) -
subprojects/core/src/integTest/groovy/org/gradle/configuration/ExecuteUserLifecycleListenerBuildOperationIntegrationTest.groovy
verifyHasChildren(whenReadyEvaluated, initScriptAppId, 'init', expectedGradleOps) } def 'no extra executions for composite builds'() { // This test does two things: // - shake out internal listener registration that isn't using InternalListener. // There are a lot of listeners registered through the methods that we've decorated in the composite build code
Registered: Wed Jun 12 18:38:38 UTC 2024 - Last Modified: Mon May 20 11:16:24 UTC 2024 - 40.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/shape-inference.mlir
// RUN: tf-opt -split-input-file -verify-diagnostics --tf-shape-inference %s | FileCheck %s module attributes {tf.versions = {producer = 888 : i32}} { // CHECK-LABEL: testConv2dShapeValidPadding func.func @testConv2dShapeValidPadding(%arg0: tensor<1x112x80x128xf32>, %arg1: tensor<128x3x3x128xf32>, %arg2: tensor<128xf32>) -> tensor<1x?x?x128xf32> {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 11.5K bytes - Viewed (0) -
src/runtime/mbarrier.go
// this will shade it. // // 3. Once a goroutine's stack is black, the shade(ptr) becomes // unnecessary. shade(ptr) prevents hiding an object by moving it from // the stack to the heap, but this requires first having a pointer // hidden on the stack. Immediately after a stack is scanned, it only // points to shaded objects, so it's not hiding anything, and the
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 15.7K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_device.h
namespace tensorflow { class XlaDevice : public LocalDevice { public: // Given a tensor, sets `xla::Shape*` the shape of tensor's representation // on device, fully padded. On error, the contents of `xla::Shape*` // are undefined. typedef std::function<Status(const Tensor&, xla::Shape*)> PaddedShapeFn; // Wrapper class to store metadata about the XlaDevice, where it can be
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 13.4K bytes - Viewed (0) -
src/runtime/slice.go
} } else { // Note: can't use rawmem (which avoids zeroing of memory), because then GC can scan uninitialized memory. to = mallocgc(tomem, et, true) if copymem > 0 && writeBarrier.enabled { // Only shade the pointers in old.array since we know the destination slice to // only contains nil pointers because it has been cleared during alloc. // // It's safe to pass a type to this function as an optimization because
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 16:25:21 UTC 2024 - 12.2K bytes - Viewed (0) -
tensorflow/cc/gradients/nn_grad_test.cc
TEST_F(NNGradTest, SoftmaxGrad) { TensorShape shape({32, 10}); auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape)); auto y = Softmax(scope_, x); RunTest(x, shape, y, shape); } TEST_F(NNGradTest, SoftmaxRank3Grad) { TensorShape shape({32, 1, 10}); auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape)); auto y = Softmax(scope_, x); RunTest(x, shape, y, shape); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 22 20:45:22 UTC 2022 - 15K bytes - Viewed (0) -
tensorflow/cc/gradients/array_grad_test.cc
TensorShape shape({2, 2}); auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape)); auto indices = Const(scope_, {{1}, {0}}); auto y = GatherNd(scope_, x, indices); RunTest(x, shape, y, shape); } TEST_F(ArrayGradTest, GatherNdGrad_SliceIndexing_Int64) { TensorShape shape({2, 2}); auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape)); auto indices = Cast(scope_, Const(scope_, {{1}, {0}}), DT_INT64);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Oct 10 23:33:32 UTC 2023 - 19.3K bytes - Viewed (0)