- Sort Score
- Results per page: 10
- Languages All
Results 1 - 10 of 491 for shade (0.15 sec)
-
src/runtime/mbarrier.go
// to unlink an object from the heap, this will shade it. // // 2. shade(ptr) prevents a mutator from hiding an object by moving // the sole pointer to it from its stack into a black object in the // heap. If it attempts to install the pointer into a black object, // this will shade it. // // 3. Once a goroutine's stack is black, the shade(ptr) becomes // unnecessary. shade(ptr) prevents hiding an object by moving it from
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 15.7K bytes - Viewed (0) -
src/runtime/slice.go
} } else { // Note: can't use rawmem (which avoids zeroing of memory), because then GC can scan uninitialized memory. to = mallocgc(tomem, et, true) if copymem > 0 && writeBarrier.enabled { // Only shade the pointers in old.array since we know the destination slice to // only contains nil pointers because it has been cleared during alloc. // // It's safe to pass a type to this function as an optimization because
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 16:25:21 UTC 2024 - 12.2K bytes - Viewed (0) -
src/runtime/profbuf.go
// we finish executing. If a GC is in progress right now, it must // keep gp.labels alive, because gp.labels is reachable from gp. // If gp were to overwrite gp.labels, the deletion barrier would // still shade that pointer, which would preserve it for the // in-progress GC, so all is well. Any future GC will see the // value we copied when scanning b.tags (heap-allocated). // We arrange that the store here is always overwriting a nil,
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 18.2K bytes - Viewed (0) -
platforms/documentation/docs/src/docs/userguide/releases/migrating/migrating_from_maven.adoc
[[migmvn:common_plugins]] == Migrating common plugins Maven and Gradle share a common approach of extending the build through plugins. Although the plugin systems are very different beneath the surface, they share many feature-based plugins, such as: * Shade/Shadow * Jetty * Checkstyle * JaCoCo * AntRun (see further down)
Registered: Wed Jun 12 18:38:38 UTC 2024 - Last Modified: Sat Mar 23 22:37:03 UTC 2024 - 40.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/shape-inference.mlir
// RUN: tf-opt -split-input-file -verify-diagnostics --tf-shape-inference %s | FileCheck %s module attributes {tf.versions = {producer = 888 : i32}} { // CHECK-LABEL: testConv2dShapeValidPadding func.func @testConv2dShapeValidPadding(%arg0: tensor<1x112x80x128xf32>, %arg1: tensor<128x3x3x128xf32>, %arg2: tensor<128xf32>) -> tensor<1x?x?x128xf32> {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 11.5K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_device.h
namespace tensorflow { class XlaDevice : public LocalDevice { public: // Given a tensor, sets `xla::Shape*` the shape of tensor's representation // on device, fully padded. On error, the contents of `xla::Shape*` // are undefined. typedef std::function<Status(const Tensor&, xla::Shape*)> PaddedShapeFn; // Wrapper class to store metadata about the XlaDevice, where it can be
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 13.4K bytes - Viewed (0) -
tensorflow/cc/gradients/array_grad_test.cc
TensorShape shape({2, 2}); auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape)); auto indices = Const(scope_, {{1}, {0}}); auto y = GatherNd(scope_, x, indices); RunTest(x, shape, y, shape); } TEST_F(ArrayGradTest, GatherNdGrad_SliceIndexing_Int64) { TensorShape shape({2, 2}); auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape)); auto indices = Cast(scope_, Const(scope_, {{1}, {0}}), DT_INT64);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Oct 10 23:33:32 UTC 2023 - 19.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/replicate_invariant_op_hoisting.mlir
} func.return } // CHECK: %[[SHAPE:[0-9]*]] = "tf.Shape"(%[[ARG_0]]) // CHECK: %[[OP_A:[0-9]*]] = "tf.opA"(%[[SHAPE]]) // CHECK: %[[OP_B:[0-9]*]] = "tf.opB"(%[[SHAPE]], %[[OP_A]]) // CHECK: tf_device.replicate // CHECK: tf_device.return %[[SHAPE]], %[[OP_A]], %[[OP_B]] // CHECK-LABEL: func @nested_ops
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Oct 31 08:59:10 UTC 2023 - 11.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/experimental/tac/transforms/device_transform_patterns.cc
if (pack_axis < 0) { pack_axis += rank; } // Concat out shape. for (int i = 0; i < rank; ++i) { int64_t dim_size = input_type.getDimSize(i); if (i == pack_axis) { dim_size *= count; } concat_out_shape.push_back(dim_size); } // Pack out shape. int j = 0; for (int i = 0; i < rank + 1; ++i) { if (i == pack_axis) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 25.4K bytes - Viewed (0) -
tensorflow/cc/gradients/math_grad_test.cc
xs.push_back(Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape))); auto y = AddN(scope_, xs); RunTest(xs, {shape, shape, shape}, {y}, {shape}); } TEST_F(NaryGradTest, Add) { TensorShape x1_shape({3, 2, 5}); TensorShape x2_shape({2, 5}); auto x1 = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x1_shape)); auto x2 = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x2_shape));
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Aug 25 18:20:20 UTC 2023 - 36K bytes - Viewed (0)