- Sort Score
- Results per page: 10
- Languages All
Results 31 - 40 of 1,700 for shake (0.04 sec)
-
tensorflow/cc/framework/scope_internal.h
// name that has not been used so far in a scope will get no suffix. Later // uses of the same name will get suffixes _1, _2, _3, etc. Multiple scopes // can share the same NameMap. For instance, a new scope created using // WithControlDependencies() would share the same NameMap with the parent. typedef std::unordered_map<string, int> NameMap; Impl(const std::shared_ptr<Graph>& graph, const std::shared_ptr<Status>& status,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Oct 05 15:46:43 UTC 2022 - 5.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/shared_variable_v1.py
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["key2"] def Test(): x = tf.constant([[1.0], [1.0], [1.0]]) y = tf.get_variable( name='y', shape=(1, 3), initializer=tf.random_normal_initializer(), trainable=True) r = tf.matmul(x, y) tensor_info_x = tf.saved_model.utils.build_tensor_info(x) tensor_info_r = tf.saved_model.utils.build_tensor_info(r)
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Oct 31 08:49:35 UTC 2023 - 2.7K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_device.h
namespace tensorflow { class XlaDevice : public LocalDevice { public: // Given a tensor, sets `xla::Shape*` the shape of tensor's representation // on device, fully padded. On error, the contents of `xla::Shape*` // are undefined. typedef std::function<Status(const Tensor&, xla::Shape*)> PaddedShapeFn; // Wrapper class to store metadata about the XlaDevice, where it can be
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 13.4K bytes - Viewed (0) -
src/runtime/slice.go
} } else { // Note: can't use rawmem (which avoids zeroing of memory), because then GC can scan uninitialized memory. to = mallocgc(tomem, et, true) if copymem > 0 && writeBarrier.enabled { // Only shade the pointers in old.array since we know the destination slice to // only contains nil pointers because it has been cleared during alloc. // // It's safe to pass a type to this function as an optimization because
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 16:25:21 UTC 2024 - 12.2K bytes - Viewed (0) -
src/runtime/proc.go
runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&pp.runnext))) if tail == atomic.Load(&pp.runqtail) { return head == tail && runnext == 0 } } } // To shake out latent assumptions about scheduling order, // we introduce some randomness into scheduling decisions // when running with the race detector. // The need for this was made obvious by changing the
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 207.5K bytes - Viewed (0) -
tensorflow/cc/gradients/nn_grad_test.cc
TEST_F(NNGradTest, SoftmaxGrad) { TensorShape shape({32, 10}); auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape)); auto y = Softmax(scope_, x); RunTest(x, shape, y, shape); } TEST_F(NNGradTest, SoftmaxRank3Grad) { TensorShape shape({32, 1, 10}); auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape)); auto y = Softmax(scope_, x); RunTest(x, shape, y, shape); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 22 20:45:22 UTC 2022 - 15K bytes - Viewed (0) -
tensorflow/cc/gradients/array_grad_test.cc
TensorShape shape({2, 2}); auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape)); auto indices = Const(scope_, {{1}, {0}}); auto y = GatherNd(scope_, x, indices); RunTest(x, shape, y, shape); } TEST_F(ArrayGradTest, GatherNdGrad_SliceIndexing_Int64) { TensorShape shape({2, 2}); auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape)); auto indices = Cast(scope_, Const(scope_, {{1}, {0}}), DT_INT64);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Oct 10 23:33:32 UTC 2023 - 19.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/replicate_invariant_op_hoisting.mlir
} func.return } // CHECK: %[[SHAPE:[0-9]*]] = "tf.Shape"(%[[ARG_0]]) // CHECK: %[[OP_A:[0-9]*]] = "tf.opA"(%[[SHAPE]]) // CHECK: %[[OP_B:[0-9]*]] = "tf.opB"(%[[SHAPE]], %[[OP_A]]) // CHECK: tf_device.replicate // CHECK: tf_device.return %[[SHAPE]], %[[OP_A]], %[[OP_B]] // CHECK-LABEL: func @nested_ops
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Oct 31 08:59:10 UTC 2023 - 11.9K bytes - Viewed (0) -
src/cmd/go/internal/str/str_test.go
// if either host or share name is different, return s {`\\host\share\foo`, ``, `\\host\share\foo`}, {`\\host\share\foo`, `\foo`, `\\host\share\foo`}, {`\\host\share\foo`, `\\host\other\`, `\\host\share\foo`}, {`\\host\share\foo`, `\\other\share\`, `\\host\share\foo`}, {`\\host\share\foo`, `\\host\`, `\\host\share\foo`}, {`\\host\share\foo`, `\share\`, `\\host\share\foo`}, // only volume names are case-insensitive
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Jan 25 16:49:13 UTC 2023 - 5.4K bytes - Viewed (0) -
tensorflow/c/eager/unified_api_test.cc
absl::Span<AbstractTensorHandle*> outputs) { PartialTensorShape shape; TF_RETURN_IF_ERROR(inputs[0]->Shape(&shape)); if (shape.dims() != 0) { return errors::InvalidArgument( "Tensor expected to have scalar shape found rank: ", shape.dims()); } return absl::OkStatus(); } TEST_P(UnifiedAPI, TestTensorShapeScalar) { if (UseFunction() && UseMlir()) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Feb 27 13:57:45 UTC 2024 - 6.7K bytes - Viewed (0)