Results 11 - 20 of 290 for shake (0.08 sec)
src/runtime/proc.go
		runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&pp.runnext)))
		if tail == atomic.Load(&pp.runqtail) {
			return head == tail && runnext == 0
		}
	}
}

// To shake out latent assumptions about scheduling order,
// we introduce some randomness into scheduling decisions
// when running with the race detector.
// The need for this was made obvious by changing the

Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 207.5K bytes - Viewed (0)
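The runtime comment above describes a deliberate stress-testing trick: under the race detector, the scheduler occasionally makes a random choice between equally valid options, so code that silently depends on one particular scheduling order fails early. A minimal standalone sketch of the idea, with hypothetical names and an assumed 50/50 split (not the runtime's actual policy):

	package sched

	import "math/rand"

	// raceenabled stands in for the build-tag flag the runtime uses;
	// here it is just a variable for illustration.
	var raceenabled = true

	// pickNext chooses between two equally valid runnable items. Under
	// the race detector it flips a coin, so callers that silently rely
	// on a fixed order get shaken out early. (Hypothetical sketch, not
	// src/runtime/proc.go's implementation.)
	func pickNext(a, b int) int {
		if raceenabled && rand.Intn(2) == 0 {
			return b
		}
		return a
	}

The point of the coin flip is not fairness; it is to make order-dependent bugs reproduce under test instead of in production.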
tensorflow/compiler/mlir/tf2xla/api/v1/compile_mlir_util.cc
// Extracts shape from XlaArgument as TensorShape. If shape is a xla::Shape,
// that is converted to a TensorShape.
absl::StatusOr<TensorShape> GetTensorShapeFromXlaArgument(
    const XlaArgument& arg) {
  if (absl::holds_alternative<xla::Shape>(arg.shape)) {
    TensorShape arg_shape;
    TF_RETURN_IF_ERROR(
        XLAShapeToTensorShape(std::get<xla::Shape>(arg.shape), &arg_shape));
    return arg_shape;

Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 21 17:24:39 UTC 2024 - 45.3K bytes - Viewed (0)
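For readers following the C++ above: GetTensorShapeFromXlaArgument is a variant dispatch. arg.shape may hold a concrete xla::Shape or another alternative, and only the concrete case converts to a TensorShape. A rough Go analogue of the pattern using a type assertion, with all names hypothetical:

	package shapes

	import "fmt"

	// TensorShape stands in for a concrete dimension list.
	type TensorShape []int64

	// getTensorShape mirrors the holds_alternative/get pattern: convert
	// only when the argument actually carries a concrete shape.
	func getTensorShape(argShape any) (TensorShape, error) {
		if s, ok := argShape.(TensorShape); ok {
			return s, nil
		}
		return nil, fmt.Errorf("argument does not carry a concrete shape")
	}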
tensorflow/compiler/jit/shape_inference.cc
// Merge node causes a loop so we remove NextIteration->Merge edge before
// performing shape inference. But removing those edges also prevents us
// from inferring output shape for Merge node (we need shapes for all its
// inputs).
// For loop invariant resource input's Merge node, we set output resource
// shape as Enter node's resource shape.
// TODO(b/129367850): clean this up.

Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 31 00:41:19 UTC 2024 - 13K bytes - Viewed (0)
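The comment describes a general tactic for running a dataflow pass over a cyclic graph: cut the back edges first so shapes can be propagated in one forward pass, then recover the information the cut loses from a node known to carry it (here, the Enter node's resource shape). A toy sketch of the edge-cutting step, with invented types rather than TensorFlow's graph API:

	package infer

	// edge is a directed src -> dst link in a toy dataflow graph.
	type edge struct{ src, dst string }

	// dropBackEdges removes edges whose source is a NextIteration-style
	// node so the remaining graph is acyclic and shapes can be
	// propagated in a single forward pass. (Toy sketch only.)
	func dropBackEdges(edges []edge, isBackSrc func(string) bool) []edge {
		var kept []edge
		for _, e := range edges {
			if !isBackSrc(e.src) {
				kept = append(kept, e)
			}
		}
		return kept
	}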
tensorflow/compiler/jit/xla_host_send_device_context.h
//   se::DeviceMemoryBase gpu_dst{device_tensor.data(), 4 * sizeof(float)};
//   xla::Shape shape(xla::F32, {2, 2}, {}, {})
//   tsl::AsyncValueRef<std::unique_ptr<se::Event>> done_event =
//       tsl::MakeConstructedAsyncValueRef<std::unique_ptr<se::Event>>(stream.parent());
//   done_event->Init();
//
//   XlaHostSendDeviceContext device_context(&stream, &gpu_dst,
//                                           shape, done_event);
//   device_context.CopyCPUTensorToDeviceSync(

Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 22:46:36 UTC 2024 - 3.7K bytes - Viewed (0)
src/runtime/pprof/protomem_test.go
const expectedLocation = "runtime/pprof.nonRecursiveGenericAllocFunction[go.shape.struct {},go.shape.struct { runtime/pprof.buf [128]uint8 }];runtime/pprof.nonRecursiveGenericAllocFunction[go.shape.struct...
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue May 21 14:38:45 UTC 2024 - 6.7K bytes - Viewed (0)
src/math/big/arith_decl.go
// Notable members of the hall of shame include:
//   - github.com/remyoudompheng/bigfft
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname addVV
//go:noescape
func addVV(z, x, y []Word) (c Word)

// subVV should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/remyoudompheng/bigfft

Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 01:15:13 UTC 2024 - 2.6K bytes - Viewed (0)
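These directives are what make the outside access legal: since go.dev/issue/67401 (Go 1.23+), a standard-library symbol must be marked with //go:linkname before other modules may pull it. A pull-style linkname from user code would look roughly like the sketch below; note the blank unsafe import, and that the declaring package also needs an (empty) .s file so the body-less declaration compiles:

	package fastadd

	import (
		"math/big"
		_ "unsafe" // blank import required for go:linkname
	)

	// addVV pulls math/big's internal vector add; the signature must
	// match arith_decl.go exactly. (Sketch: this package also needs an
	// empty .s file for the body-less declaration to compile.)
	//
	//go:linkname addVV math/big.addVV
	func addVV(z, x, y []big.Word) (c big.Word)

This is exactly the pattern the "hall of shame" comment is complaining about: once a package like bigfft does this, the internal signature is frozen.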
tensorflow/compiler/jit/xla_tpu_device.cc
// Given a tensor of `shape` and `type`, as what shape should it be stored on
// the TPU device? This function transposes or flattens the excessively-padded
// tensors to rank 1, but leaves other tensor shapes alone.
absl::StatusOr<xla::Shape> TpuShapeRepresentation(
    const TensorShape& shape, DataType type, bool use_fast_memory,
    XlaLayoutPreference layout_preference) {
  xla::Shape xla_shape;
  TF_RETURN_IF_ERROR(

Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 28 22:53:47 UTC 2024 - 20.9K bytes - Viewed (0)
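The trade-off this comment describes, storing a tensor flattened to rank 1 when the device layout would pad its natural shape excessively, can be sketched independently of the TPU specifics. Everything below, the 2x threshold in particular, is an assumed illustration, not the real heuristic:

	package tpushape

	// storedShape decides how a tensor should be laid out on the device:
	// if padding the natural shape would more than double its size, store
	// it flattened to rank 1 instead. The threshold is an assumption for
	// illustration only.
	func storedShape(shape []int64, paddedSize, rawSize int64) []int64 {
		if paddedSize > 2*rawSize {
			n := int64(1)
			for _, d := range shape {
				n *= d
			}
			return []int64{n}
		}
		return shape
	}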
tensorflow/compiler/mlir/tensorflow/transforms/shape_inference.cc
}
SmallVector<int64_t> shape;
bool refined_shape = false;
// Build the shape of the refined type: if lhs is unranked, it
// will be directly the shape of the refined type; otherwise we merge by
// taking the most specialized. This combines `10x?x?` and `?x?x8` into
// `10x?x8`.
if (!lhs_shape_type.hasRank()) {
  if (rhs_shape_type.hasRank()) {
    shape.append(rhs_shape_type.getShape().begin(),

Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Jun 08 07:28:49 UTC 2024 - 134.1K bytes - Viewed (0)
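The merge rule stated in this comment is easy to pin down: walk the two ranked shapes axis by axis and keep whichever side has the known extent. A small Go sketch, writing -1 for a `?` dimension (names hypothetical; the real pass also checks that two known extents agree):

	package refine

	const dynamic = -1 // stands in for a '?' dimension

	// mergeShapes combines two ranked shapes of equal rank, preferring
	// the more specialized (known) extent on each axis, e.g.
	// {10, dynamic, dynamic} + {dynamic, dynamic, 8} -> {10, dynamic, 8}.
	func mergeShapes(lhs, rhs []int64) []int64 {
		merged := make([]int64, len(lhs))
		for i := range lhs {
			if lhs[i] != dynamic {
				merged[i] = lhs[i]
			} else {
				merged[i] = rhs[i]
			}
		}
		return merged
	}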
tensorflow/compiler/jit/shape_inference_test.cc
auto c = ops::Add(root.WithOpName("C"), a, b);
auto d = ops::Neg(root.WithOpName("D"), c);
a.node()->AddAttr("_index", 0);
b.node()->AddAttr("_index", 1);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_CHECK_OK(root.ToGraph(graph.get()));
std::map<int, InferredShape> arg_shapes;
arg_shapes[0].shape = TensorShape({2, 3});
arg_shapes[1].shape = TensorShape({2, 3});

Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 31 00:41:19 UTC 2024 - 10.3K bytes - Viewed (0)
tensorflow/compiler/jit/xla_launch_util.cc
      TensorShape shape;
      TF_RETURN_IF_ERROR(XLAShapeToTensorShape(subshape, &shape));
      output_tensor_shapes.push_back(shape);
    }
  } else {
    for (int i = 0; i < ctx->num_outputs(); ++i) {
      output_tensor_shapes.push_back(compilation_result->outputs[i].shape);
    }
  }

  // Copy XLA results to the OpOutputList.
  int output_num = 0;
  for (int i = 0, end = ctx->num_outputs(); i < end; ++i) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 16 00:36:08 UTC 2024 - 40.4K bytes - Viewed (0)