- Sort Score
- Result 10 results
- Languages All
Results 41 - 50 of 546 for shape (0.38 sec)
-
tensorflow/compiler/jit/xla_host_recv_device_context.h
// xla::Shape shape(xla::F32, {2, 2}, {}, {}) // tsl::AsyncValueRef<std::unique_ptr<se::Event>> done_event = // tsl::MakeConstructedAsyncValueRef<std::unique_ptr<se::Event>>(stream.parent()); // done_event->Init(); // Tensor dest_cpu_tensor; // // XlaHostRecvDeviceContext device_context(&stream, gpu_dst, // shape, done_event); // device_context.CopyDeviceTensorToCPUSync(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 22:46:36 UTC 2024 - 3.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/utils/convert_type.cc
llvm::SmallVectorImpl<int64_t>* shape) { shape->reserve(input_shape.dims()); for (const auto& d : input_shape) { shape->push_back(d.size == kTFDynamicSize ? ShapedType::kDynamic : d.size); } } Status ConvertToMlirShape(const TensorShapeProto& input_shape, llvm::SmallVectorImpl<int64_t>* shape) { shape->reserve(input_shape.dim_size());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Apr 26 09:37:10 UTC 2024 - 7.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/fold_broadcast_pass.cc
} // Helper method that given 'shape' and 'current_index' representing // index in broadcasted tensor, get the index in the flat original tensor. // 'shape' is computed from the original shape and the broadcast dimensions to // match result shape. int64_t GetElementIndex(llvm::SmallVectorImpl<int64_t> &shape, llvm::SmallVectorImpl<int64_t> &current_index) { int64_t ind = 0;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 10.5K bytes - Viewed (0) -
tensorflow/c/experimental/saved_model/core/ops/variable_ops.cc
TF_RETURN_IF_ERROR(varhandle_op->SetAttrType("dtype", dtype)); // Note that if shape is unknown rank, shape.dim_sizes() will be empty, and // shape.dims() will be -1. absl::InlinedVector<int64_t, 4UL> dim_sizes = shape.dim_sizes(); TF_RETURN_IF_ERROR(varhandle_op->SetAttrShape( "shape", reinterpret_cast<const int64_t*>(dim_sizes.data()), shape.dims())); TF_RETURN_IF_ERROR(varhandle_op->SetAttrString("container", "", 0));
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 23 11:28:19 UTC 2024 - 5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/end2end/unroll_batch_matmul_disabled.pbtxt
node { name: "Placeholder" op: "Placeholder" attr { key: "dtype" value { type: DT_FLOAT } } attr { key: "shape" value { shape { dim { size: 2 } dim { size: 5 } dim { size: 3 } } } } } node {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 1.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/ir/tfl_ops.cc
// 'shape' is the original shape with padding to match result shape. int64_t GetElementIndex(const std::vector<int64_t>& shape, const std::vector<int64_t>& current_index) { int64_t ind = 0; int64_t mul = 1; for (int i = shape.size() - 1; i >= 0; --i) { ind += (current_index[i] % shape[i]) * mul; mul *= shape[i]; } return ind; }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 169.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/optimize_patterns.td
// tail of the other operand and the intermediate result isn't used by other // ops. // $rhs is required to be the tail shape of $lhs, so after transformation the // shape of the binary op result is valid. For example, assume the shapes of // $input, $lhs and $rhs are [1600], [1,40,40] and [40x1]. After the // transformation, the shape of the binary op result is [40x1600], which
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 16 20:31:41 UTC 2024 - 66.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/utils/export_utils.h
// Fill in the contents of TensorShapeProto for the given shape. // ShapeContainerT is any type with the following methods: // bool hasRank() // ArrayRef<int64_t> getShape() // This includes mlir::TF::ShapeAttr and mlir::ShapedType. template <typename ShapeContainerT> void SetTensorShapeProto(ShapeContainerT shape, TensorShapeProto* proto) { if (shape.hasRank()) { for (int64_t dim : shape.getShape()) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Apr 26 09:37:10 UTC 2024 - 3.9K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_device_context.cc
xla_tensor->WaitForDefinitionEventOnStream(device_to_host_stream.get()); // Transfer manager requires the shape of the shaped buffer to be the same as // literal shape except for the layout. Set the literal to use xla_tensor's // shape as it is derived from the cpu_tensor's shape using // shape_representation_fn_. xla::MutableBorrowingLiteral literal; TF_CHECK_OK(HostTensorToMutableBorrowingLiteral(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 16 00:36:08 UTC 2024 - 12.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/fold_constant_transpose.cc
// in a `shape` shaped tensor. Assumes row-major order. `indices` and `shape` // should have the same size. // Example: Index (2, 3) of a (4, 5)-shaped tensor has the contiguous offset of // 2 * 5 + 3 = 13. int64_t GetContiguousOffset(const ArrayRef<int64_t> indices, const ArrayRef<int64_t> shape) { int64_t contiguous_offset = 0; int64_t base_offset = 1;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 7.7K bytes - Viewed (0)