Results 1 - 10 of 42 for shape_1
- tensorflow/c/eager/parallel_device/parallel_device_lib.h
    ParallelTensor(const ParallelDevice& device,
                   std::vector<TensorHandlePtr> tensors,
                   absl::Span<const int64_t> shape, const TF_DataType dtype)
        : device_(device),
          tensors_(std::move(tensors)),
          shape_(std::vector<int64_t>(shape.begin(), shape.end())),
          dtype_(dtype) {}
    ParallelTensor(const ParallelDevice& device,
Last Modified: Mon Oct 21 04:14:14 UTC 2024 - 13.1K bytes
- tensorflow/c/eager/parallel_device/parallel_device_lib.cc
    new ParallelTensor(parallel_device, std::move(components), dtype));
    }
    absl::Status ParallelTensor::Shape(const std::vector<int64_t>** shape) const {
      if (!shape_.has_value()) {
        TF_Status status;
        PartialTensorShape combined_shape;
        TF_RETURN_IF_ERROR(unwrap(tensors_[0].get())->Shape(&combined_shape));
        for (const TensorHandlePtr& component : tensors_) {
          PartialTensorShape component_shape;
Last Modified: Mon Oct 21 04:14:14 UTC 2024 - 25.9K bytes
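
The Shape accessor above is lazy: shape_ is an optional that is only filled in the first time the merged shape is requested, by combining the per-device component shapes. A minimal standalone sketch of that caching pattern (LazyShapedTensor is a hypothetical stand-in, not the TensorFlow class):

    #include <cstdint>
    #include <optional>
    #include <vector>

    // One logical tensor backed by per-device components that share a shape.
    class LazyShapedTensor {
     public:
      explicit LazyShapedTensor(std::vector<std::vector<int64_t>> components)
          : components_(std::move(components)) {}

      // Computes the merged shape on first use, then serves the cached copy.
      const std::vector<int64_t>* Shape() {
        if (!shape_.has_value()) {
          shape_ = components_.empty() ? std::vector<int64_t>{}
                                       : components_.front();
        }
        return &*shape_;
      }

     private:
      std::vector<std::vector<int64_t>> components_;
      std::optional<std::vector<int64_t>> shape_;  // memoized result
    };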
- tensorflow/c/eager/c_api_unified_experimental_graph.cc
    std::vector<PartialTensorShape> shapes;
    shapes.reserve(num_values);
    for (int i = 0; i < num_values; ++i) {
      if (num_dims[i] < 0) {
        shapes.emplace_back();
      } else {
        shapes.emplace_back(ArraySlice<int64_t>(
            reinterpret_cast<const int64_t*>(dims[i]), num_dims[i]));
      }
    }
    op_->node_builder.Attr(attr_name, shapes);
    return absl::OkStatus();
    }
Last Modified: Sat Oct 12 05:11:17 UTC 2024 - 15.7K bytes
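
The loop above decodes the C-level shape convention: num_dims[i] < 0 means unknown rank (a default-constructed PartialTensorShape), otherwise dims[i] points at num_dims[i] dimension sizes, with -1 marking a single unknown dimension. A small hypothetical helper illustrating the same encoding:

    #include <cstdint>
    #include <iostream>
    #include <string>

    // Renders a (dims, num_dims) pair using the convention from the snippet:
    // num_dims < 0 -> unknown rank; a -1 dimension -> unknown size.
    std::string ShapeToString(const int64_t* dims, int num_dims) {
      if (num_dims < 0) return "<unknown rank>";
      std::string out = "[";
      for (int i = 0; i < num_dims; ++i) {
        if (i > 0) out += ", ";
        out += dims[i] < 0 ? "?" : std::to_string(dims[i]);
      }
      return out + "]";
    }

    int main() {
      const int64_t dims[] = {3, -1, 4};
      std::cout << ShapeToString(dims, 3) << "\n";      // [3, ?, 4]
      std::cout << ShapeToString(nullptr, -1) << "\n";  // <unknown rank>
    }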
- tensorflow/c/c_api_experimental.h
    TF_ShapeAndTypeList** shape_list_array, int num_items);

    // Infer shapes for the given `op`. The arguments mimic the arguments of the
    // `shape_inference::InferenceContext` constructor. Note the following:
    //   - The inputs of the `op` are not used for shape inference. So, it is
    //     OK to not have the inputs properly set in `op`. See `input_tensors`
    //     if you want shape inference to consider the input tensors of the
    //     op for shape inference.
Last Modified: Thu Apr 27 21:07:00 UTC 2023 - 15.1K bytes
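
A hedged sketch of driving this shape-inference entry point for a two-input op such as MatMul. It assumes the TFE_InferShapes and TF_ShapeAndTypeList helpers declared in this header; treat the exact parameter list as something to verify against the c_api_experimental.h of your TensorFlow version. `op` is assumed to be a valid TFE_Op*:

    #include <cstdint>
    #include "tensorflow/c/c_api_experimental.h"
    #include "tensorflow/c/eager/c_api.h"

    // Feed known input shapes to shape inference. The op's actual inputs are
    // ignored; only `input_shapes` matters here.
    void InferTwoInputShapes(TFE_Op* op, TF_Status* status) {
      TF_ShapeAndTypeList* input_shapes = TF_NewShapeAndTypeList(/*num_shapes=*/2);
      const int64_t a_dims[] = {3, 2};
      const int64_t b_dims[] = {2, 4};
      TF_ShapeAndTypeListSetShape(input_shapes, 0, a_dims, 2);
      TF_ShapeAndTypeListSetShape(input_shapes, 1, b_dims, 2);

      TF_ShapeAndTypeList* output_shapes = nullptr;
      TFE_InferShapes(op, input_shapes,
                      /*input_tensors=*/nullptr,
                      /*input_tensors_as_shapes=*/nullptr,
                      /*input_resource_shapes_and_types=*/nullptr,
                      &output_shapes,
                      /*output_resource_shapes_and_types=*/nullptr, status);
      // On success, output_shapes->items[0] should describe a [3, 4] result.

      TF_DeleteShapeAndTypeList(input_shapes);
      TF_DeleteShapeAndTypeList(output_shapes);
    }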
- tensorflow/c/eager/c_api_experimental_test.cc
    ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
    TFE_Op* shape_op = ShapeOp(ctx, hgpu);
    TFE_OpSetDevice(shape_op, gpu_device_name.c_str(), status.get());
    ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
    TFE_TensorHandle* retvals[1];
    int num_retvals = 1;
    TFE_Execute(shape_op, &retvals[0], &num_retvals, status.get());
Last Modified: Thu Aug 03 03:14:26 UTC 2023 - 31.5K bytes
- tensorflow/c/eager/c_api_experimental.h
    int version = TFE_CUSTOM_DEVICE_VERSION;

    // Computes the rank of the tensor handle.
    //
    // Shapes are specified via callbacks because retrieving the shape of a
    // tensor is a blocking operation for async eager; custom devices should
    // avoid retrieving shapes of tensors they wrap until the custom device
    // tensor's shape is explicitly requested where possible.
    int (*num_dims)(void* data, TF_Status* status);
Last Modified: Wed Feb 21 22:37:46 UTC 2024 - 39.5K bytes
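
A sketch of the callback style this struct describes: `data` is the custom device's own record for the wrapped tensor, so the rank can be answered locally without a blocking shape fetch. WrappedTensor here is a hypothetical type, not part of the API:

    #include <cstdint>
    #include <vector>
    #include "tensorflow/c/tf_status.h"

    // Hypothetical per-tensor record a custom device might keep; the rank is
    // known locally, so reporting it never blocks.
    struct WrappedTensor {
      std::vector<int64_t> dims;
    };

    // Matches the num_dims callback signature shown above.
    int WrappedTensorNumDims(void* data, TF_Status* status) {
      TF_SetStatus(status, TF_OK, "");
      return static_cast<int>(static_cast<WrappedTensor*>(data)->dims.size());
    }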
- tensorflow/c/eager/c_api_test_util.cc
    TF_DeleteStatus(status);
      TFE_OpSetAttrType(op, "T", TFE_TensorHandleDataType(a));
      return op;
    }
    TFE_Op* ShapeOp(TFE_Context* ctx, TFE_TensorHandle* a) {
      TF_Status* status = TF_NewStatus();
      TFE_Op* op = TFE_NewOp(ctx, "Shape", status);
      CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
      TFE_OpAddInput(op, a, status);
Last Modified: Wed Feb 21 22:37:46 UTC 2024 - 23.5K bytes
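
Stitching this helper together with the test excerpt above it, a hedged sketch of the full round trip: build a "Shape" op with the eager C API, execute it, and resolve the resulting handle to a client-side tensor. `ctx` and `input` are assumed valid, and error handling is abbreviated:

    #include "tensorflow/c/eager/c_api.h"

    // Runs the "Shape" op eagerly and copies the 1-D shape tensor back to the
    // client. Returns nullptr (with `status` set) on failure.
    TF_Tensor* RunShapeOp(TFE_Context* ctx, TFE_TensorHandle* input,
                          TF_Status* status) {
      TFE_Op* op = TFE_NewOp(ctx, "Shape", status);
      if (TF_GetCode(status) != TF_OK) return nullptr;
      TFE_OpAddInput(op, input, status);
      TFE_OpSetAttrType(op, "T", TFE_TensorHandleDataType(input));

      TFE_TensorHandle* retvals[1];
      int num_retvals = 1;
      TFE_Execute(op, &retvals[0], &num_retvals, status);
      TFE_DeleteOp(op);
      if (TF_GetCode(status) != TF_OK) return nullptr;

      TF_Tensor* shape_tensor = TFE_TensorHandleResolve(retvals[0], status);
      TFE_DeleteTensorHandle(retvals[0]);
      return shape_tensor;
    }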
- tensorflow/c/eager/parallel_device/parallel_device_lib_test.cc
    ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
    const std::vector<std::unique_ptr<ParallelTensor>>& handles = *outputs;
    const std::vector<int64_t>* shape;
    absl::Status s = handles[0]->Shape(&shape);
    ASSERT_TRUE(s.ok());
    EXPECT_EQ(0, shape->size());
    }
    TEST(PARALLEL_DEVICE_LIB, TestCancelOnError) {
      std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
          TF_NewStatus(), TF_DeleteStatus);
Last Modified: Mon Oct 21 04:14:14 UTC 2024 - 15.6K bytes
- tensorflow/c/c_api_experimental_test.cc
    CHECK_EQ(TF_OK, TF_GetCode(status_)) << TF_Message(status_);
    // Infer shape when everything is known.
    CheckOutputShapes(matmul_op,
                      /*input_shapes*/ {make_shape({3, 2}), make_shape({2, 4})},
                      /*input_tensors*/ {},
                      /*expected_shape*/ make_shape({3, 4}));
    // Infer shape when second operand has unknown shape.
    CheckOutputShapes(matmul_op,
Last Modified: Tue Jan 17 22:27:52 UTC 2023 - 13.1K bytes
- tensorflow/c/c_api_experimental.cc
    ShapeHandle shape_handle = c.output(i);
    TF_ShapeAndType& shape = output_shapes_result->items[i];
    shape.num_dims = c.Rank(shape_handle);
    if (shape.num_dims == InferenceContext::kUnknownRank) {
      shape.dims = nullptr;
      continue;
    }
    shape.dims = new int64_t[shape.num_dims];
    for (size_t j = 0; j < shape.num_dims; ++j) {
      shape.dims[j] = c.Value(c.Dim(shape_handle, j));
    }
    }
Last Modified: Sat Oct 12 16:27:48 UTC 2024 - 29.5K bytes
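
On the caller's side, the records filled in above are read back with the mirror-image convention: num_dims == -1 (InferenceContext::kUnknownRank) means dims is null, and a -1 inside dims marks a single unknown dimension. A hedged sketch, assuming the TF_ShapeAndType field names shown in the snippet:

    #include <cstdio>
    #include "tensorflow/c/c_api_experimental.h"

    // Prints one inferred shape, mirroring the encoding written above.
    void PrintShape(const TF_ShapeAndType& shape) {
      if (shape.num_dims == -1) {
        std::printf("<unknown rank>\n");
        return;
      }
      for (int j = 0; j < shape.num_dims; ++j) {
        std::printf(j > 0 ? " x %lld" : "%lld",
                    static_cast<long long>(shape.dims[j]));
      }
      std::printf("\n");
    }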