Results 11 - 20 of 81 for `shape_2` (0.06 sec)
- tensorflow/c/c_api_experimental.h
```c++
    TF_ShapeAndTypeList** shape_list_array, int num_items);

// Infer shapes for the given `op`. The arguments mimic the arguments of the
// `shape_inference::InferenceContext` constructor. Note the following:
//   - The inputs of the `op` are not used for shape inference. So, it is
//     OK to not have the inputs properly set in `op`. See `input_tensors`
//     if you want shape inference to consider the input tensors of the
//     op for shape inference.
```
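The comment above documents the experimental shape-inference entry point (`TFE_InferShapes`) declared in this header. Below is a minimal, untested sketch of driving it for a `Shape` op, assuming the declarations in `c_api_experimental.h`; the exact argument order should be checked against the header.

```c++
// Minimal sketch: infer the output shape of a "Shape" op whose single input
// is described as [2, 3], without ever materializing that input.
#include "tensorflow/c/c_api_experimental.h"
#include "tensorflow/c/eager/c_api.h"

void InferShapeOpOutput(TFE_Context* ctx) {
  TF_Status* status = TF_NewStatus();
  // Per the header comment, the op's inputs need not actually be set.
  TFE_Op* op = TFE_NewOp(ctx, "Shape", status);
  TFE_OpSetAttrType(op, "T", TF_FLOAT);

  // Describe the single input as having shape [2, 3].
  int64_t dims[] = {2, 3};
  TF_ShapeAndTypeList* input_shapes = TF_NewShapeAndTypeList(/*num_shapes=*/1);
  TF_ShapeAndTypeListSetShape(input_shapes, /*index=*/0, dims, /*num_dims=*/2);

  TF_ShapeAndTypeList* output_shapes = nullptr;
  TFE_InferShapes(op, input_shapes, /*input_tensors=*/nullptr,
                  /*input_tensors_as_shapes=*/nullptr,
                  /*input_resource_shapes_and_types=*/nullptr, &output_shapes,
                  /*output_resource_shapes_and_types=*/nullptr, status);
  // On success, output_shapes describes one rank-1 output with dimension 2.

  TF_DeleteShapeAndTypeList(input_shapes);
  TF_DeleteShapeAndTypeList(output_shapes);
  TFE_DeleteOp(op);
  TF_DeleteStatus(status);
}
```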
- tensorflow/c/eager/c_api_experimental_test.cc
```c++
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
TFE_Op* shape_op = ShapeOp(ctx, hgpu);
TFE_OpSetDevice(shape_op, gpu_device_name.c_str(), status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
TFE_TensorHandle* retvals[1];
int num_retvals = 1;
TFE_Execute(shape_op, &retvals[0], &num_retvals, status.get());
```
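For context, a self-contained sketch of the pattern this test exercises, simplified to CPU and with error handling trimmed: execute the `Shape` op eagerly and resolve its `int32` result.

```c++
#include <cstdint>
#include <cstdio>
#include <cstring>
#include "tensorflow/c/eager/c_api.h"

int main() {
  TF_Status* status = TF_NewStatus();
  TFE_ContextOptions* opts = TFE_NewContextOptions();
  TFE_Context* ctx = TFE_NewContext(opts, status);
  TFE_DeleteContextOptions(opts);

  // A 2x3 float input tensor (its contents are irrelevant to "Shape").
  float data[6] = {0};
  int64_t dims[] = {2, 3};
  TF_Tensor* t = TF_AllocateTensor(TF_FLOAT, dims, 2, sizeof(data));
  std::memcpy(TF_TensorData(t), data, sizeof(data));
  TFE_TensorHandle* h = TFE_NewTensorHandle(t, status);

  TFE_Op* shape_op = TFE_NewOp(ctx, "Shape", status);
  TFE_OpSetAttrType(shape_op, "T", TF_FLOAT);
  TFE_OpAddInput(shape_op, h, status);

  TFE_TensorHandle* retvals[1];
  int num_retvals = 1;
  TFE_Execute(shape_op, &retvals[0], &num_retvals, status);

  // "Shape" returns an int32 vector by default: here [2, 3].
  TF_Tensor* result = TFE_TensorHandleResolve(retvals[0], status);
  const int32_t* shape = static_cast<const int32_t*>(TF_TensorData(result));
  std::printf("shape = [%d, %d]\n", shape[0], shape[1]);

  TF_DeleteTensor(result);
  TFE_DeleteTensorHandle(retvals[0]);
  TFE_DeleteOp(shape_op);
  TFE_DeleteTensorHandle(h);
  TF_DeleteTensor(t);
  TFE_DeleteContext(ctx);
  TF_DeleteStatus(status);
  return 0;
}
```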
- tensorflow/c/eager/c_api_experimental.h
```c++
int version = TFE_CUSTOM_DEVICE_VERSION;

// Computes the rank of the tensor handle.
//
// Shapes are specified via callbacks because retrieving the shape of a tensor
// is a blocking operation for async eager; custom devices should avoid
// retrieving shapes of tensors they wrap until the custom device tensor's
// shape is explicitly requested where possible.
int (*num_dims)(void* data, TF_Status* status);
```
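A toy illustration of a callback matching this signature; the `WrappedTensor` struct is hypothetical, standing in for whatever a real custom device actually wraps.

```c++
#include "tensorflow/c/eager/c_api_experimental.h"

struct WrappedTensor {
  int rank;  // known statically, so answering does not block
};

int WrappedNumDims(void* data, TF_Status* status) {
  // `data` is the opaque pointer registered with the custom device handle.
  // Nothing here can fail, so `status` is left as TF_OK.
  (void)status;
  return static_cast<WrappedTensor*>(data)->rank;
}
```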
- tensorflow/c/eager/c_api_test_util.cc
```c++
  TF_DeleteStatus(status);
  TFE_OpSetAttrType(op, "T", TFE_TensorHandleDataType(a));
  return op;
}

TFE_Op* ShapeOp(TFE_Context* ctx, TFE_TensorHandle* a) {
  TF_Status* status = TF_NewStatus();
  TFE_Op* op = TFE_NewOp(ctx, "Shape", status);
  CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  TFE_OpAddInput(op, a, status);
```
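The excerpt cuts off mid-helper. By analogy with the helper visible just before it, `ShapeOp` plausibly finishes as below; this is a reconstruction, not the verbatim source.

```c++
TFE_Op* ShapeOp(TFE_Context* ctx, TFE_TensorHandle* a) {
  TF_Status* status = TF_NewStatus();
  TFE_Op* op = TFE_NewOp(ctx, "Shape", status);
  CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  TFE_OpAddInput(op, a, status);
  CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  TF_DeleteStatus(status);
  TFE_OpSetAttrType(op, "T", TFE_TensorHandleDataType(a));
  return op;
}
```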
- tensorflow/c/eager/c_api_test.cc
```c++
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
TFE_Op* shape_op = ShapeOp(ctx, hgpu);
TFE_OpSetDevice(shape_op, gpu_device_name.c_str(), status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
TFE_TensorHandle* retvals[1];
int num_retvals = 1;
TFE_Execute(shape_op, &retvals[0], &num_retvals, status.get());
```
- tensorflow/c/eager/dlpack_test.cc
```c++
dltensor_in->device = {kDLCPU, 0};
dltensor_in->ndim = static_cast<int32_t>(shape.size());
dltensor_in->dtype = {kDLFloat, 32, 1};
dltensor_in->shape = shape.data();
dltensor_in->strides = strides.data();
TFE_TensorHandle* handle = TFE_HandleFromDLPack(&dlm_in, status, ctx);
ASSERT_NE(handle, nullptr)
    << TF_Message(status) << " (shape=[" << absl::StrJoin(shape, ",")
    << "], strides=[" << absl::StrJoin(strides, ",") << "])";
```
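A short sketch of a DLPack round trip through the same API, assuming the `TFE_HandleToDLPack` / `TFE_HandleFromDLPack` declarations in `tensorflow/c/eager/dlpack.h`:

```c++
#include "tensorflow/c/eager/c_api.h"
#include "tensorflow/c/eager/dlpack.h"

TFE_TensorHandle* RoundTrip(TFE_Context* ctx, TFE_TensorHandle* h,
                            TF_Status* status) {
  // Export: yields a DLManagedTensor* behind a void*.
  void* dlm = TFE_HandleToDLPack(h, status);
  if (TF_GetCode(status) != TF_OK) return nullptr;
  // Import: on success the new handle takes ownership of `dlm`.
  return TFE_HandleFromDLPack(dlm, status, ctx);
}
```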
- RELEASE.md
* Tracing/timeline support for distributed runtime (no GPU profiler yet).
* The C API gives access to inferred shapes with `TF_GraphGetTensorNumDims` and `TF_GraphGetTensorShape`.
* Shape functions for core ops have moved to C++ via `REGISTER_OP(...).SetShapeFn(...)`. Python shape inference `RegisterShape` calls now delegate to the C++ shape functions through `common_shapes.call_cpp_shape_fn`. A future release will remove `RegisterShape` from Python.
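A small sketch of the accessors this release note mentions: build a placeholder with a known shape, then read the inferred shape back out of the graph.

```c++
#include <cstdio>
#include "tensorflow/c/c_api.h"

int main() {
  TF_Status* status = TF_NewStatus();
  TF_Graph* graph = TF_NewGraph();

  // Placeholder with a statically known shape [2, 3].
  TF_OperationDescription* desc = TF_NewOperation(graph, "Placeholder", "p");
  TF_SetAttrType(desc, "dtype", TF_FLOAT);
  const int64_t dims[] = {2, 3};
  TF_SetAttrShape(desc, "shape", dims, 2);
  TF_Operation* op = TF_FinishOperation(desc, status);

  // Query the inferred shape of output 0.
  TF_Output out = {op, 0};
  int num_dims = TF_GraphGetTensorNumDims(graph, out, status);  // 2
  int64_t got[2];
  TF_GraphGetTensorShape(graph, out, got, num_dims, status);
  std::printf("rank=%d shape=[%lld, %lld]\n", num_dims,
              static_cast<long long>(got[0]), static_cast<long long>(got[1]));

  TF_DeleteGraph(graph);
  TF_DeleteStatus(status);
  return 0;
}
```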
- tensorflow/c/eager/abstract_tensor_handle.h
```c++
virtual absl::Status TensorHandleStatus() const;

// Returns tensor shape. If tensor has unknown rank, shape remains untouched.
virtual absl::Status Shape(tensorflow::PartialTensorShape* shape) const = 0;

// Returns tensor (full) type.
// While there is no immediate plan to deprecate dtype and shape in favor
// of only using full type type information, this is a future possibility.
```
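A sketch of the contract described in that comment, assuming any concrete `AbstractTensorHandle`: a default-constructed `PartialTensorShape` has unknown rank, so the rank stays unknown when `Shape()` leaves it untouched.

```c++
#include <cstdio>
#include "absl/status/status.h"
#include "tensorflow/c/eager/abstract_tensor_handle.h"
#include "tensorflow/core/framework/tensor_shape.h"

absl::Status ReportRank(tensorflow::AbstractTensorHandle* handle) {
  tensorflow::PartialTensorShape shape;  // starts with unknown rank
  absl::Status s = handle->Shape(&shape);
  if (!s.ok()) return s;
  if (shape.unknown_rank()) {
    std::printf("rank unknown\n");
  } else {
    std::printf("rank %d\n", shape.dims());
  }
  return absl::OkStatus();
}
```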
- tensorflow/c/eager/c_api_debug.cc
```c++
  std::vector<int64_t> shape;
  int rank = -1;
  *status = handle.NumDims(&rank);
  if (!status->ok()) {
    return shape;
  }
  shape.reserve(rank);
  for (int i = 0; i < rank; ++i) {
    int64_t dim;
    *status = handle.Dim(i, &dim);
    if (!status->ok()) {
      return shape;
    }
    shape.push_back(dim);
  }
  return shape;
}

}  // namespace

extern "C" {
```
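The helper above uses the internal handle API; the same rank/dim walk is available through the public eager C API (`TFE_TensorHandleNumDims` / `TFE_TensorHandleDim`), sketched here for a handle `h`.

```c++
#include <cstdint>
#include <vector>
#include "tensorflow/c/eager/c_api.h"

std::vector<int64_t> GetShape(TFE_TensorHandle* h, TF_Status* status) {
  std::vector<int64_t> shape;
  int rank = TFE_TensorHandleNumDims(h, status);
  if (TF_GetCode(status) != TF_OK) return shape;
  shape.reserve(rank);
  for (int i = 0; i < rank; ++i) {
    int64_t dim = TFE_TensorHandleDim(h, i, status);
    if (TF_GetCode(status) != TF_OK) return shape;
    shape.push_back(dim);
  }
  return shape;
}
```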
- tensorflow/c/eager/parallel_device/parallel_device_lib_test.cc
```c++
  ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
  const std::vector<std::unique_ptr<ParallelTensor>>& handles = *outputs;
  const std::vector<int64_t>* shape;
  absl::Status s = handles[0]->Shape(&shape);
  ASSERT_TRUE(s.ok());
  EXPECT_EQ(0, shape->size());
}

TEST(PARALLEL_DEVICE_LIB, TestCancelOnError) {
  std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
      TF_NewStatus(), TF_DeleteStatus);
```
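A sketch of the shape query this test performs, assuming the internal `ParallelTensor` type from `parallel_device_lib.h`; `Shape()` hands back a pointer to the tensor's cached shape vector rather than copying it.

```c++
#include <cstdint>
#include <vector>
#include "absl/status/status.h"
#include "tensorflow/c/eager/parallel_device/parallel_device_lib.h"

// `pt` is assumed to be a live ParallelTensor from this internal library.
absl::Status ExpectScalar(tensorflow::parallel_device::ParallelTensor& pt) {
  const std::vector<int64_t>* shape = nullptr;
  absl::Status s = pt.Shape(&shape);  // pointer to the cached shape vector
  if (!s.ok()) return s;
  return shape->empty() ? absl::OkStatus()
                        : absl::InvalidArgumentError("expected a scalar");
}
```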