Results 1 - 10 of 302 for Tshape (0.06 sec)
tensorflow/c/c_api_experimental_test.cc
CHECK_EQ(TF_OK, TF_GetCode(status_)) << TF_Message(status_);
// Infer shape when everything is known.
CheckOutputShapes(matmul_op,
                  /*input_shapes*/ {make_shape({3, 2}), make_shape({2, 4})},
                  /*input_tensors*/ {},
                  /*expected_shape*/ make_shape({3, 4}));
// Infer shape when second operand has unknown shape.
CheckOutputShapes(matmul_op,
Last Modified: Tue Jan 17 22:27:52 UTC 2023 - 13.1K bytes
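
The excerpt above checks shape inference for a MatMul op with fully known operand shapes ([3, 2] x [2, 4] -> [3, 4]) and then with a partially unknown second operand. As an illustration only (CheckOutputShapes and make_shape are helpers local to that test, not public API), here is a minimal standalone sketch of the rank-2 rule the test expects:

#include <cassert>
#include <cstdint>
#include <vector>

// Stand-in for the test's make_shape helper; -1 marks an unknown dimension.
std::vector<int64_t> make_shape(std::vector<int64_t> dims) { return dims; }

// Rank-2 MatMul output-shape rule: [m, k] x [k, n] -> [m, n];
// unknown dimensions (-1) propagate unchanged.
std::vector<int64_t> InferMatMulShape(const std::vector<int64_t>& a,
                                      const std::vector<int64_t>& b) {
  assert(a.size() == 2 && b.size() == 2);
  return {a[0], b[1]};
}

int main() {
  auto out = InferMatMulShape(make_shape({3, 2}), make_shape({2, 4}));
  assert(out == make_shape({3, 4}));  // matches the expected_shape in the test
  return 0;
}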
tensorflow/c/eager/parallel_device/parallel_device_lib.h
  // devices of a ParallelDevice. If called, ParallelTensor::Shape inspects
  // `components` to determine a shape.
  static std::unique_ptr<ParallelTensor> FromTensorHandles(
      const ParallelDevice& parallel_device,
      std::vector<TensorHandlePtr> components, TF_Status* status);

  // Uses the provided shape without additional checks, which avoids blocking
  // when ParallelTensor::Shape is called.
Last Modified: Mon Oct 21 04:14:14 UTC 2024 - 13.1K bytes
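
The two construction paths described above differ only in whether the shape is computed lazily from the components or trusted as given. A rough sketch of that caching pattern, using a simplified stand-in class rather than the real ParallelTensor:

#include <cstdint>
#include <optional>
#include <vector>

// Simplified stand-in illustrating ParallelTensor's shape caching.
class FakeParallelTensor {
 public:
  // Mirrors the "uses the provided shape without additional checks" path.
  explicit FakeParallelTensor(std::vector<int64_t> shape)
      : shape_(std::move(shape)) {}
  // Mirrors the FromTensorHandles path: shape left unset until Shape() is called.
  FakeParallelTensor() = default;

  const std::vector<int64_t>& Shape() {
    if (!shape_.has_value()) {
      // The real code inspects the component handles here, which may block.
      shape_ = ComputeShapeFromComponents();
    }
    return *shape_;
  }

 private:
  std::vector<int64_t> ComputeShapeFromComponents() { return {2, 2}; }  // placeholder
  std::optional<std::vector<int64_t>> shape_;
};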
tensorflow/c/eager/parallel_device/parallel_device_lib.cc
      new ParallelTensor(parallel_device, std::move(components), dtype));
}

absl::Status ParallelTensor::Shape(const std::vector<int64_t>** shape) const {
  if (!shape_.has_value()) {
    TF_Status status;
    PartialTensorShape combined_shape;
    TF_RETURN_IF_ERROR(unwrap(tensors_[0].get())->Shape(&combined_shape));
    for (const TensorHandlePtr& component : tensors_) {
      PartialTensorShape component_shape;
Last Modified: Mon Oct 21 04:14:14 UTC 2024 - 25.9K bytes
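
Shape() above merges the per-component shapes into one combined shape. Per the comment in immediate_execution_tensor_handle.h further down this list, an axis whose length differs across components is reported as unknown (-1). A minimal sketch of that merge, under that assumption and with plain int64_t vectors instead of PartialTensorShape:

#include <cstdint>
#include <vector>

// Merge component shapes: equal axis lengths are kept, mismatches become -1
// (unknown). Assumes all components have the same rank.
std::vector<int64_t> CombineShapes(
    const std::vector<std::vector<int64_t>>& components) {
  std::vector<int64_t> combined = components.front();
  for (const auto& shape : components) {
    for (size_t i = 0; i < combined.size(); ++i) {
      if (shape[i] != combined[i]) combined[i] = -1;  // differing axis -> unknown
    }
  }
  return combined;
}

// e.g. CombineShapes({{2, 3}, {2, 5}}) yields {2, -1}.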
tensorflow/c/c_api.cc
  std::vector<PartialTensorShape> shapes;
  shapes.reserve(num_shapes);
  for (int i = 0; i < num_shapes; ++i) {
    if (num_dims[i] < 0) {
      shapes.emplace_back();
    } else {
      shapes.emplace_back(ArraySlice<int64_t>(
          reinterpret_cast<const int64_t*>(dims[i]), num_dims[i]));
    }
  }
  desc->node_builder.Attr(attr_name, shapes);
}
Last Modified: Sat Oct 12 16:27:48 UTC 2024 - 102.3K bytes
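
This excerpt appears to be the body that converts a caller's (dims, num_dims) arrays into PartialTensorShape attr values, with a negative num_dims producing an unknown shape. A hedged usage sketch from the C API side via TF_SetAttrShapeList; the op type, node name, and attr name are placeholders, and a real call would check `status` afterwards:

#include <cstdint>
#include "tensorflow/c/c_api.h"

void SetShapeListAttrExample(TF_Graph* graph, TF_Status* status) {
  // "SomeOp", "some_op", and "shapes" are placeholder names for illustration.
  TF_OperationDescription* desc = TF_NewOperation(graph, "SomeOp", "some_op");

  const int64_t shape0[] = {2, 3};            // known shape [2, 3]
  const int64_t* dims[] = {shape0, nullptr};  // second entry has no dims
  const int num_dims[] = {2, -1};             // -1 => unknown shape, as in the code above
  TF_SetAttrShapeList(desc, "shapes", dims, num_dims, /*num_shapes=*/2);

  TF_Operation* op = TF_FinishOperation(desc, status);
  (void)op;  // check TF_GetCode(status) before using op
}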
tensorflow/c/eager/c_api_unified_experimental_graph.cc
    std::vector<PartialTensorShape> shapes;
    shapes.reserve(num_values);
    for (int i = 0; i < num_values; ++i) {
      if (num_dims[i] < 0) {
        shapes.emplace_back();
      } else {
        shapes.emplace_back(ArraySlice<int64_t>(
            reinterpret_cast<const int64_t*>(dims[i]), num_dims[i]));
      }
    }
    op_->node_builder.Attr(attr_name, shapes);
    return absl::OkStatus();
  }
Last Modified: Sat Oct 12 05:11:17 UTC 2024 - 15.7K bytes
tensorflow/c/c_api.h
// setting a shape of [-1, 2] with an existing shape [2, -1] would set
// a final shape of [2, 2] based on shape merging semantics.
//
// Returns an error into `status` if:
//   * `output` is not in `graph`.
//   * An invalid shape is being set (e.g., the shape being set
//     is incompatible with the existing shape).
TF_CAPI_EXPORT extern void TF_GraphSetTensorShape(TF_Graph* graph,
Last Modified: Thu Oct 26 21:08:15 UTC 2023 - 82.3K bytes
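
TF_GraphSetTensorShape merges the requested shape with whatever is already known about the output, as the [-1, 2] versus [2, -1] example in the comment describes. A small sketch of setting a shape and reading the merged result back; the graph, output, and status are assumed to come from earlier setup:

#include <cstdint>
#include "tensorflow/c/c_api.h"

void MergeShapeExample(TF_Graph* graph, TF_Output output, TF_Status* status) {
  // Request shape [-1, 2]; if the existing shape were [2, -1], the merged
  // result would be [2, 2] per the merging semantics documented above.
  const int64_t requested[] = {-1, 2};
  TF_GraphSetTensorShape(graph, output, requested, /*num_dims=*/2, status);
  if (TF_GetCode(status) != TF_OK) return;

  // Read the merged shape back.
  int num_dims = TF_GraphGetTensorNumDims(graph, output, status);
  if (TF_GetCode(status) != TF_OK || num_dims < 0 || num_dims > 8) return;
  int64_t dims[8];  // -1 entries mean the axis length is still unknown
  TF_GraphGetTensorShape(graph, output, dims, num_dims, status);
}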
tensorflow/c/c_api_experimental.h
    TF_ShapeAndTypeList** shape_list_array, int num_items);

// Infer shapes for the given `op`. The arguments mimic the arguments of the
// `shape_inference::InferenceContext` constructor. Note the following:
//   - The inputs of the `op` are not used for shape inference. So, it is
//     OK to not have the inputs properly set in `op`. See `input_tensors`
//     if you want shape inference to consider the input tensors of the
//     op for shape inference.
Last Modified: Thu Apr 27 21:07:00 UTC 2023 - 15.1K bytes
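
The shape-inference entry point documented above takes its input shapes as TF_ShapeAndTypeList values. A minimal sketch of building such a list with the helpers declared in this header; the shapes and indices are arbitrary examples, and the inference call itself is only referenced in a comment:

#include <cstdint>
#include "tensorflow/c/c_api_experimental.h"

void BuildInputShapesExample() {
  // Two input shapes for shape inference: a known [3, 2] and an unknown shape.
  TF_ShapeAndTypeList* input_shapes = TF_NewShapeAndTypeList(2);
  const int64_t shape0[] = {3, 2};
  TF_ShapeAndTypeListSetShape(input_shapes, /*index=*/0, shape0, /*num_dims=*/2);
  TF_ShapeAndTypeListSetUnknownShape(input_shapes, /*index=*/1);

  // ... pass `input_shapes` (plus optional input tensors) to the shape
  // inference call described above, then free the list.
  TF_DeleteShapeAndTypeList(input_shapes);
}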
tensorflow/c/eager/immediate_execution_tensor_handle.h
  //
  // -1 indicates an unknown axis length; this is unreachable for most standard
  // ImmediateExecutionTensorHandles, but comes up for example when computing
  // the shape of a parallel tensor with component shapes differing across
  // devices.
  virtual absl::Status Dim(int dim_index, int64_t* dim) const = 0;

  // Returns the device which created the handle.
Last Modified: Sat Oct 12 05:11:17 UTC 2024 - 4.3K bytes
tensorflow/c/c_api_internal.h
namespace tensorflow {

// Set the shapes and types of the output's handle.
//
// The lengths of the arrays pointed to by `shapes`, `ranks`, and `types` must
// all be equal to `num_shapes_and_types`. If `ranks[i] != -1` (i.e., if the
// rank is known), then it must be equal to the length of `shapes[i]`; if
// `ranks[i] == -1`, then `shapes[i]` may be nullptr.
//
Last Modified: Sat May 13 00:49:12 UTC 2023 - 7.6K bytes
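
The contract above ties `shapes`, `ranks`, and `types` together: equal lengths, with `shapes[i]` only meaningful (and sized by `ranks[i]`) when the rank is known. A small sketch of checking arguments against that contract; the function name is illustrative, not the internal helper itself:

#include <cstdint>

// Illustrative check of the documented invariants; not the real internal helper.
bool HandleShapesAndTypesArgsValid(int num_shapes_and_types,
                                   const int64_t* const* shapes,
                                   const int* ranks) {
  for (int i = 0; i < num_shapes_and_types; ++i) {
    if (ranks[i] == -1) continue;            // unknown rank: shapes[i] may be nullptr
    if (shapes[i] == nullptr) return false;  // known rank requires ranks[i] dims
  }
  return true;
}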
RELEASE.md
* Tracing/timeline support for distributed runtime (no GPU profiler yet).
* C API gives access to inferred shapes with `TF_GraphGetTensorNumDims` and
  `TF_GraphGetTensorShape`.
* Shape functions for core ops have moved to C++ via
  `REGISTER_OP(...).SetShapeFn(...)`. Python shape inference RegisterShape calls
  use the C++ shape functions with `common_shapes.call_cpp_shape_fn`. A future
  release will remove `RegisterShape` from python.
Last Modified: Tue Oct 22 14:33:53 UTC 2024 - 735.3K bytes
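
The C++ shape-function registration mentioned in the last bullet looks roughly like the following; the op name is made up for illustration, and UnchangedShape is the canned shape function from common_shape_fns.h:

#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/op.h"

// "ExampleIdentity" is a made-up op used only to illustrate SetShapeFn.
REGISTER_OP("ExampleIdentity")
    .Input("x: float")
    .Output("y: float")
    .SetShapeFn(::tensorflow::shape_inference::UnchangedShape);  // output shape == input shape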