- Sort: Score
- Results per page: 10
- Languages: All
Results 21 - 30 of 639 for ShapeN (0.29 sec)
-
tensorflow/compiler/mlir/tensorflow/tests/compile_mlir_util/add.mlir
// RUN: tf-mlir-translate -mlir-tf-to-hlo-text %s -tf-input-shapes=: -tf-xla-emit-use-tuple-args -tf-xla-emit-return-tuple | FileCheck -check-prefix=TUPLE-ARGS %s // RUN: tf-mlir-translate -mlir-tf-to-hlo-text %s -tf-input-shapes=: | FileCheck -check-prefix=NO_RET_TUPLE %s // RUN: tf-mlir-translate -mlir-tf-to-hlo-text-via-builder %s -tf-input-shapes=: | FileCheck -check-prefix=NO_RET_TUPLE %s
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Mar 23 18:56:13 UTC 2022 - 2.7K bytes - Viewed (0) -
tensorflow/compiler/jit/test_util.cc
TF_RET_CHECK(sit != shape_info.end()) << "Missing shape information for node " << node->name(); std::vector<PartialTensorShape> shapes; for (const auto& output : sit->second) shapes.push_back(output.shape); auto it = expected_shapes.find(node->name()); if (it != expected_shapes.end()) { if (!PartialTensorShapeUtils::AreIdentical(shapes, it->second)) { return errors::InvalidArgument(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Feb 09 11:36:41 UTC 2024 - 3.7K bytes - Viewed (0) -
pkg/scheduler/framework/plugins/noderesources/requested_to_capacity_ratio.go
} } func requestedToCapacityRatioScorer(resources []config.ResourceSpec, shape []config.UtilizationShapePoint) func([]int64, []int64) int64 { shapes := make([]helper.FunctionShapePoint, 0, len(shape)) for _, point := range shape { shapes = append(shapes, helper.FunctionShapePoint{ Utilization: int64(point.Utilization),
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Wed Dec 21 15:23:47 UTC 2022 - 2.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/graphdef2mlir/arg-data-type.pbtxt
# RUN: tf-mlir-translate -graphdef-to-mlir -tf-enable-shape-inference-on-import=false %s -tf-input-arrays=p,x -tf-input-shapes=:1 -tf-output-arrays=p,x -o - | FileCheck %s --check-prefix=NONE # RUN: tf-mlir-translate -graphdef-to-mlir -tf-enable-shape-inference-on-import=false %s -tf-input-arrays=p,x -tf-input-shapes=:1 -tf-input-data-types=DT_INT32,DT_BOOL -tf-output-arrays=p,x -o - | FileCheck %s # Test the handling of the input data types. In particular, if the data type
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 10 19:32:15 UTC 2020 - 1.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/utils/convert_type_test.cc
bool unknown_rank, DataType dtype) { TensorShapeProto shape; shape.set_unknown_rank(unknown_rank); for (int64_t dim : dims) { shape.add_dim()->set_size(dim); } mlir::MLIRContext context; mlir::Builder b(&context); auto status_or = ConvertToMlirTensorType(shape, dtype, &b); std::string buf; llvm::raw_string_ostream os(buf); status_or.value().print(os);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Sep 06 19:12:29 UTC 2023 - 2.2K bytes - Viewed (0) -
tensorflow/compiler/jit/shape_inference.h
namespace tensorflow { struct InferredShape { // Shape of the argument tensor. PartialTensorShape shape; // If the argument is a resource variable, the type and shape of the // variable's value. DataType handle_type = DT_INVALID; PartialTensorShape handle_shape; }; typedef std::unordered_map<string, std::vector<InferredShape>> GraphShapeInfo; // Infer shapes for all Tensors in a graph, and save them in a map. The vector
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 31 00:41:19 UTC 2024 - 2.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/utils/convert_tensor.h
// Converts a shape from MLIR to a TensorFlow tensor shape proto. void ConvertToTensorShapeProto(llvm::ArrayRef<int64_t> shape, TensorShapeProto* output_shape); // Converts an MLIR type to a TensorFlow tensor shape. PartialTensorShape ConvertTypeToTensorShape(const mlir::Type& type); // Converts an MLIR shaped type to a TensorFlow shape attribute.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Apr 26 09:37:10 UTC 2024 - 2.9K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_host_recv_device_context.h
public: XlaHostRecvDeviceContext( se::Stream* stream, const se::DeviceMemoryBase& device_memory_base, const xla::Shape& shape, tsl::AsyncValueRef<std::unique_ptr<se::Event>>& done_event) : stream_(stream), device_memory_base_(device_memory_base), shape_(shape), done_event_(done_event) {} void CopyCPUTensorToDevice(const Tensor* cpu_tensor, Device* device,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 22:46:36 UTC 2024 - 3.9K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_device_compiler_client.cc
#include "xla/client/local_client.h" namespace tensorflow { namespace { std::vector<const xla::Shape*> GetShapePointers( absl::Span<const xla::Shape> shapes) { std::vector<const xla::Shape*> shape_ptrs; shape_ptrs.reserve(shapes.size()); for (const auto& shape : shapes) { shape_ptrs.push_back(&shape); } return shape_ptrs; } } // namespace absl::StatusOr<std::unique_ptr<xla::LocalExecutable>>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 4.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/graphdef2mlir/arg-data-type-with-subtype.pbtxt
# RUN: tf-mlir-translate -graphdef-to-mlir -tf-enable-shape-inference-on-import=false %s -tf-input-arrays=p,x -tf-input-data-types="DT_INT32,DT_RESOURCE(DT_INT32)" -tf-output-arrays=p,x -o - | FileCheck %s -check-prefix=CHECK-NO-SHAPE
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 27 00:16:51 UTC 2022 - 1.5K bytes - Viewed (0)