Results 41 - 50 of 1,054 for ShapeN (0.12 sec)

  1. tensorflow/compiler/mlir/tensorflow/tests/compile_mlir_util/add.mlir

    // RUN: tf-mlir-translate -mlir-tf-to-hlo-text %s -tf-input-shapes=: -tf-xla-emit-use-tuple-args -tf-xla-emit-return-tuple | FileCheck -check-prefix=TUPLE-ARGS %s
    // RUN: tf-mlir-translate -mlir-tf-to-hlo-text %s -tf-input-shapes=: | FileCheck -check-prefix=NO_RET_TUPLE %s
    // RUN: tf-mlir-translate -mlir-tf-to-hlo-text-via-builder %s -tf-input-shapes=: | FileCheck -check-prefix=NO_RET_TUPLE %s
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Mar 23 18:56:13 UTC 2022
    - 2.7K bytes
    - Viewed (0)
  2. tensorflow/compiler/jit/test_util.cc

        TF_RET_CHECK(sit != shape_info.end())
            << "Missing shape information for node " << node->name();
        std::vector<PartialTensorShape> shapes;
        for (const auto& output : sit->second) shapes.push_back(output.shape);
    
        auto it = expected_shapes.find(node->name());
        if (it != expected_shapes.end()) {
          if (!PartialTensorShapeUtils::AreIdentical(shapes, it->second)) {
            return errors::InvalidArgument(
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Feb 09 11:36:41 UTC 2024
    - 3.7K bytes
    - Viewed (0)
  3. pkg/scheduler/framework/plugins/noderesources/requested_to_capacity_ratio.go

    	}
    }
    
    func requestedToCapacityRatioScorer(resources []config.ResourceSpec, shape []config.UtilizationShapePoint) func([]int64, []int64) int64 {
    	shapes := make([]helper.FunctionShapePoint, 0, len(shape))
    	for _, point := range shape {
    		shapes = append(shapes, helper.FunctionShapePoint{
    			Utilization: int64(point.Utilization),
    Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Wed Dec 21 15:23:47 UTC 2022
    - 2.6K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/tensorflow/tests/graphdef2mlir/arg-data-type.pbtxt

    # RUN: tf-mlir-translate -graphdef-to-mlir -tf-enable-shape-inference-on-import=false %s -tf-input-arrays=p,x -tf-input-shapes=:1  -tf-output-arrays=p,x -o - | FileCheck %s --check-prefix=NONE
    # RUN: tf-mlir-translate -graphdef-to-mlir -tf-enable-shape-inference-on-import=false %s -tf-input-arrays=p,x -tf-input-shapes=:1  -tf-input-data-types=DT_INT32,DT_BOOL -tf-output-arrays=p,x -o - | FileCheck %s
    
    # Test the handling of the input data types. In particular, if the data type
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 10 19:32:15 UTC 2020
    - 1.2K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/tensorflow/utils/convert_type_test.cc

                                    bool unknown_rank, DataType dtype) {
      TensorShapeProto shape;
      shape.set_unknown_rank(unknown_rank);
      for (int64_t dim : dims) {
        shape.add_dim()->set_size(dim);
      }
      mlir::MLIRContext context;
      mlir::Builder b(&context);
      auto status_or = ConvertToMlirTensorType(shape, dtype, &b);
      std::string buf;
      llvm::raw_string_ostream os(buf);
      status_or.value().print(os);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Sep 06 19:12:29 UTC 2023
    - 2.2K bytes
    - Viewed (0)
  6. tensorflow/compiler/jit/shape_inference.h

    namespace tensorflow {
    
    struct InferredShape {
      // Shape of the argument tensor.
      PartialTensorShape shape;
    
      // If the argument is a resource variable, the type and shape of the
      // variable's value.
      DataType handle_type = DT_INVALID;
      PartialTensorShape handle_shape;
    };
    typedef std::unordered_map<string, std::vector<InferredShape>> GraphShapeInfo;
    
    // Infer shapes for all Tensors in a graph, and save them in a map.  The vector
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 31 00:41:19 UTC 2024
    - 2.2K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/tensorflow/ir/tf_ops_a_m.cc

      // Check all elements besides at concat_dim match across all shape tensors.
      SmallVector<int32_t, 4> shape0;
      shape0.reserve(num_dims);
      for (int32_t dim : shapes.front().getValues<int32_t>()) shape0.push_back(dim);
    
      for (DenseIntElementsAttr shape : llvm::drop_begin(shapes, 1)) {
        for (const auto& dims_and_idx : llvm::enumerate(llvm::zip(shape0, shape))) {
          if (dims_and_idx.index() == concat_dim) continue;
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 146.7K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/tensorflow/utils/convert_tensor.h

    // Converts a shape from MLIR to a TensorFlow tensor shape proto.
    void ConvertToTensorShapeProto(llvm::ArrayRef<int64_t> shape,
                                   TensorShapeProto* output_shape);
    
    // Converts an MLIR type to a TensorFlow tensor shape.
    PartialTensorShape ConvertTypeToTensorShape(const mlir::Type& type);
    
    // Converts an MLIR shaped type to a TensorFlow shape attribute.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Apr 26 09:37:10 UTC 2024
    - 2.9K bytes
    - Viewed (0)
  9. tensorflow/compiler/jit/xla_device.h

    namespace tensorflow {
    
    class XlaDevice : public LocalDevice {
     public:
      // Given a tensor, sets `xla::Shape*` the shape of tensor's representation
      // on device, fully padded. On error, the contents of `xla::Shape*`
      // are undefined.
      typedef std::function<Status(const Tensor&, xla::Shape*)> PaddedShapeFn;
    
      // Wrapper class to store metadata about the XlaDevice, where it can be
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 21 09:53:30 UTC 2024
    - 13.4K bytes
    - Viewed (0)
  10. tensorflow/compiler/jit/xla_host_recv_device_context.h

     public:
      XlaHostRecvDeviceContext(
          se::Stream* stream, const se::DeviceMemoryBase& device_memory_base,
          const xla::Shape& shape,
          tsl::AsyncValueRef<std::unique_ptr<se::Event>>& done_event)
          : stream_(stream),
            device_memory_base_(device_memory_base),
            shape_(shape),
            done_event_(done_event) {}
    
      void CopyCPUTensorToDevice(const Tensor* cpu_tensor, Device* device,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 17 22:46:36 UTC 2024
    - 3.9K bytes
    - Viewed (0)
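
Note on the query term itself: ShapeN is the TensorFlow op that returns the shape of each tensor in a list of N inputs. The sketch below is a minimal, illustrative use of it through the TensorFlow C++ client API; it is not taken from any of the indexed files, and it assumes the standard tensorflow/cc client build environment.

    // Minimal sketch (assumed standard tensorflow/cc client API, not from the
    // indexed files): feed two constants of different rank to ShapeN and fetch
    // their shapes as int32 vectors.
    #include <vector>
    
    #include "tensorflow/cc/client/client_session.h"
    #include "tensorflow/cc/ops/array_ops.h"
    #include "tensorflow/cc/ops/const_op.h"
    
    int main() {
      using namespace tensorflow;       // NOLINT
      using namespace tensorflow::ops;  // NOLINT
    
      Scope root = Scope::NewRootScope();
      auto a = Const(root, {{1.f, 2.f, 3.f}, {4.f, 5.f, 6.f}});  // shape [2, 3]
      auto b = Const(root, {1.f, 2.f, 3.f, 4.f});                // shape [4]
    
      // ShapeN emits one shape tensor per input.
      auto shapes = ShapeN(root, {Input(a), Input(b)});
    
      ClientSession session(root);
      std::vector<Tensor> out;
      Status s = session.Run({shapes.output[0], shapes.output[1]}, &out);
      if (!s.ok()) return 1;
      // out[0] now holds [2, 3] and out[1] holds [4].
      return 0;
    }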
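
Results 2 and 6 above revolve around PartialTensorShape and the GraphShapeInfo map produced by shape inference. The fragment below is a small illustrative sketch, not code from those files, of the comparison pattern test_util.cc applies: collect the inferred shapes for a node and check them against the expected ones with PartialTensorShapeUtils::AreIdentical. It assumes the standard TensorFlow C++ headers.

    // Illustrative sketch (assumed tensorflow/core headers; not from the
    // indexed files): compare a list of inferred partial shapes against the
    // expected ones, the same per-node check test_util.cc performs.
    #include <vector>
    
    #include "tensorflow/core/framework/tensor_shape.h"
    
    int main() {
      using tensorflow::PartialTensorShape;
      using tensorflow::PartialTensorShapeUtils;
    
      // -1 marks an unknown dimension, as in shape_inference.h's InferredShape.
      std::vector<PartialTensorShape> inferred = {PartialTensorShape({2, -1}),
                                                  PartialTensorShape({4, 5, 6})};
      std::vector<PartialTensorShape> expected = {PartialTensorShape({2, -1}),
                                                  PartialTensorShape({4, 5, 6})};
    
      // AreIdentical requires equal rank and equal (possibly unknown) sizes in
      // every dimension; a mismatch is what triggers InvalidArgument in result 2.
      return PartialTensorShapeUtils::AreIdentical(inferred, expected) ? 0 : 1;
    }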