Search Options

Results per page
Sort
Preferred Languages
Advanced

Results 1 - 10 of 29 for shape_determination_fns_ (0.32 sec)

  1. tensorflow/compiler/jit/pjrt_base_device.h

          return shape_determination_fns_.at(0);
        }
    
        const XlaShapeLayoutHelpers::ShapeDeterminationFns&
        shape_determination_fns_at(int i) const {
          return shape_determination_fns_[i];
        }
    
       private:
        const DeviceType jit_device_type_;
        std::vector<XlaShapeLayoutHelpers::ShapeDeterminationFns>
            shape_determination_fns_;
    
        Metadata(const Metadata&) = delete;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 21 12:19:41 UTC 2024
    - 4K bytes
    - Viewed (0)
  2. tensorflow/compiler/jit/pjrt_device_context.h

    // devices using PjRt.
    class PjRtDeviceContext : public DeviceContext {
     public:
      explicit PjRtDeviceContext(
          XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns,
          bool use_pjrt_tensor_buffer = false)
          : shape_determination_fns_(std::move(shape_determination_fns)),
            use_pjrt_tensor_buffer_(use_pjrt_tensor_buffer) {}
    
      void CopyCPUTensorToDevice(const Tensor* cpu_tensor, Device* device,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jul 19 19:27:39 UTC 2023
    - 2.7K bytes
    - Viewed (0)
  3. tensorflow/compiler/jit/xla_device_context.h

        return device_to_device_streams_.at(index).get();
      }
      xla::TransferManager* transfer_manager() const { return transfer_manager_; }
      const XlaShapeLayoutHelpers::ShapeDeterminationFns& shape_determination_fns()
          const {
        return shape_determination_fns_;
      }
    
      // Returns a device-to-device stream, in round-robin fashion.
      se::Stream* GetDeviceToDeviceStream();
    
      Status ThenExecute(Device* device, stream_executor::Stream* stream,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Sep 06 19:12:29 UTC 2023
    - 5.1K bytes
    - Viewed (0)
  4. tensorflow/compiler/jit/xla_device_context.cc

      CHECK(xla_tensor);
    
      XlaLayoutPreference layout_preference =
          shape_determination_fns_.layout_preference_fn(
              device_tensor->shape(), device_tensor->dtype(), std::nullopt);
      Status status = [&]() -> Status {
        TF_ASSIGN_OR_RETURN(xla::Shape shape,
                            shape_determination_fns_.shape_representation_fn(
                                device_tensor->shape(), device_tensor->dtype(),
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 16 00:36:08 UTC 2024
    - 12.7K bytes
    - Viewed (0)
  5. tensorflow/compiler/jit/xla_device.h

        const DeviceType& jit_device_type() const;
        const XlaShapeLayoutHelpers::ShapeDeterminationFns&
        default_shape_determination_fns() const {
          return shape_determination_fns_.at(0);
        }
        const PaddedShapeFn& padded_shape_fn() const { return padded_shape_fn_; }
    
        bool UseMultipleStreams() const { return use_multiple_streams_; }
    
       private:
        const int device_ordinal_;
        const DeviceType device_type_;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 21 09:53:30 UTC 2024
    - 13.4K bytes
    - Viewed (0)
  6. tensorflow/compiler/jit/xla_device.cc

        std::vector<XlaShapeLayoutHelpers::ShapeDeterminationFns>
            shape_determination_fns,
        PaddedShapeFn padded_shape_fn, bool use_multiple_streams)
        : device_ordinal_(device_ordinal),
          device_type_(device_type),
          platform_(platform),
          shape_determination_fns_(std::move(shape_determination_fns)),
          padded_shape_fn_(std::move(padded_shape_fn)),
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon May 20 21:05:42 UTC 2024
    - 24.3K bytes
    - Viewed (0)
  7. tensorflow/compiler/jit/pjrt_device_context.cc

        const XlaShapeLayoutHelpers::ShapeDeterminationFns
            shape_determination_fns) {
      XlaLayoutPreference layout_preference =
          shape_determination_fns.layout_preference_fn(
              cpu_tensor->shape(), cpu_tensor->dtype(), std::nullopt);
      TF_ASSIGN_OR_RETURN(xla::Shape shape,
                          shape_determination_fns.shape_representation_fn(
                              cpu_tensor->shape(), cpu_tensor->dtype(),
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Apr 13 08:49:31 UTC 2024
    - 11.6K bytes
    - Viewed (0)
  8. tensorflow/compiler/jit/xla_compiler_options_util.cc

              << ",graph_def_version=" << options.graph_def_version
              << ",options.shape_determination_fns.layout_preference_fn?="
              << (options.shape_determination_fns.layout_preference_fn != nullptr)
              << ",options.shape_determination_fns.shape_representation_fn?="
              << (options.shape_determination_fns.shape_representation_fn !=
                  nullptr)
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 21 09:53:30 UTC 2024
    - 6.4K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/tf2xla/api/v1/compile_mlir_util.cc

          hlo_sharding, /*use_fast_memory=*/false, shape_determination_fns, shape));
      return absl::OkStatus();
    }
    
    // Converts arg_shapes to xla::Shape's and store into xla_input_shapes.
    Status GetXlaInputShapes(
        mlir::ModuleOp module, llvm::ArrayRef<TensorOrResourceShape> arg_shapes,
        bool use_tuple_args,
        const XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns,
        std::vector<xla::Shape>* xla_input_shapes) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 21 17:24:39 UTC 2024
    - 45.3K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/tf2xla/internal/legalize_tf_mlir.cc

              use_tuple_args, true, shape_determination_fns, compilation_result,
              custom_legalization_passes, metadata.module_name(),
              lower_to_xla_hlo));
    
      // Compute how arguments are shared across different cores.
      auto sharding_result =
          tpu::GetShardingInfo(metadata, arg_shapes, shape_determination_fns,
                               arg_core_mapping, per_core_arg_shapes);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sun Apr 14 20:29:34 UTC 2024
    - 6.1K bytes
    - Viewed (0)
Back to top