Search Options

Results per page
Sort
Preferred Languages
Advanced

Results 1 - 6 of 6 for out_tensor (0.14 sec)

  1. tensorflow/c/checkpoint_reader.cc

        const string& name, std::unique_ptr<tensorflow::Tensor>* out_tensor,
        TF_Status* out_status) const {
      Status status;
      if (reader_ != nullptr) {
        status = reader_->GetTensor(name, out_tensor);
      } else {
        tensorflow::DataType dtype;
        tensorflow::TensorShape shape;
        status = v2_reader_->LookupDtypeAndShape(name, &dtype, &shape);
        if (status.ok()) {
          out_tensor->reset(new Tensor(dtype, shape));
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Aug 25 21:29:12 UTC 2023
    - 5.5K bytes
    - Viewed (0)
  2. tensorflow/c/checkpoint_reader.h

      const TensorSliceReader::VarToDataTypeMap& GetVariableToDataTypeMap() const;
    
      // Attempts to look up the tensor named "name" and stores the found result in
      // "out_tensor".
      void GetTensor(const string& name,
                     std::unique_ptr<tensorflow::Tensor>* out_tensor,
                     TF_Status* out_status) const;
    
     private:
      // Uses "v2_reader_" to build "var name -> shape" and "var name -> data type"
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Oct 12 08:49:52 UTC 2023
    - 3.1K bytes
    - Viewed (0)
  3. tensorflow/compiler/jit/xla_launch_util_gpu_test.cc

      Tensor* CreateHostTensor(const TensorShape& shape,
                               const gtl::ArraySlice<T> data) {
        Tensor* host_tensor =
            new Tensor(host_allocator_, DataTypeToEnum<T>::v(), shape);
        test::FillValues<T>(host_tensor, data);
        tensors_.push_back(host_tensor);
        return host_tensor;
      }
    
      // Creates a Tensor on device using the device_allocator_
      template <typename T>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Sep 06 19:12:29 UTC 2023
    - 10K bytes
    - Viewed (0)
  4. tensorflow/compiler/jit/xla_launch_util_test.cc

      Tensor* CreateHostTensor(const TensorShape& shape,
                               const gtl::ArraySlice<T> data) {
        Tensor* host_tensor =
            new Tensor(host_allocator_, DataTypeToEnum<T>::v(), shape);
        test::FillValues<T>(host_tensor, data);
        tensors_.push_back(host_tensor);
        return host_tensor;
      }
    
      // Creates a Tensor on device using the device_allocator_
      template <typename T>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 21 09:53:30 UTC 2024
    - 28.8K bytes
    - Viewed (0)
  5. tensorflow/compiler/jit/kernels/xla_ops.cc

            args.device_context = new XlaHostRecvDeviceContext(
                stream, device_memory_base, shape, done_event);
    
            Tensor host_tensor;
            TF_RETURN_IF_ERROR(
                ctx->rendezvous()->Send(parsed_key, args, host_tensor, false));
    
            return std::move(done_event);
          };
    }
    
    // Provide RecvDeviceMemoryFunction for XLA host callbacks.  This callback
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 17 22:46:36 UTC 2024
    - 41.4K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/tensorflow/ir/tf_generated_ops.td

        DefaultValuedOptionalAttr<BoolAttr, "false">:$enable_large_batch_splitting
      );
    
      let results = (outs
        Res<Variadic<TF_Tensor>, [{The output tensors.}]>:$out_tensors
      );
    
      TF_DerivedOperandTypeListAttr Tcaptured = TF_DerivedOperandTypeListAttr<1>;
      TF_DerivedOperandTypeListAttr Tin = TF_DerivedOperandTypeListAttr<0>;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jun 11 23:24:08 UTC 2024
    - 793K bytes
    - Viewed (0)
Back to top