Search Options

Results per page
Sort
Preferred Languages
Advanced

Results 1 - 10 of 13 for out_tensor (0.13 sec)

  1. tensorflow/c/checkpoint_reader.cc

        const string& name, std::unique_ptr<tensorflow::Tensor>* out_tensor,
        TF_Status* out_status) const {
      Status status;
      if (reader_ != nullptr) {
        status = reader_->GetTensor(name, out_tensor);
      } else {
        tensorflow::DataType dtype;
        tensorflow::TensorShape shape;
        status = v2_reader_->LookupDtypeAndShape(name, &dtype, &shape);
        if (status.ok()) {
          out_tensor->reset(new Tensor(dtype, shape));
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Aug 25 21:29:12 UTC 2023
    - 5.5K bytes
    - Viewed (0)
  2. tensorflow/c/checkpoint_reader.h

      const TensorSliceReader::VarToDataTypeMap& GetVariableToDataTypeMap() const;
    
      // Attempts to look up the tensor named "name" and stores the found result in
      // "out_tensor".
      void GetTensor(const string& name,
                     std::unique_ptr<tensorflow::Tensor>* out_tensor,
                     TF_Status* out_status) const;
    
     private:
      // Uses "v2_reader_" to build "var name -> shape" and "var name -> data type"
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Oct 12 08:49:52 UTC 2023
    - 3.1K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/tfrt/runtime_fallback/runtime_fallback_ops.td

               "type$val", where type can be "bool", "string", "tfdtype", "tfshape",
               "tftensor".
               The value serialization format can be found in attr_util.h.
    
          %out_c, %out_tensor = "tfd.delegate_kernel"(
            %in_c, %in1_tensor, %in2_tensor) {
            _name = "MatMul",
            attr1_name = "transpose_a", attr1_value = "bool$false",
            attr2_name = "transpose_b", attr2_value = "bool$false"
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Feb 23 19:35:12 UTC 2023
    - 5.9K bytes
    - Viewed (0)
  4. tensorflow/cc/framework/while_gradients_test.cc

        }
    
        std::vector<Operation> run_outputs;
        std::vector<Tensor> out_tensors;
        TF_ASSERT_OK(session.Run(run_options, feeds, grad_outputs_, run_outputs,
                                 &out_tensors, run_metadata));
        ASSERT_EQ(out_tensors.size(), grad_outputs_.size());
    
        DCHECK_EQ(expected_grad_values.size(), out_tensors.size());
        for (int i = 0; i < out_tensors.size(); ++i) {
          test::ExpectTensorEqual<T>(
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Sep 27 20:32:17 UTC 2017
    - 7.7K bytes
    - Viewed (0)
  5. tensorflow/cc/ops/while_loop_test.cc

        }
    
        std::vector<Tensor> out_tensors;
        TF_ASSERT_OK(session.Run(feeds, outputs_, &out_tensors));
        ASSERT_EQ(out_tensors.size(), outputs_.size());
    
        DCHECK_EQ(expected_output_values.size(), out_tensors.size());
        for (int i = 0; i < out_tensors.size(); ++i) {
          test::ExpectTensorEqual<T>(
              out_tensors[i], test::AsTensor<T>({expected_output_values[i]}, {}));
        }
      }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 13 22:30:58 UTC 2023
    - 6.4K bytes
    - Viewed (0)
  6. tensorflow/compiler/jit/xla_launch_util_gpu_test.cc

      Tensor* CreateHostTensor(const TensorShape& shape,
                               const gtl::ArraySlice<T> data) {
        Tensor* host_tensor =
            new Tensor(host_allocator_, DataTypeToEnum<T>::v(), shape);
        test::FillValues<T>(host_tensor, data);
        tensors_.push_back(host_tensor);
        return host_tensor;
      }
    
      // Creates a Tensor on device using the device_allocator_
      template <typename T>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Sep 06 19:12:29 UTC 2023
    - 10K bytes
    - Viewed (0)
  7. tensorflow/compiler/jit/xla_launch_util_test.cc

      Tensor* CreateHostTensor(const TensorShape& shape,
                               const gtl::ArraySlice<T> data) {
        Tensor* host_tensor =
            new Tensor(host_allocator_, DataTypeToEnum<T>::v(), shape);
        test::FillValues<T>(host_tensor, data);
        tensors_.push_back(host_tensor);
        return host_tensor;
      }
    
      // Creates a Tensor on device using the device_allocator_
      template <typename T>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 21 09:53:30 UTC 2024
    - 28.8K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/tfrt/runtime_fallback/runtime_fallback_combine.cc

    // This optimizes the following scenario:
    // %tft0, %c2 = "tfd.move_dht_to_tft"(%dht0, %c1)
    //     : (!dht.host_tensor, !tfrt.chain) -> (!tfd.tf_tensor, !tfrt.chain)
    // %dht1, %c3 = "tfd.convert_tft_to_dht"(%tft0, %c2)
    //     : (!tfd.tf_tensor, !tfrt.chain) -> (!dht.host_tensor, !tfrt.chain)
    // some_op %dht1, %c3
    //
    // becomes
    // some_op %dht0, %c1
    
    struct SimplifyDoubleConversion
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 09 12:09:19 UTC 2022
    - 3.5K bytes
    - Viewed (0)
  9. tensorflow/cc/experimental/libtf/tests/runtime_test.cc

                              module.Get<Callable>(String("test_int")));
    
      // Call the function
      TF_ASSERT_OK_AND_ASSIGN(Tensor host_tensor,
                              runtime.CreateHostTensor<int>({}, TF_INT32, {2}));
    
      TF_ASSERT_OK_AND_ASSIGN(Tensor tensor, fn.Call<Tensor>(Tensor(host_tensor)));
    
      int out_val[1];
      TF_ASSERT_OK(tensor.GetValue(absl::MakeSpan(out_val)));
      EXPECT_EQ(out_val[0], 6);
    }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Mar 09 12:27:54 UTC 2023
    - 4.8K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/lite/experimental/remat/rematerializer.cc

        for (const int old_tensor : std::vector<int>(operations_[iop].tensors)) {
          const auto new_tensor =
              std::lower_bound(new_tensors.begin(), new_tensors.end(),
                               std::make_pair(old_tensor, 0));
          if (new_tensor != new_tensors.end() && new_tensor->first == old_tensor) {
            DelUse(iop, old_tensor);
            AddUse(iop, new_tensor->second);
          }
        }
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 14 20:57:44 UTC 2023
    - 13.7K bytes
    - Viewed (0)
Back to top