Results 1 - 10 of 118 for tensors (0.18 sec)

  1. tensorflow/c/eager/tape.h

    void GradientTape<Gradient, BackwardFunction, TapeTensor>::DeleteTrace(
        int64_t tensor_id) {
      auto it = tensor_usage_.find(tensor_id);
      if (it == tensor_usage_.end()) {
        return;
      }
      it->second--;
      if (it->second != 0) {
        return;
      }
      tensor_usage_.erase(it);
      auto tensor_op_it = tensor_tape_.find(tensor_id);
      if (tensor_op_it == tensor_tape_.end()) {
        return;
      }
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Tue Apr 02 12:40:29 GMT 2024
    - 47.2K bytes
    - Viewed (1)
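    The excerpt above is a reference-count decrement: each traced tensor id
    maps to a usage count, and the tape forgets the tensor once the count
    reaches zero. A minimal, self-contained C++ sketch of that pattern (the
    map name and id type follow the snippet; everything else is
    illustrative):

    #include <cstdint>
    #include <iostream>
    #include <unordered_map>

    std::unordered_map<int64_t, int64_t> tensor_usage_;

    // Drop one reference to `tensor_id`; forget the tensor once unused.
    void DeleteTrace(int64_t tensor_id) {
      auto it = tensor_usage_.find(tensor_id);
      if (it == tensor_usage_.end()) return;  // Not being traced.
      if (--it->second != 0) return;          // Still referenced elsewhere.
      tensor_usage_.erase(it);                // Last reference gone.
    }

    int main() {
      tensor_usage_[42] = 2;
      DeleteTrace(42);  // Count drops to 1; entry kept.
      DeleteTrace(42);  // Count hits 0; entry erased.
      std::cout << tensor_usage_.count(42) << "\n";  // Prints 0.
    }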
  2. tensorflow/c/eager/parallel_device/parallel_device_lib.h

          TF_Status* status);
    
      size_t num_tensors() const { return tensors_.size(); }
      TFE_TensorHandle* tensor(size_t index) const { return tensors_[index].get(); }
    
      // If the `shape` argument to `FromTensorHandles` is specified, returns that.
      //
      // Otherwise if all of the tensors have the same shape, returns that via the
      // `shape` output argument. This blocks waiting for async tensors, may return
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Tue Apr 25 15:21:13 GMT 2023
    - 12.9K bytes
    - Viewed (0)
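    The comment above describes a common-shape rule: FromTensorHandles
    reports a single shape only when every component tensor agrees on it. A
    hedged sketch of that check, with illustrative types (the real code
    works on TFE_TensorHandles and reports errors through a TF_Status):

    #include <cstdint>
    #include <optional>
    #include <vector>

    using Shape = std::vector<int64_t>;

    // Returns the shape shared by all components, or nullopt if they differ.
    std::optional<Shape> CommonShape(const std::vector<Shape>& shapes) {
      if (shapes.empty()) return std::nullopt;
      for (const Shape& s : shapes) {
        if (s != shapes.front()) return std::nullopt;  // Shapes disagree.
      }
      return shapes.front();  // Every component reports this shape.
    }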
  3. tensorflow/c/eager/parallel_device/parallel_device.cc

      ParallelTensor* parallel_tensor = reinterpret_cast<ParallelTensor*>(
          TFE_TensorHandleDevicePointer(tensor, status));
      if (TF_GetCode(status) != TF_OK) return nullptr;
      if (parallel_tensor->num_tensors() == 1) {
        // Copy-off for single-device tensors is allowed to make debugging dynamic
        // control flow easier.
        return TFE_TensorHandleCopySharingTensor(parallel_tensor->tensor(0),
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Wed Mar 29 22:05:31 GMT 2023
    - 18.3K bytes
    - Viewed (0)
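    The snippet encodes a deliberate carve-out: a parallel tensor with
    exactly one component may be copied off its device (to ease debugging
    of dynamic control flow), while multi-component tensors may not. An
    illustrative stand-in for that branch (the real code returns the result
    of TFE_TensorHandleCopySharingTensor and reports failures via TF_Status):

    #include <cstddef>
    #include <memory>
    #include <vector>

    struct Component {};  // Stand-in for a per-device tensor handle.

    struct ParallelTensor {
      std::vector<std::shared_ptr<Component>> components;
      std::size_t num_tensors() const { return components.size(); }
      std::shared_ptr<Component> tensor(std::size_t i) const {
        return components[i];
      }
    };

    std::shared_ptr<Component> CopyOff(const ParallelTensor& t) {
      // Only single-component parallel tensors may leave the device.
      if (t.num_tensors() == 1) return t.tensor(0);
      return nullptr;  // Multi-component copy-off is rejected.
    }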
  4. tensorflow/c/eager/gradients.cc

        absl::Span<const AbstractTensorHandle* const> tensors) const {
      std::vector<int64_t> tensor_ids(tensors.size());
      std::vector<tensorflow::DataType> tensor_dtypes(tensors.size());
      for (int i = 0; i < tensors.size(); i++) {
        tensor_ids[i] = ToId(tensors[i]);
        tensor_dtypes[i] = tensors[i]->DataType();
      }
      return GradientTape::ShouldRecord(tensor_ids, tensor_dtypes);
    }
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Thu Feb 15 09:49:45 GMT 2024
    - 19.3K bytes
    - Viewed (0)
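    This helper fans a span of tensor handles out into two parallel vectors
    (ids and dtypes) before asking the tape whether to record. A sketch of
    the same extraction pattern with stand-in types (ToId and DataType in
    the real code come from the gradients API):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    enum class DType { kFloat, kInt32 };

    struct Handle {
      int64_t id;
      DType dtype;
    };

    // Placeholder policy standing in for GradientTape::ShouldRecord.
    bool ShouldRecord(const std::vector<int64_t>& ids,
                      const std::vector<DType>& dtypes) {
      return !ids.empty() && ids.size() == dtypes.size();
    }

    bool ShouldRecordFor(const std::vector<const Handle*>& tensors) {
      std::vector<int64_t> tensor_ids(tensors.size());
      std::vector<DType> tensor_dtypes(tensors.size());
      for (std::size_t i = 0; i < tensors.size(); ++i) {
        tensor_ids[i] = tensors[i]->id;
        tensor_dtypes[i] = tensors[i]->dtype;
      }
      return ShouldRecord(tensor_ids, tensor_dtypes);
    }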
  5. tensorflow/c/c_api_function.cc

    }
    
    // Converts `noutputs` and `outputs` into `outputs_tensors` and does various
    // checks while doing so.
    Status ProcessOutputs(const TF_Graph* fn_body, const char* fn_name,
                          int noutputs, const TF_Output* outputs,
                          std::vector<OutputTensor>* output_tensors)
        TF_EXCLUSIVE_LOCKS_REQUIRED(fn_body->mu) {
      output_tensors->reserve(noutputs);
      for (int i = 0; i < noutputs; ++i) {
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Mon Apr 15 03:35:10 GMT 2024
    - 13.6K bytes
    - Viewed (2)
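    ProcessOutputs follows a reserve-validate-convert shape: size the
    destination once, then check and convert each output in turn, failing
    fast on the first bad entry. A hedged sketch with illustrative types
    (the real checks resolve TF_Output structs against the graph while
    holding fn_body->mu):

    #include <string>
    #include <vector>

    struct Output { int index; };
    struct OutputTensor { int index; };

    bool ProcessOutputs(const std::vector<Output>& outputs,
                        std::vector<OutputTensor>* output_tensors,
                        std::string* error) {
      output_tensors->reserve(outputs.size());  // One reservation, no regrowth.
      for (const Output& out : outputs) {
        if (out.index < 0) {  // Stand-in for the real per-output checks.
          *error = "output has a negative index";
          return false;
        }
        output_tensors->push_back({out.index});
      }
      return true;
    }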
  6. tensorflow/c/eager/parallel_device/parallel_device_lib.cc

      for (int component_index = 0; component_index < tensors_.size();
           ++component_index) {
        // TODO(allenl): Add a C API for summarizing tensors. Currently custom
        // devices limiting themselves to a C API (for ABI compatibility) would need
        // to implement summarization for component tensors themselves.
        ImmediateExecutionTensorHandle* component =
            tensorflow::unwrap(tensors_[component_index].get());
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Fri Feb 09 07:47:20 GMT 2024
    - 25.4K bytes
    - Viewed (1)
  7. tensorflow/c/c_api_experimental.cc

        // below. Allocate enough space so that no reallocation happens, which will
        // make the pointers invalid.
        all_input_tensors.reserve(num_inputs);
        for (int i = 0; i < num_inputs; ++i) {
          if (input_tensors[i] == nullptr) continue;
          all_input_tensors.emplace_back();
          Tensor& input_tensor = all_input_tensors.back();
          status->status = TF_TensorToTensor(input_tensors[i], &input_tensor);
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Mon Apr 15 03:35:10 GMT 2024
    - 29.4K bytes
    - Viewed (0)
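    The comment here makes a subtle but important point: pointers into a
    std::vector are invalidated whenever the vector reallocates, so
    reserving the final capacity up front is what keeps references like
    `all_input_tensors.back()` valid across the loop. A minimal
    demonstration, with plain ints standing in for Tensors:

    #include <vector>

    int main() {
      const int num_inputs = 100;
      std::vector<int> all_inputs;
      all_inputs.reserve(num_inputs);  // Capacity fixed; no reallocation below.

      std::vector<int*> pointers;
      for (int i = 0; i < num_inputs; ++i) {
        all_inputs.emplace_back(i);
        pointers.push_back(&all_inputs.back());  // Safe only due to reserve().
      }
      // Without reserve(), earlier entries of `pointers` could dangle once
      // the vector grew past its initial capacity.
      return *pointers.front();  // Returns 0.
    }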
  8. tensorflow/c/eager/parallel_device/parallel_device.h

    // an error.
    //
    // All component tensors must have the same dtype. Currently they must also have
    // the same shape, although this requirement may be relaxed in the future.
    //
    // `device_name` must not name an existing physical or custom device (see
    // the documentation for TFE_RegisterCustomDevice for more information).
    //
    // Tensors may be copied on or off the device explicitly using
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Thu Jun 04 21:49:16 GMT 2020
    - 2.9K bytes
    - Viewed (0)
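    The header spells out the packing preconditions: components must share
    one dtype and, for now, one shape. A hedged validation sketch of those
    two rules (types and names here are illustrative, not the library's):

    #include <cstdint>
    #include <vector>

    enum class DType { kFloat, kInt32 };
    using Shape = std::vector<int64_t>;

    struct Component {
      DType dtype;
      Shape shape;
    };

    bool ComponentsArePackable(const std::vector<Component>& components) {
      if (components.empty()) return false;
      for (const Component& c : components) {
        if (c.dtype != components.front().dtype) return false;  // Mixed dtypes.
        if (c.shape != components.front().shape) return false;  // Mixed shapes.
      }
      return true;
    }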
  9. tensorflow/c/eager/c_api.h

                                                       TF_Status* status);
    
    // A handle to a tensor on a device.
    //
    // Like a TF_Tensor, a TFE_TensorHandle refers to a tensor with a value, shape,
    // type etc. Unlike a TF_Tensor, a TFE_TensorHandle may refer to such tensors
    // placed in the memory of different devices or remote address spaces.
    typedef struct TFE_TensorHandle TFE_TensorHandle;
    
    C
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Thu Apr 27 21:07:00 GMT 2023
    - 22.8K bytes
    - Viewed (1)
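    A short usage sketch for the handle type documented above: wrap a host
    TF_Tensor in a TFE_TensorHandle, query it, and release everything. This
    assumes the standard eager C API entry points (TF_AllocateTensor,
    TFE_NewTensorHandle) and a link against libtensorflow:

    #include <stdio.h>
    #include "tensorflow/c/c_api.h"
    #include "tensorflow/c/eager/c_api.h"

    int main() {
      TF_Status* status = TF_NewStatus();

      // A scalar float tensor in host memory.
      TF_Tensor* t = TF_AllocateTensor(TF_FLOAT, /*dims=*/NULL, /*num_dims=*/0,
                                       sizeof(float));
      *(float*)TF_TensorData(t) = 1.0f;

      // The handle refers to the tensor's value; unlike a TF_Tensor, it may
      // also refer to memory on another device or in a remote address space.
      TFE_TensorHandle* h = TFE_NewTensorHandle(t, status);
      if (TF_GetCode(status) == TF_OK) {
        printf("dtype=%d ndims=%d\n", TFE_TensorHandleDataType(h),
               TFE_TensorHandleNumDims(h, status));
        TFE_DeleteTensorHandle(h);
      }

      TF_DeleteTensor(t);
      TF_DeleteStatus(status);
      return 0;
    }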
  10. tensorflow/c/c_api_experimental.h

    // from a placeholder node "arg_tensor_enqueue_<tensor_id>".
    //
    // `tensor` is still owned by the caller. This call will be blocked if the queue
    // has reached its capacity, and will be unblocked when the queued tensors again
    // drop below the capacity due to dequeuing.
    //
    // Tensors are dequeued via the corresponding TF dequeue op.
    // TODO(hongm): Add support for `timeout_ms`.
    TF_CAPI_EXPORT extern void TF_EnqueueNamedTensor(TF_Session* session,
    C
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Thu Apr 27 21:07:00 GMT 2023
    - 15.1K bytes
    - Viewed (0)
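    The blocking behavior documented above (enqueue blocks at capacity and
    wakes when a dequeue frees a slot) is the classic bounded blocking
    queue. A self-contained C++ sketch of that contract, independent of the
    TensorFlow API:

    #include <condition_variable>
    #include <cstddef>
    #include <deque>
    #include <mutex>

    template <typename T>
    class BoundedQueue {
     public:
      explicit BoundedQueue(std::size_t capacity) : capacity_(capacity) {}

      void Enqueue(T item) {
        std::unique_lock<std::mutex> lock(mu_);
        // Block while full, matching the documented enqueue contract.
        not_full_.wait(lock, [this] { return items_.size() < capacity_; });
        items_.push_back(std::move(item));
        not_empty_.notify_one();
      }

      T Dequeue() {
        std::unique_lock<std::mutex> lock(mu_);
        not_empty_.wait(lock, [this] { return !items_.empty(); });
        T item = std::move(items_.front());
        items_.pop_front();
        not_full_.notify_one();  // Unblock a waiting producer.
        return item;
      }

     private:
      const std::size_t capacity_;
      std::mutex mu_;
      std::condition_variable not_full_, not_empty_;
      std::deque<T> items_;
    };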