Results 1 - 10 of 20 for sizi (0.14 sec)

  1. tensorflow/c/c_api.cc

        metadata.list_size = attr->list().field##_size(); \
        __VA_ARGS__;                                      \
        break;                                            \
      }
    
          LIST_CASE(
              s, TF_ATTR_STRING, metadata.total_size = 0;
              for (int i = 0; i < attr->list().s_size();
                   ++i) { metadata.total_size += attr->list().s(i).size(); });
          LIST_CASE(i, TF_ATTR_INT);
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Mon Apr 15 03:35:10 GMT 2024
    - 102.3K bytes
    - Viewed (0)
  2. tensorflow/c/eager/c_api_unified_experimental.cc

      unwrap(o)->expected_num_outputs = num_outputs;
      unwrap(o)->outputs.clear();
      unwrap(o)->outputs.resize(num_outputs);
    }
    int TF_OutputListNumOutputs(TF_OutputList* o) {
      return unwrap(o)->outputs.size();
    }
    TF_AbstractTensor* TF_OutputListGet(TF_OutputList* o, int i) {
      return wrap(unwrap(o)->outputs[i]);
    }
    void TF_OutputListPushBack(TF_OutputList* o, TF_AbstractTensor* tensor,
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Thu Feb 15 09:49:45 GMT 2024
    - 9K bytes
    - Viewed (0)
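    The TF_OutputList accessors excerpted above (TF_OutputListNumOutputs, TF_OutputListGet, TF_OutputListPushBack) are the C-API view of an op's output vector. Below is a minimal, hedged usage sketch, not taken from the repository: it assumes `tensor` is a valid TF_AbstractTensor* obtained elsewhere, and that TF_OutputListPushBack takes a trailing TF_Status*, which the excerpt truncates.

      #include "tensorflow/c/eager/c_api_unified_experimental.h"
      #include "tensorflow/c/tf_status.h"

      void CollectOutput(TF_AbstractTensor* tensor) {
        TF_Status* status = TF_NewStatus();
        TF_OutputList* outputs = TF_NewOutputList();
        // Append one abstract tensor to the list.
        TF_OutputListPushBack(outputs, tensor, status);
        if (TF_GetCode(status) == TF_OK) {
          int n = TF_OutputListNumOutputs(outputs);  // now 1
          TF_AbstractTensor* first = TF_OutputListGet(outputs, 0);
          (void)n;
          (void)first;
        }
        TF_DeleteOutputList(outputs);
        TF_DeleteStatus(status);
      }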
  3. tensorflow/c/experimental/filesystem/plugins/posix/posix_filesystem.cc

    #include "tensorflow/c/tf_status.h"
    
    // Implementation of a filesystem for POSIX environments.
    // This filesystem will support `file://` and empty (local) URI schemes.
    
    static void* plugin_memory_allocate(size_t size) { return calloc(1, size); }
    static void plugin_memory_free(void* ptr) { free(ptr); }
    
    // SECTION 1. Implementation for `TF_RandomAccessFile`
    // ----------------------------------------------------------------------------
    C++
    - Registered: Tue Apr 23 12:39:09 GMT 2024
    - Last Modified: Sun Mar 24 20:08:23 GMT 2024
    - 15.8K bytes
    - Viewed (0)
  4. tensorflow/c/c_api_experimental.cc

          static_cast<bool>(should_enable);
    }
    
    void TF_SetXlaMinClusterSize(int size) {
      tensorflow::MarkForCompilationPassFlags* flags =
          tensorflow::GetMarkForCompilationPassFlags();
      flags->tf_xla_min_cluster_size = size;
    }
    
    TF_Buffer* TF_CreateConfig(unsigned char enable_xla_compilation,
                               unsigned char gpu_memory_allow_growth,
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Mon Apr 15 03:35:10 GMT 2024
    - 29.4K bytes
    - Viewed (0)
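    The excerpt above exposes two experimental knobs: TF_SetXlaMinClusterSize, which sets the node-count threshold for XLA auto-clustering, and TF_CreateConfig, which builds a serialized ConfigProto as a TF_Buffer. The sketch below shows one plausible way a client might call them; the third TF_CreateConfig argument (num_cpu_devices) and the cleanup via TF_DeleteBuffer are assumptions based on the public experimental header, not code from this file.

      #include "tensorflow/c/c_api.h"
      #include "tensorflow/c/c_api_experimental.h"

      void ConfigureXla() {
        // Only auto-cluster subgraphs of at least 4 nodes for XLA compilation.
        TF_SetXlaMinClusterSize(4);

        // Serialized ConfigProto with XLA compilation and GPU memory growth enabled.
        TF_Buffer* config = TF_CreateConfig(/*enable_xla_compilation=*/1,
                                            /*gpu_memory_allow_growth=*/1,
                                            /*num_cpu_devices=*/1);
        // ... hand `config` to session/context options as needed, then release it ...
        TF_DeleteBuffer(config);
      }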
  5. tensorflow/c/experimental/gradients/grad_test_helper.cc

                 AbstractContext* ctx,
                 absl::Span<AbstractTensorHandle* const> inputs,
                 absl::Span<AbstractTensorHandle*> outputs) -> Status {
        Tape tape(/*persistent=*/false);
        for (size_t i{}; i < inputs.size(); ++i) {
          tape.Watch(inputs[i]);
        }
        std::vector<AbstractTensorHandle*> temp_outputs(1);
        AbstractContextPtr tape_ctx(new TapeContext(ctx, &tape, grad_registry));
        TF_RETURN_IF_ERROR(
    C++
    - Registered: Tue Mar 26 12:39:09 GMT 2024
    - Last Modified: Wed Feb 28 13:53:47 GMT 2024
    - 5K bytes
    - Viewed (0)
  6. tensorflow/c/experimental/gradients/custom_gradient_test.cc

      Status Compute(AbstractContext* ctx,
                     absl::Span<AbstractTensorHandle* const> grad_outputs,
                     absl::Span<AbstractTensorHandle*> grad_inputs) override {
        CHECK_EQ(grad_outputs.size(), 1);
        CHECK_EQ(grad_inputs.size(), 1);
        grad_inputs[0] = grad_outputs[0];
        if (grad_inputs[0]) {
          grad_inputs[0]->Ref();
        }
        return absl::OkStatus();
      }
    };
    
    // Computes:
    //
    // @tf.custom_gradient
    C++
    - Registered: Tue Mar 26 12:39:09 GMT 2024
    - Last Modified: Wed Feb 28 13:53:47 GMT 2024
    - 4.8K bytes
    - Viewed (0)
  7. tensorflow/c/experimental/gradients/array_grad.cc

     public:
      Status Compute(AbstractContext* ctx,
                     absl::Span<AbstractTensorHandle* const> grad_outputs,
                     absl::Span<AbstractTensorHandle*> grad_inputs) override {
        for (int i = 0; i < grad_outputs.size(); i++) {
          auto grad_input = grad_outputs[i];
          // TODO(srbs): Should we add a copy contructor to AbstractTensorHandle
          // that takes care of this similar to `Tensor`?
          if (grad_input) {
    C++
    - Registered: Tue Apr 09 12:39:09 GMT 2024
    - Last Modified: Wed Feb 28 13:53:47 GMT 2024
    - 1.6K bytes
    - Viewed (0)
  8. tensorflow/c/eager/gradients.cc

      std::vector<int64_t> input_ids(inputs.size());
      std::vector<tensorflow::DataType> input_dtypes(inputs.size());
      for (int i = 0; i < inputs.size(); i++) {
        input_ids[i] = ToId(inputs[i]);
        input_dtypes[i] = inputs[i]->DataType();
      }
      std::vector<TapeTensor> tape_tensors;
      tape_tensors.reserve(outputs.size());
      for (auto t : outputs) {
        tape_tensors.push_back(TapeTensor(t));
      }
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Thu Feb 15 09:49:45 GMT 2024
    - 19.3K bytes
    - Viewed (0)
  9. tensorflow/c/eager/dlpack.cc

      }
    }
    
    // Wraps the deleter function of DLManagedTensor to match the function signature
    // TFE_NewTensorHandleFromDeviceMemory.
    void DeallocatorWrapperFunc(void* data, size_t len, void* dlmt_vptr) {
      TFE_CallDLManagedTensorDeleter(dlmt_vptr);
    }
    
    // Checks whether the stride array matches the layout of compact, row-majored
    // data.
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Thu Feb 15 09:49:45 GMT 2024
    - 12.8K bytes
    - Viewed (0)
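    DeallocatorWrapperFunc above exists only to adapt the DLManagedTensor deleter to the (data, len, arg) callback shape that TFE_NewTensorHandleFromDeviceMemory expects. The same adapter pattern is shown here as a hypothetical standalone sketch, not the TensorFlow implementation:

      #include <cstddef>
      #include <cstdlib>

      // Hypothetical single-argument cleanup routine.
      static void FreeBuffer(void* ptr) { free(ptr); }

      // Adapter whose signature matches a `void (*)(void* data, size_t len, void* arg)`
      // deallocator slot; the extra parameters are simply ignored.
      static void DeallocatorWrapper(void* data, size_t /*len*/, void* /*arg*/) {
        FreeBuffer(data);
      }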
  10. tensorflow/c/eager/parallel_device/parallel_device_lib.cc

      for (int device_index = 0; device_index < underlying_devices_.size();
           ++device_index) {
        DeviceThread* device_thread = device_threads_[device_index].get();
        std::vector<TFE_TensorHandle*> device_inputs;
        device_inputs.reserve(inputs.size());
        for (int input_index = 0; input_index < inputs.size(); ++input_index) {
          // Parallel tensors are divided between operations by device.
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Fri Feb 09 07:47:20 GMT 2024
    - 25.4K bytes
    - Viewed (1)