Results 1 - 10 of 25 for Index (0.15 sec)

  1. ci/official/utilities/code_check_changed_files.bats

        echo "============================="
        grep -e 'BUILD' $BATS_FILE_TMPDIR/changed_files \
            | xargs buildifier -v -mode=diff -diff_command="git diff --no-index"
    }
    
    # Note: this is excluded on the full code base, since any submitted code must
    # have passed Google's internal style guidelines.
    @test "Check formatting for C++ files" {
    Plain Text
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Wed Jan 10 19:39:41 GMT 2024
    - 4K bytes
    - Viewed (0)
  2. tensorflow/c/c_api_test.cc

      TF_Output add_in_0 = TF_OperationInput(TF_Input{add, 0});
      EXPECT_EQ(feed, add_in_0.oper);
      EXPECT_EQ(0, add_in_0.index);
      TF_Output add_in_1 = TF_OperationInput(TF_Input{add, 1});
      EXPECT_EQ(three, add_in_1.oper);
      EXPECT_EQ(0, add_in_1.index);
      EXPECT_EQ(0, TF_OperationOutputNumConsumers(TF_Output{add, 0}));
      EXPECT_EQ(0, TF_OperationNumControlInputs(add));
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Mon Apr 15 03:35:10 GMT 2024
    - 96.9K bytes
    - Viewed (3)
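
    The assertions above exercise the C API's graph-introspection calls: TF_OperationInput maps an input slot to the TF_Output that feeds it. A minimal sketch of the same calls, assuming a TF_Operation* obtained elsewhere (the function name PrintInputs is illustrative):

      #include <stdio.h>
      #include "tensorflow/c/c_api.h"

      // Print which operation/output feeds each input slot of `op`.
      void PrintInputs(TF_Operation* op) {
        int n = TF_OperationNumInputs(op);
        for (int i = 0; i < n; ++i) {
          TF_Output src = TF_OperationInput(TF_Input{op, i});
          printf("input %d <- %s:%d\n", i, TF_OperationName(src.oper), src.index);
        }
      }
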
  3. tensorflow/c/c_api.cc

      if (ic->num_outputs() <= new_src.index) {
        status->status = tensorflow::errors::OutOfRange(
            "Cannot update edge. Output index [", new_src.index,
            "] is greater than the number of total outputs [", ic->num_outputs(),
            "].");
        return;
      }
      tensorflow::shape_inference::ShapeHandle shape = ic->output(new_src.index);
    
      tensorflow::shape_inference::InferenceContext* ic_dst =
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Mon Apr 15 03:35:10 GMT 2024
    - 102.3K bytes
    - Viewed (0)
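
    This excerpt is the bounds check inside TF_UpdateEdge: an output index at or past ic->num_outputs() is rejected with the OutOfRange status shown. A sketch of the calling side, assuming both operations already live in the graph (Rewire is an illustrative name):

      #include <stdio.h>
      #include "tensorflow/c/c_api.h"

      // Rewire dst_op's input 0 to read from new_src_op's output 0. An output
      // index >= TF_OperationNumOutputs(new_src_op) trips the check above.
      void Rewire(TF_Graph* graph, TF_Operation* new_src_op, TF_Operation* dst_op) {
        TF_Status* status = TF_NewStatus();
        TF_UpdateEdge(graph, TF_Output{new_src_op, 0}, TF_Input{dst_op, 0}, status);
        if (TF_GetCode(status) != TF_OK) fprintf(stderr, "%s\n", TF_Message(status));
        TF_DeleteStatus(status);
      }
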
  4. ci/official/containers/linux_arm64/cuda.packages.txt

    # CuDNN: https://docs.nvidia.com/deeplearning/sdk/cudnn-install/index.html#ubuntu-network-installation
    libcudnn8=8.9.6.50-1+cuda12.2
    libcudnn8-dev=8.9.6.50-1+cuda12.2
    
    # This can be removed once NVIDIA publishes a cuda-12.3.2 Docker image.
    # For now it ensures that we install at least version 12.3.107 of PTXAS,
    # since 12.3.103 has a bug.
    Plain Text
    - Registered: Tue May 07 12:40:20 GMT 2024
    - Last Modified: Mon Jan 08 09:32:19 GMT 2024
    - 368 bytes
    - Viewed (1)
  5. ci/official/envs/linux_arm64

    TFCI_DOCKER_ENABLE=1
    TFCI_DOCKER_IMAGE=gcr.io/tensorflow-sigs/build-arm64:tf-2-16-multi-python
    TFCI_DOCKER_PULL_ENABLE=1
    TFCI_DOCKER_REBUILD_ARGS="--target=tf ci/official/containers/linux_arm64"
    TFCI_INDEX_HTML_ENABLE=1
    TFCI_LIB_SUFFIX="-cpu-linux-arm64"
    TFCI_OUTPUT_DIR=build_output
    TFCI_WHL_AUDIT_ENABLE=1
    TFCI_WHL_AUDIT_PLAT=manylinux2014_aarch64
    TFCI_WHL_BAZEL_TEST_ENABLE=1
    TFCI_WHL_SIZE_LIMIT=240M
    Plain Text
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Thu Feb 15 23:12:40 GMT 2024
    - 1.5K bytes
    - Viewed (1)
  6. tensorflow/c/c_api_experimental.cc

                                            int index) {
      DCHECK(index >= 0 && index < shape_list->num_items);
      TF_ShapeAndType& shape = shape_list->items[index];
      DCHECK(shape.dims == nullptr) << "Shape at " << index << " is already set!";
      shape.num_dims = -1;
      shape.dims = nullptr;
    }
    
    void TF_ShapeAndTypeListSetDtype(TF_ShapeAndTypeList* shape_list, int index,
                                     TF_DataType dtype) {
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Mon Apr 15 03:35:10 GMT 2024
    - 29.4K bytes
    - Viewed (0)
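
    The excerpt shows TF_ShapeAndTypeListSetUnknownShape marking an entry as unknown (num_dims of -1) after a bounds DCHECK. A minimal sketch of building such a list with the companion functions from c_api_experimental.h (the sizes and dtype are illustrative):

      #include <stdint.h>
      #include "tensorflow/c/c_api_experimental.h"

      // Entry 0: known [2, 3] float shape; entry 1: unknown shape, as above.
      TF_ShapeAndTypeList* MakeShapes() {
        TF_ShapeAndTypeList* shapes = TF_NewShapeAndTypeList(2);
        const int64_t dims[] = {2, 3};
        TF_ShapeAndTypeListSetShape(shapes, 0, dims, 2);
        TF_ShapeAndTypeListSetDtype(shapes, 0, TF_FLOAT);
        TF_ShapeAndTypeListSetUnknownShape(shapes, 1);
        return shapes;  // caller frees with TF_DeleteShapeAndTypeList
      }
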
  7. tensorflow/c/eager/c_api_test_util.cc

        const tensorflow::ServerDef& server_def, int task_index) {
      tensorflow::ServerDef server_def_copy = server_def;
      tensorflow::ClusterDef* cluster_def = server_def_copy.mutable_cluster();
      tensorflow::JobDef* job_def = cluster_def->mutable_job(0);
      const int port = tensorflow::testing::PickUnusedPortOrDie();
      job_def->mutable_tasks()->at(task_index) =
          tensorflow::strings::StrCat("localhost:", port);
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Wed Feb 21 22:37:46 GMT 2024
    - 23.5K bytes
    - Viewed (2)
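
    The test util rewrites one task's address in a copied ServerDef so a restarted server can bind a fresh port. A sketch of constructing such a ServerDef from scratch with the standard protobuf setters (the job name and task layout are illustrative):

      #include "tensorflow/core/platform/strcat.h"
      #include "tensorflow/core/protobuf/tensorflow_server.pb.h"

      // One-job, one-task cluster; the util above mutates the same
      // mutable_cluster()/mutable_tasks() fields to swap in a free port.
      tensorflow::ServerDef MakeServerDef(int port) {
        tensorflow::ServerDef server_def;
        server_def.set_job_name("worker");
        server_def.set_task_index(0);
        tensorflow::JobDef* job = server_def.mutable_cluster()->add_job();
        job->set_name("worker");
        (*job->mutable_tasks())[0] = tensorflow::strings::StrCat("localhost:", port);
        return server_def;
      }
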
  8. tensorflow/c/eager/tape.h

      in_grads.reserve(input_tensors.size());
      for (int target_index = 0; target_index < input_tensors.size();
           ++target_index) {
        const auto current_grad =
            accumulated_gradients_.find(input_tensors[target_index].GetID());
        if (current_grad == accumulated_gradients_.end()) {
          if (IsDtypeTrainable(input_tensors[target_index].GetDType())) {
C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Tue Apr 02 12:40:29 GMT 2024
    - 47.2K bytes
    - Viewed (1)
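
    The loop above looks up any gradient already accumulated for each input tensor by ID, falling back to a zero gradient for trainable dtypes. A schematic of that lookup with a plain map (TensorId, Gradient, and GatherInputGrads are illustrative stand-ins for the tape's internal types):

      #include <cstdint>
      #include <unordered_map>
      #include <vector>

      using TensorId = int64_t;
      using Gradient = std::vector<float>;  // stand-in for the real gradient type

      // nullptr marks "nothing accumulated yet"; the tape later treats that
      // as a zero gradient when the dtype is trainable.
      std::vector<const Gradient*> GatherInputGrads(
          const std::unordered_map<TensorId, Gradient>& accumulated,
          const std::vector<TensorId>& input_ids) {
        std::vector<const Gradient*> in_grads;
        in_grads.reserve(input_ids.size());
        for (TensorId id : input_ids) {
          auto it = accumulated.find(id);
          in_grads.push_back(it == accumulated.end() ? nullptr : &it->second);
        }
        return in_grads;
      }
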
  9. tensorflow/c/eager/c_api.cc

      return num_elements;
    }
    
    int64_t TFE_TensorHandleDim(TFE_TensorHandle* h, int dim_index,
                                TF_Status* status) {
      if (h == nullptr) {
        status->status = tensorflow::errors::InvalidArgument("Invalid handle");
        return -1;
      }
    
      int64_t dim = -1;
      status->status = tensorflow::unwrap(h)->Dim(dim_index, &dim);
      return dim;
    }
    
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Tue Mar 12 20:00:09 GMT 2024
    - 43.9K bytes
    - Viewed (2)
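
    TFE_TensorHandleDim reports one dimension of an eager tensor and signals failure through the TF_Status out-parameter. A minimal sketch pairing it with TFE_TensorHandleNumDims (PrintShape is an illustrative name):

      #include <stdio.h>
      #include "tensorflow/c/eager/c_api.h"
      #include "tensorflow/c/tf_status.h"

      // Print the shape of an eager tensor handle, one dimension at a time.
      void PrintShape(TFE_TensorHandle* h, TF_Status* status) {
        int num_dims = TFE_TensorHandleNumDims(h, status);
        if (TF_GetCode(status) != TF_OK) return;
        for (int i = 0; i < num_dims; ++i) {
          int64_t dim = TFE_TensorHandleDim(h, i, status);
          if (TF_GetCode(status) != TF_OK) return;
          printf("dim %d: %lld\n", i, (long long)dim);
        }
      }
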
  10. tensorflow/c/eager/parallel_device/parallel_device_lib.cc

      for (int device_index = 0; device_index < underlying_devices_.size();
           ++device_index) {
        DeviceThread* device_thread = device_threads_[device_index].get();
        std::vector<TFE_TensorHandle*> device_inputs;
        device_inputs.reserve(inputs.size());
        for (int input_index = 0; input_index < inputs.size(); ++input_index) {
          // Parallel tensors are divided between operations by device.
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Fri Feb 09 07:47:20 GMT 2024
    - 25.4K bytes
    - Viewed (1)
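
    The nested loop fans each logical input out so every device thread receives the device_index-th component of every parallel tensor. A schematic of that slicing with plain vectors (SliceForDevice and the vector-of-vectors layout are illustrative, not the library's types):

      #include <cstddef>
      #include <vector>

      // inputs[i][d] is the d-th device component of logical input i; collect
      // the column for one device, mirroring the inner loop above.
      template <typename T>
      std::vector<T> SliceForDevice(const std::vector<std::vector<T>>& inputs,
                                    size_t device_index) {
        std::vector<T> device_inputs;
        device_inputs.reserve(inputs.size());
        for (size_t input_index = 0; input_index < inputs.size(); ++input_index) {
          device_inputs.push_back(inputs[input_index][device_index]);
        }
        return device_inputs;
      }
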