Results 1 - 10 of 16 for fmix (1.63 sec)

  1. tensorflow/c/experimental/filesystem/plugins/gcs/ram_file_block_cache.cc

      // we have inconsistent state within the cache. Note: it's possible some
      // incomplete reads may still go undetected.
      if (block->data.size() < block_size_) {
        Key fmax = std::make_pair(key.first, std::numeric_limits<size_t>::max());
        auto fcmp = block_map_.upper_bound(fmax);
        if (fcmp != block_map_.begin() && key < (--fcmp)->first) {
          return TF_SetStatus(status, TF_INTERNAL,
    C++
    - Registered: Tue Apr 23 12:39:09 GMT 2024
    - Last Modified: Thu Jul 16 01:39:09 GMT 2020
    - 11.1K bytes
    - Viewed (0)
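
The snippet in result 1 builds a sentinel key with the maximum possible offset and walks back from `upper_bound` to see whether a later block already exists for the same file. A minimal standalone sketch of that map-lookup pattern, using a plain `std::map` and hypothetical file names in place of the cache's `block_map_`:

    #include <cstddef>
    #include <iostream>
    #include <limits>
    #include <map>
    #include <string>
    #include <utility>

    // Hypothetical stand-in for the cache's block map: keyed by (filename, offset).
    using Key = std::pair<std::string, std::size_t>;

    int main() {
      std::map<Key, int> block_map = {
          {{"a.bin", 0}, 1}, {{"a.bin", 4096}, 2}, {{"b.bin", 0}, 3}};

      // To ask "does a.bin have any block after offset 0?", build a sentinel key
      // with the maximum offset, search past it, then step back one entry.
      Key key = {"a.bin", 0};
      Key fmax = std::make_pair(key.first, std::numeric_limits<std::size_t>::max());
      auto fcmp = block_map.upper_bound(fmax);
      if (fcmp != block_map.begin() && key < (--fcmp)->first) {
        // The last entry for "a.bin" sits at a larger offset than `key`, which in
        // the cache signals a potentially inconsistent short read.
        std::cout << "later block exists at offset " << fcmp->first.second << "\n";
      }
      return 0;
    }
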
  2. tensorflow/c/c_api_test.cc

      ASSERT_TRUE(o == nullptr);  // It is unsafe to move memory TF might not own.
      TF_DeleteTensor(t);
      EXPECT_TRUE(deallocator_called);
    }
    
    TEST(CAPI, LibraryLoadFunctions) {
      // TODO(b/73318067): Fix linking for the GPU test generated by the
      // tf_cuda_cc_test() bazel rule and remove the next line.
      if (!GPUDeviceName().empty()) return;
    
    #if !defined(TENSORFLOW_NO_SHARED_OBJECTS)
      {
        // Load the library.
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Mon Apr 15 03:35:10 GMT 2024
    - 96.9K bytes
    - Viewed (3)
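
Result 2 asserts that destroying a tensor invokes the caller-supplied deallocator. A hedged sketch of that wiring with the public TF C API (`TF_NewTensor` / `TF_DeleteTensor`); the flag name and buffer shape here are illustrative, not taken from the test:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <cstdlib>
    #include "tensorflow/c/c_api.h"

    // Hypothetical flag flipped by the deallocator so the caller can observe it.
    static bool deallocator_called = false;

    static void Deallocate(void* data, size_t len, void* arg) {
      std::free(data);
      deallocator_called = true;
    }

    int main() {
      const int64_t dims[] = {2, 2};
      const size_t len = 4 * sizeof(float);
      float* values = static_cast<float*>(std::malloc(len));
      // TensorFlow takes ownership of `values` and invokes Deallocate once the
      // buffer is no longer needed.
      TF_Tensor* t = TF_NewTensor(TF_FLOAT, dims, 2, values, len,
                                  &Deallocate, /*deallocator_arg=*/nullptr);
      TF_DeleteTensor(t);
      assert(deallocator_called);
      return 0;
    }
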
  3. tensorflow/c/eager/c_api_test.cc

    }
    
    void ExecuteAdd(bool async, bool forward_input, bool tfrt) {
    #ifdef PLATFORM_WINDOWS
      // On windows, we flakily get a failure due to pointer instability.
      // Disable the 4 tests using this helper until we fix the issue.
      return;
    #else
      TF_Status* status = TF_NewStatus();
      TFE_ContextOptions* opts = TFE_NewContextOptions();
      TFE_ContextOptionsSetTfrt(opts, tfrt);
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Thu Aug 03 20:50:20 GMT 2023
    - 94.6K bytes
    - Viewed (1)
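
Result 3's helper constructs an eager context from `TFE_ContextOptions` before running the add. A minimal sketch of the surrounding create/teardown boilerplate with the public TFE C API, assuming only the standard options calls (the TFRT toggle in the snippet comes from the experimental header and is omitted here):

    #include <cassert>
    #include "tensorflow/c/eager/c_api.h"

    int main() {
      TF_Status* status = TF_NewStatus();
      TFE_ContextOptions* opts = TFE_NewContextOptions();
      // Toggle async execution; the test above additionally toggles TFRT.
      TFE_ContextOptionsSetAsync(opts, /*enable=*/0);
      TFE_Context* ctx = TFE_NewContext(opts, status);
      assert(TF_GetCode(status) == TF_OK);
      TFE_DeleteContextOptions(opts);

      // ... build and execute ops against `ctx` here ...

      TFE_DeleteContext(ctx);
      TF_DeleteStatus(status);
      return 0;
    }
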
  4. CONTRIBUTING.md

        to the core.
    *   As every PR requires several CPU/GPU hours of CI testing, we discourage
        submitting PRs to fix one typo, one warning, etc. We recommend fixing the
        same issue at the file level at least (e.g.: fix all typos in a file, fix
        all compiler warnings in a file, etc.)
    *   Tests should follow the
        [testing best practices](https://www.tensorflow.org/community/contribute/tests)
    Plain Text
    - Registered: Tue May 07 12:40:20 GMT 2024
    - Last Modified: Thu Mar 21 11:45:51 GMT 2024
    - 15.6K bytes
    - Viewed (0)
  5. ci/official/containers/linux_arm64/setup.python.sh

      # set up symlink for devtoolset-10
      rm -f /dt10/usr/include/aarch64-linux-gnu/$f
      ln -s /usr/include/aarch64-linux-gnu/$f /dt10/usr/include/aarch64-linux-gnu/$f
    done
    popd
    
    # Python 3.10 include headers fix:
    # sysconfig.get_path('include') incorrectly points to /usr/local/include/python
    # map /usr/include/python3.10 to /usr/local/include/python3.10
    if [[ ! -f "/usr/local/include/$VERSION" ]]; then
    Shell Script
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Fri Sep 29 00:26:34 GMT 2023
    - 2.1K bytes
    - Viewed (0)
  6. ci/official/containers/linux_arm64/builder.devtoolset/build_devtoolset.sh

    ../glibc-src/configure --prefix=/usr --disable-werror --enable-obsolete-rpc --disable-profile
    make -j$(nproc)
    make install DESTDIR=${TARGET}
    cd ..
    
    # Symlinks in the binary distribution are set up for installation in /usr, we
    # need to fix up all the links to stay within /${TARGET}.
    /fixlinks.sh "/${TARGET}"
    
    # Patch to allow non-glibc 2.12 compatible builds to work.
    sed -i '54i#define TCP_USER_TIMEOUT 18' "/${TARGET}/usr/include/netinet/tcp.h"
    
    Shell Script
    - Registered: Tue May 07 12:40:20 GMT 2024
    - Last Modified: Fri Sep 29 00:26:34 GMT 2023
    - 6.1K bytes
    - Viewed (1)
  7. tensorflow/c/eager/immediate_execution_distributed_manager.h

      // Initializes context for the local worker and no contexts will be created
      // for remote workers. Currently this only works for resetting context.
      // TODO(b/289445025): Consider removing this when we find a proper fix.
      virtual Status InitializeLocalOnlyContext(const ServerDef& server_def,
                                                int keep_alive_secs) = 0;
    
      // Set up a multi-client distributed execution environment. Must be called
    C
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Wed Feb 21 22:37:46 GMT 2024
    - 2.9K bytes
    - Viewed (0)
  8. tensorflow/c/experimental/gradients/nn_grad.cc

                        AbstractTensorHandle* mat,
                        absl::Span<AbstractTensorHandle*> outputs) {
      if (!isa<ImmediateExecutionContext>(ctx)) {
        // TODO(b/168850692): Fix this.
        return errors::Unimplemented(
            "BroadcastMul is not supported in tracing mode yet.");
      }
      auto imm_ctx = dyn_cast<ImmediateExecutionContext>(ctx);
      AbstractTensorPtr minus_1(imm_ctx->CreateInt32Scalar(-1));
    C++
    - Registered: Tue Mar 26 12:39:09 GMT 2024
    - Last Modified: Wed Feb 28 13:53:47 GMT 2024
    - 5.7K bytes
    - Viewed (0)
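
Result 8 guards the eager-only path with LLVM-style `isa<>`/`dyn_cast<>` before downcasting the context. A standalone sketch of the same guard-then-downcast pattern using plain `dynamic_cast` and hypothetical stand-in types (not the real TensorFlow classes):

    #include <iostream>

    // Hypothetical stand-ins for AbstractContext / ImmediateExecutionContext.
    struct AbstractContext { virtual ~AbstractContext() = default; };
    struct ImmediateExecutionContext : AbstractContext {
      int CreateInt32Scalar(int v) const { return v; }  // placeholder
    };
    struct TracingContext : AbstractContext {};

    // Mirrors the guard in nn_grad.cc: refuse contexts that cannot execute
    // immediately, then downcast and use the eager-only API.
    bool BroadcastMulLike(AbstractContext* ctx) {
      auto* imm_ctx = dynamic_cast<ImmediateExecutionContext*>(ctx);
      if (imm_ctx == nullptr) {
        std::cerr << "not supported in tracing mode yet\n";
        return false;
      }
      int minus_1 = imm_ctx->CreateInt32Scalar(-1);
      std::cout << "scalar: " << minus_1 << "\n";
      return true;
    }

    int main() {
      TracingContext tracing;
      ImmediateExecutionContext eager;
      BroadcastMulLike(&tracing);  // prints the unsupported message
      BroadcastMulLike(&eager);    // prints "scalar: -1"
      return 0;
    }
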
  9. tensorflow/c/eager/gradients.cc

      // and not for correctness. The only downside of keeping this 1 seems to be
      // that the gradient accumulation is unbounded and we will never
      // aggressively aggregate accumulated gradients to recover memory.
      // Revisit and fix.
      return 1;
    }
    
    // Consumes references to the tensors in the gradient_tensors list and returns
    // a tensor with the result.
    AbstractTensorHandle* TapeVSpace::AggregateGradients(
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Thu Feb 15 09:49:45 GMT 2024
    - 19.3K bytes
    - Viewed (0)
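
Result 9's comment concerns when accumulated gradients get aggregated. As a rough illustration of the aggregation step itself, a sketch that sums a list of per-use gradients elementwise, with a flat `std::vector<float>` standing in for a real tensor (all gradients assumed to share one shape):

    #include <cstddef>
    #include <iostream>
    #include <vector>

    // Hypothetical flat "tensor" so the aggregation step is self-contained.
    using Tensor = std::vector<float>;

    // Mirrors the idea behind TapeVSpace::AggregateGradients: reduce the
    // per-use gradients of one tensor to a single elementwise sum.
    Tensor AggregateGradients(const std::vector<Tensor>& gradient_tensors) {
      if (gradient_tensors.empty()) return {};
      Tensor result(gradient_tensors.front().size(), 0.0f);
      for (const Tensor& g : gradient_tensors) {
        for (std::size_t i = 0; i < result.size(); ++i) result[i] += g[i];
      }
      return result;
    }

    int main() {
      Tensor sum = AggregateGradients({{1.0f, 2.0f}, {3.0f, 4.0f}});
      std::cout << sum[0] << " " << sum[1] << "\n";  // 4 6
      return 0;
    }
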
  10. tensorflow/c/eager/custom_device_test.cc

      ASSERT_NE(TF_OK, TF_GetCode(status.get()));
      ASSERT_TRUE(absl::StrContains(TF_Message(status.get()), custom0));
      ASSERT_TRUE(absl::StrContains(TF_Message(status.get()), custom1));
    
      // Custom device: mix of custom/physical places the op on the custom device.
      matmul.reset(MatMulOp(context.get(), hcustom0.get(), hcpu.get()));
      num_retvals = 1;
      executed = false;
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Thu Aug 27 23:39:24 GMT 2020
    - 18.4K bytes
    - Viewed (0)
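
Result 10 exercises placement when an op mixes custom-device and physical-device inputs. A hedged sketch of explicit placement with the TFE C API: here the device is pinned by hand with `TFE_OpSetDevice`, and the device string is illustrative:

    #include <cassert>
    #include "tensorflow/c/eager/c_api.h"

    int main() {
      TF_Status* status = TF_NewStatus();
      TFE_ContextOptions* opts = TFE_NewContextOptions();
      TFE_Context* ctx = TFE_NewContext(opts, status);
      assert(TF_GetCode(status) == TF_OK);
      TFE_DeleteContextOptions(opts);

      TFE_Op* matmul = TFE_NewOp(ctx, "MatMul", status);
      assert(TF_GetCode(status) == TF_OK);
      // Pin the op to a physical device; a registered custom device name
      // could be passed the same way instead of relying on placement rules.
      TFE_OpSetDevice(matmul, "/job:localhost/replica:0/task:0/device:CPU:0",
                      status);
      assert(TF_GetCode(status) == TF_OK);

      TFE_DeleteOp(matmul);
      TFE_DeleteContext(ctx);
      TF_DeleteStatus(status);
      return 0;
    }
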