Results 1 - 10 of 14 for shield (0.19 sec)

  1. tensorflow/c/eager/c_api_cluster_test.cc

      ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
    
      // The device of var_handle0 is local device which is the same before and
      // after cluster update. Remove resource with valid device should succeed.
      TFE_Op* op = TFE_NewOp(ctx, "DestroyResourceOp", status);
      ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
      TFE_OpAddInput(op, var_handle0, status);
      TFE_OpSetDevice(op, dev0_name, status);
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Fri Apr 14 10:03:59 GMT 2023
    - 19.3K bytes
    - Viewed (0)
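    The excerpt above shows the eager C API pattern of building an op, attaching inputs, pinning a device, and executing. A minimal sketch of that pattern, assuming a live `TFE_Context*` and an existing resource handle to destroy (error checks omitted):

      #include "tensorflow/c/eager/c_api.h"

      // Destroys the resource behind `var_handle` on the given device.
      void DestroyResource(TFE_Context* ctx, TFE_TensorHandle* var_handle,
                           const char* dev_name) {
        TF_Status* status = TF_NewStatus();
        TFE_Op* op = TFE_NewOp(ctx, "DestroyResourceOp", status);
        TFE_OpAddInput(op, var_handle, status);  // the resource handle to destroy
        TFE_OpSetDevice(op, dev_name, status);   // pin execution to one device
        int num_retvals = 0;
        TFE_Execute(op, /*retvals=*/nullptr, &num_retvals, status);  // op has no outputs
        TFE_DeleteOp(op);
        TF_DeleteStatus(status);
      }
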
  2. tensorflow/c/c_api_experimental.cc

          tensorflow::GetBuildXlaOpsPassFlags()->tf_xla_disable_constant_folding);
    }
    
    void TF_SetXlaConstantFoldingDisabled(unsigned char should_enable) {
      tensorflow::GetBuildXlaOpsPassFlags()->tf_xla_disable_constant_folding =
          static_cast<bool>(should_enable);
    }
    
    void TF_SetXlaMinClusterSize(int size) {
      tensorflow::MarkForCompilationPassFlags* flags =
          tensorflow::GetMarkForCompilationPassFlags();
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Mon Apr 15 03:35:10 GMT 2024
    - 29.4K bytes
    - Viewed (0)
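    The two setters shown above are declared in tensorflow/c/c_api_experimental.h; a minimal sketch of calling them from client code (the particular values are illustrative only):

      #include "tensorflow/c/c_api_experimental.h"

      // Adjust XLA auto-clustering flags before any graphs are built.
      void ConfigureXlaFlags() {
        TF_SetXlaConstantFoldingDisabled(1);  // non-zero disables constant folding
        TF_SetXlaMinClusterSize(4);           // only compile clusters of >= 4 nodes
      }
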
  3. tensorflow/c/experimental/gradients/math_grad_test.cc

      }
    
      ASSERT_NO_FATAL_FAILURE(CompareNumericalAndAutodiffGradients(
          DivNoNanModel, DivNoNanGradModel, immediate_execution_ctx_.get(),
          {x.get(), y.get()}, UseFunction()));
    
      // `DivNoNanGradModel` should return {`0`, `0`} when the denominator is `0`.
      AbstractTensorHandlePtr z;
      {
        AbstractTensorHandle* z_raw = nullptr;
        status_ = TestScalarTensorHandle<float, TF_FLOAT>(
    C++
    - Registered: Tue Mar 26 12:39:09 GMT 2024
    - Last Modified: Thu Apr 13 17:32:14 GMT 2023
    - 16.3K bytes
    - Viewed (0)
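    The comment in the excerpt pins down the DivNoNan gradient convention; a plain-C++ restatement of that property (an illustration of the math only, not the helpers the test file uses):

      #include <utility>

      // Gradients of z = div_no_nan(x, y): dz/dx = 1/y and dz/dy = -x/y^2,
      // except that both are defined to be 0 when the denominator is 0.
      std::pair<float, float> DivNoNanGrad(float x, float y, float upstream) {
        if (y == 0.0f) return {0.0f, 0.0f};
        return {upstream / y, upstream * (-x / (y * y))};
      }
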
  4. tensorflow/c/experimental/filesystem/plugins/posix/posix_filesystem.cc

    }
    
    static void Sync(const TF_WritableFile* file, TF_Status* status) {
      // For historical reasons, this does the same as `Flush` at the moment.
      // TODO(b/144055243): This should use `fsync`/`sync`.
      Flush(file, status);
    }
    
    static void Close(const TF_WritableFile* file, TF_Status* status) {
      auto posix_file = static_cast<PosixFile*>(file->plugin_file);
    
    C++
    - Registered: Tue Apr 23 12:39:09 GMT 2024
    - Last Modified: Sun Mar 24 20:08:23 GMT 2024
    - 15.8K bytes
    - Viewed (0)
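    The TODO in the excerpt says Sync should eventually use fsync; a hedged sketch of what that could look like, assuming the plugin keeps a stdio handle behind TF_WritableFile::plugin_file (the PosixFile layout here is an assumption, not copied from the plugin):

      #include <cerrno>
      #include <cstdio>
      #include <cstring>
      #include <unistd.h>

      #include "tensorflow/c/experimental/filesystem/filesystem_interface.h"
      #include "tensorflow/c/tf_status.h"

      // Assumed plugin-side record stored behind TF_WritableFile::plugin_file.
      struct PosixFile {
        const char* filename;
        FILE* handle;
      };

      // Flush stdio buffers, then fsync the descriptor so data reaches stable storage.
      static void SyncWithFsync(const TF_WritableFile* file, TF_Status* status) {
        auto* posix_file = static_cast<PosixFile*>(file->plugin_file);
        if (fflush(posix_file->handle) != 0 ||
            fsync(fileno(posix_file->handle)) != 0) {
          TF_SetStatus(status, TF_INTERNAL, strerror(errno));
          return;
        }
        TF_SetStatus(status, TF_OK, "");
      }
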
  5. tensorflow/c/eager/parallel_device/parallel_device_testlib.cc

      TFE_OpSetAttrShape(op.get(), "shape", dims, num_dims, status);
      TFE_OpSetAttrString(op.get(), "container", "", 0);
      // Use the special GUID for no buffer sharing
      //
      // TODO(allenl): Should we provide a better API for this? AFAIK this is the
      // only reasonable way to make variables with no aliasing using the eager C
      // API.
      std::string no_sharing = "cd2c89b7-88b7-44c8-ad83-06c2a9158347";
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Tue Jun 15 15:44:44 GMT 2021
    - 12.5K bytes
    - Viewed (0)
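    The GUID above is used as a unique shared_name so the new variable cannot alias an existing buffer; a minimal sketch of creating such a handle with the eager C API (context creation and status checks are omitted; the scalar float shape is an assumption):

      #include <string>

      #include "tensorflow/c/eager/c_api.h"

      // Creates a scalar float resource-variable handle with a unique
      // shared_name, so no buffer sharing can occur.
      TFE_TensorHandle* MakeUnsharedVariableHandle(TFE_Context* ctx,
                                                   TF_Status* status) {
        TFE_Op* op = TFE_NewOp(ctx, "VarHandleOp", status);
        TFE_OpSetAttrType(op, "dtype", TF_FLOAT);
        TFE_OpSetAttrShape(op, "shape", /*dims=*/nullptr, /*num_dims=*/0, status);
        TFE_OpSetAttrString(op, "container", "", 0);
        std::string no_sharing = "cd2c89b7-88b7-44c8-ad83-06c2a9158347";
        TFE_OpSetAttrString(op, "shared_name", no_sharing.data(), no_sharing.size());
        TFE_TensorHandle* var_handle = nullptr;
        int num_retvals = 1;
        TFE_Execute(op, &var_handle, &num_retvals, status);
        TFE_DeleteOp(op);
        return var_handle;
      }
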
  6. tensorflow/c/eager/parallel_device/parallel_device.cc

        const TFE_OpAttrs* attributes, int expected_max_outputs,
        TF_Status* status) {
      absl::optional<std::vector<MaybeParallelTensorOwned>> result;
      // TODO(allenl): We should remove "TPU" from these op names at the very least,
      // or consider other ways of packing/unpacking parallel tensors.
      if (operation_name == std::string("TPUReplicatedInput")) {
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Wed Mar 29 22:05:31 GMT 2023
    - 18.3K bytes
    - Viewed (0)
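    The excerpt is from the custom parallel device, which fans ops out over several underlying devices and packs/unpacks tensors through the TPUReplicatedInput/TPUReplicatedOutput names mentioned in the TODO. A hedged sketch of registering such a device, assuming the helpers in parallel_device.h and c_api_experimental.h (the device names are placeholders):

      #include "tensorflow/c/eager/c_api_experimental.h"
      #include "tensorflow/c/eager/parallel_device/parallel_device.h"

      // Registers a custom device that mirrors ops onto two underlying CPUs.
      void RegisterTwoWayParallelDevice(TFE_Context* ctx, TF_Status* status) {
        const char* device_name =
            "/job:localhost/replica:0/task:0/device:CUSTOM:0";
        const char* underlying[] = {
            "/job:localhost/replica:0/task:0/device:CPU:0",
            "/job:localhost/replica:0/task:0/device:CPU:1"};
        TFE_CustomDevice device;
        void* device_info;
        tensorflow::parallel_device::AllocateParallelDevice(
            device_name, underlying, /*num_underlying_devices=*/2, &device,
            &device_info);
        TFE_RegisterCustomDevice(ctx, device, device_name, device_info, status);
      }
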
  7. tensorflow/c/eager/parallel_device/parallel_device_lib.cc

    //
    // DeviceThread itself is thread-safe, in that StartExecute will block if there
    // is a pending execution. Since StartExecute is equivalent to grabbing a lock,
    // multiple DeviceThreads should always be accessed in the same order to avoid
    // deadlocks.
    class DeviceThread {
     public:
      // Starts a background thread waiting for `StartExecute`.
      explicit DeviceThread(const std::string& device, const bool is_async,
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Fri Feb 09 07:47:20 GMT 2024
    - 25.4K bytes
    - Viewed (1)
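    The comment above states the deadlock-avoidance rule; a generic C++ illustration of the same idea, independent of the DeviceThread class (the container and names are assumptions):

      #include <mutex>
      #include <vector>

      // If a caller must hold several per-device locks at once, it should take
      // them in one fixed order (here: ascending index) and release them in
      // reverse, so two callers can never each wait on a lock the other holds.
      void WithAllDevicesLocked(std::vector<std::mutex>& per_device_locks) {
        for (std::mutex& m : per_device_locks) m.lock();
        // ... start execution on every device ...
        for (auto it = per_device_locks.rbegin(); it != per_device_locks.rend();
             ++it) {
          it->unlock();
        }
      }
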
  8. tensorflow/c/eager/parallel_device/parallel_device_test.cc

      ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
    
      // Un-pack the parallel tensor to verify that the operation was
      // successful. The resulting structure should be:
      //   second_device{first_device{1. * 3., 2. * 3.}, 3. * 3.}.
      std::array<TensorHandlePtr, 2> second_components;
      ExtractPerDeviceValues(context.get(), multiply_result.get(),
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Thu Jul 08 23:47:35 GMT 2021
    - 29.3K bytes
    - Viewed (1)
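    "Un-packing" here means splitting one handle that lives on the parallel device into one component handle per underlying device. A hedged sketch of that step via the TPUReplicatedOutput op (the replica count of 2 and the device-name parameter are assumptions for illustration):

      #include "tensorflow/c/eager/c_api.h"

      // Splits a packed parallel tensor into its two per-device components.
      void UnpackTwoComponents(TFE_Context* ctx, TFE_TensorHandle* packed,
                               const char* parallel_device_name,
                               TFE_TensorHandle* components[2],
                               TF_Status* status) {
        TFE_Op* op = TFE_NewOp(ctx, "TPUReplicatedOutput", status);
        TFE_OpSetAttrInt(op, "num_replicas", 2);
        TFE_OpAddInput(op, packed, status);
        TFE_OpSetDevice(op, parallel_device_name, status);
        int num_retvals = 2;
        TFE_Execute(op, components, &num_retvals, status);
        TFE_DeleteOp(op);
      }
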
  9. tensorflow/c/eager/c_api.cc

    #endif  // !IS_MOBILE_PLATFORM
      return tensorflow::wrap(eager_context);
    }
    
    void TFE_DeleteContext(TFE_Context* ctx) {
      if (ctx == nullptr) {
        return;
      }
    
      // ctx->RefCountIsOne() should be true here.
      tensorflow::unwrap(ctx)->Release();
    }
    
    TF_DeviceList* TFE_ContextListDevices(TFE_Context* ctx, TF_Status* status) {
      TF_DeviceList* l = new TF_DeviceList;
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Tue Mar 12 20:00:09 GMT 2024
    - 43.9K bytes
    - Viewed (2)
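    TFE_DeleteContext and TFE_ContextListDevices shown above bracket the usual context lifecycle; a minimal sketch of using them together (error checks omitted):

      #include <cstdio>

      #include "tensorflow/c/eager/c_api.h"

      // Create a context, print every visible device, then tear everything down.
      void ListDevicesOnce() {
        TF_Status* status = TF_NewStatus();
        TFE_ContextOptions* opts = TFE_NewContextOptions();
        TFE_Context* ctx = TFE_NewContext(opts, status);
        TFE_DeleteContextOptions(opts);
        TF_DeviceList* devices = TFE_ContextListDevices(ctx, status);
        for (int i = 0; i < TF_DeviceListCount(devices); ++i) {
          std::printf("%s\n", TF_DeviceListName(devices, i, status));
        }
        TF_DeleteDeviceList(devices);
        TFE_DeleteContext(ctx);
        TF_DeleteStatus(status);
      }
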
  10. tensorflow/c/eager/c_api_experimental.cc

            tensorflow::unwrap(op_to_reset);
        op->Clear();
        status->status = op->Reset(op_or_function_name, raw_device_name);
      } else {
        TF_SetStatus(status, TF_INVALID_ARGUMENT,
                     "op_to_reset should not be nullptr");
      }
    }
    
    void TFE_ContextEnableGraphCollection(TFE_Context* ctx) {
      tensorflow::unwrap(ctx)->SetShouldStoreGraphs(true);
    }
    
    void TFE_ContextDisableGraphCollection(TFE_Context* ctx) {
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Thu Apr 11 23:52:39 GMT 2024
    - 35.9K bytes
    - Viewed (3)
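    TFE_OpReset, shown above, lets callers reuse one op allocation instead of calling TFE_NewOp per step; a hedged sketch of that reuse pattern (the MatMul op name, the inputs, and the absence of error checking are assumptions for illustration):

      #include "tensorflow/c/eager/c_api.h"
      #include "tensorflow/c/eager/c_api_experimental.h"

      // Runs the same op twice, clearing inputs/attrs between runs via TFE_OpReset.
      void RunMatMulTwice(TFE_Context* ctx, TFE_TensorHandle* a,
                          TFE_TensorHandle* b, TF_Status* status) {
        TFE_Op* op = TFE_NewOp(ctx, "MatMul", status);
        for (int i = 0; i < 2; ++i) {
          TFE_OpReset(op, "MatMul", /*raw_device_name=*/nullptr, status);
          TFE_OpAddInput(op, a, status);
          TFE_OpAddInput(op, b, status);
          TFE_TensorHandle* result = nullptr;
          int num_retvals = 1;
          TFE_Execute(op, &result, &num_retvals, status);
          TFE_DeleteTensorHandle(result);
        }
        TFE_DeleteOp(op);
      }
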