Results 1 - 10 of 31 for device (0.06 sec)
- tensorflow/c/c_api.h
    TF_ImportGraphDefOptions* opts, const char* prefix);
// Set the execution device for nodes in `graph_def`.
// Only applies to nodes where a device was not already explicitly specified.
// `device` is copied and has no lifetime requirements.
TF_CAPI_EXPORT extern void TF_ImportGraphDefOptionsSetDefaultDevice(
    TF_ImportGraphDefOptions* opts, const char* device);
// Set whether to uniquify imported operation names. If true, imported operation
Registered: Tue Nov 05 12:39:12 UTC 2024 - Last Modified: Thu Oct 26 21:08:15 UTC 2023 - 82.3K bytes - Viewed (0)
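
The header comment above documents TF_ImportGraphDefOptionsSetDefaultDevice. Below is a minimal sketch of how the option is typically threaded into an import call, assuming a serialized GraphDef is already held in a TF_Buffer; the helper name ImportWithDefaultDevice and the "/device:GPU:0" string are illustrative, not from the header.

#include <stdio.h>
#include "tensorflow/c/c_api.h"

/* Import `graph_def` into `graph`, pinning otherwise-unplaced nodes to one device. */
void ImportWithDefaultDevice(TF_Graph* graph, const TF_Buffer* graph_def) {
  TF_Status* status = TF_NewStatus();
  TF_ImportGraphDefOptions* opts = TF_NewImportGraphDefOptions();
  // Nodes with no explicit device in the GraphDef are placed here; the string
  // is copied, so it has no lifetime requirements beyond this call.
  TF_ImportGraphDefOptionsSetDefaultDevice(opts, "/device:GPU:0");
  TF_GraphImportGraphDef(graph, graph_def, opts, status);
  if (TF_GetCode(status) != TF_OK) {
    fprintf(stderr, "import failed: %s\n", TF_Message(status));
  }
  TF_DeleteImportGraphDefOptions(opts);
  TF_DeleteStatus(status);
}
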
- tensorflow/c/eager/c_api_test.cc
}
auto tag = tensorflow::strings::StrCat("Device #", i, " (", name, ")");
// Copy to device
TFE_TensorHandle* hdevice =
    TFE_TensorHandleCopyToDevice(hcpu, ctx, name.c_str(), status.get());
if (TF_GetCode(status.get()) != TF_OK) {
  ADD_FAILURE() << tag << " -- " << TF_Message(status.get());
  continue;
}
// Copy from device to the same device.
TFE_TensorHandle* hdevice2 =
Registered: Tue Nov 05 12:39:12 UTC 2024 - Last Modified: Thu Aug 03 20:50:20 UTC 2023 - 94.6K bytes - Viewed (0)
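
The test excerpt copies a CPU tensor handle to each registered device. A stripped-down sketch of the same call pattern follows, assuming the eager context and the source handle were created elsewhere; CopyToNamedDevice is an illustrative name, not a helper from the test.

#include <stdio.h>
#include "tensorflow/c/eager/c_api.h"

/* Copy `hcpu` to the device named `device_name`; returns NULL on failure. */
TFE_TensorHandle* CopyToNamedDevice(TFE_Context* ctx, TFE_TensorHandle* hcpu,
                                    const char* device_name) {
  TF_Status* status = TF_NewStatus();
  TFE_TensorHandle* hdevice =
      TFE_TensorHandleCopyToDevice(hcpu, ctx, device_name, status);
  if (TF_GetCode(status) != TF_OK) {
    fprintf(stderr, "copy to %s failed: %s\n", device_name, TF_Message(status));
    hdevice = NULL;
  }
  TF_DeleteStatus(status);
  return hdevice;  // Caller owns the handle; release with TFE_DeleteTensorHandle.
}
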
- tensorflow/c/c_api_test.cc
  csession.CloseAndDelete(s);
  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
  TF_DeleteGraph(graph);
  TF_DeleteStatus(s);
}
// If `device` is non-empty, run Min op on that device.
// Otherwise run it on the default device (CPU).
void RunMinTest(const string& device, bool use_XLA) {
  TF_Status* s = TF_NewStatus();
  TF_Graph* graph = TF_NewGraph();
  // Make a placeholder operation.
Registered: Tue Nov 05 12:39:12 UTC 2024 - Last Modified: Sat Oct 12 16:27:48 UTC 2024 - 97K bytes - Viewed (0)
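
The excerpt stops at "Make a placeholder operation." A hedged sketch of that step, including the optional device pinning the test's comment describes; MakePlaceholder is an illustrative name and the dtype choice is an assumption, not lifted from the test.

#include "tensorflow/c/c_api.h"

/* Create an int32 Placeholder op, optionally pinned to `device`. */
TF_Operation* MakePlaceholder(TF_Graph* graph, TF_Status* s,
                              const char* device) {
  TF_OperationDescription* desc = TF_NewOperation(graph, "Placeholder", "feed");
  TF_SetAttrType(desc, "dtype", TF_INT32);
  // If `device` is non-empty, place the op there; otherwise leave placement to
  // the default device (CPU), as the test comment describes.
  if (device != NULL && device[0] != '\0') {
    TF_SetDevice(desc, device);
  }
  return TF_FinishOperation(desc, s);
}
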
- tests/test_application.py
Registered: Sun Nov 03 07:19:11 UTC 2024 - Last Modified: Thu Apr 18 21:56:59 UTC 2024 - 52.2K bytes - Viewed (0)
- tensorflow/BUILD
name = "is_cuda_enabled", actual = if_oss( "@local_config_cuda//:is_cuda_enabled", "@local_config_cuda//cuda:using_config_cuda", ), ) # Config setting that is satisfied when CUDA device code should be compiled # with clang. It does not imply that CUDA support has been enabled. alias( name = "is_cuda_compiler_clang", actual = if_oss( "@local_config_cuda//:is_cuda_compiler_clang",
Registered: Tue Nov 05 12:39:12 UTC 2024 - Last Modified: Wed Oct 16 05:28:35 UTC 2024 - 53.5K bytes - Viewed (0)
- CHANGELOG/CHANGELOG-1.3.md
* Mounting (only 'default-token') volume takes a long time when creating a batch of pods (parallelization issue) ([28616](https://github.com/kubernetes/kubernetes/issues/28616))
* Error while tearing down pod, "device or resource busy" on service account secret ([28750](https://github.com/kubernetes/kubernetes/issues/28750))

# v1.3.2

[Documentation](http://kubernetes.github.io) & [Examples](http://releases.k8s.io/release-1.3/examples)
Registered: Fri Nov 01 09:05:11 UTC 2024 - Last Modified: Thu Dec 24 02:28:26 UTC 2020 - 84K bytes - Viewed (0)
- tensorflow/c/c_api_function_test.cc
};
TEST_F(CApiFunctionTest, OneOp_ZeroInputs_OneOutput) {
  /*
   *  constant
   *      |
   *      v
   */
  // Define
  TF_Operation* c = ScalarConst(10, func_graph_, s_, "scalar10");
  Define(-1, {}, {}, {c}, {});
  // Use, run, and verify
  TF_Operation* func_op = Use({});
  Run({}, func_op, 10);
  VerifyFDef({"scalar10_0"}, {}, {{"scalar10", DT_INT32}},
Registered: Tue Nov 05 12:39:12 UTC 2024 - Last Modified: Thu Jul 20 22:08:54 UTC 2023 - 63.6K bytes - Viewed (0)
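
The test's Define(-1, {}, {}, {c}, {}) helper ultimately wraps TF_GraphToFunction. A rough sketch of that underlying call for the one-constant, zero-input, one-output case; the "MyFunc" name and the DefineConstFunction wrapper are illustrative, and num_opers = -1 is assumed here to mean "use every op in the graph".

#include "tensorflow/c/c_api.h"

/* Turn a graph containing the constant op `scalar10` into a zero-input,
   one-output TF_Function. */
TF_Function* DefineConstFunction(TF_Graph* func_graph, TF_Operation* scalar10,
                                 TF_Status* s) {
  TF_Output output = {scalar10, 0};  // single output: the constant's value
  return TF_GraphToFunction(func_graph, "MyFunc",
                            /*append_hash_to_fn_name=*/0,
                            /*num_opers=*/-1, /*opers=*/NULL,  // take all ops
                            /*ninputs=*/0, /*inputs=*/NULL,
                            /*noutputs=*/1, &output,
                            /*output_names=*/NULL,
                            /*opts=*/NULL, /*description=*/NULL, s);
}
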
- fess-crawler/src/test/resources/extractor/eml/sample2.eml
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<meta name="viewport" content="width=device-width, minimum-scale=1.0, maximum-scale=1.0, user-scalable=0" />
<meta name="apple-mobile-web-app-capable" content="yes" />
<style type="text/css">
@media only screen and (max-width: 420px) {
  a[class="article-headline"] {
Registered: Sun Nov 10 03:50:12 UTC 2024 - Last Modified: Sat Jan 16 07:50:35 UTC 2016 - 91.6K bytes - Viewed (0)
- src/main/assemblies/files/fess-service-x86.exe
Kaoru FUZITA <******@****.***> 1449825322 +0900
Registered: Thu Oct 31 13:40:30 UTC 2024 - Last Modified: Fri Dec 11 09:15:22 UTC 2015 - 79K bytes - Viewed (0)
- .bazelrc
build:mkl --define=build_with_mkl=true --define=enable_mkl=true
build:mkl --define=tensorflow_mkldnn_contraction_kernel=0
build:mkl --define=build_with_openmp=true
build:mkl -c opt
# config to build OneDNN backend with a user specified threadpool.
build:mkl_threadpool --define=build_with_mkl=true --define=enable_mkl=true
build:mkl_threadpool --define=tensorflow_mkldnn_contraction_kernel=0
Registered: Tue Nov 05 12:39:12 UTC 2024 - Last Modified: Mon Oct 28 22:02:31 UTC 2024 - 51.3K bytes - Viewed (0)
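
The build:mkl lines above define a named Bazel config, so it is normally activated from the command line. A minimal usage sketch, with an illustrative build target that is not taken from the file:

bazel build --config=mkl //tensorflow/tools/pip_package:build_pip_package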