- Sort: Score
- Results per page: 10
- Languages: All
Results 31 - 40 of 63 for device_str (0.59 sec)
-
tensorflow/compiler/jit/device_util.cc
compilation_device = nullptr; } id_to_compilation_device_.push_back(compilation_device); return DeviceId(new_id); } string DeviceInfoCache::DebugString(const DeviceSet& device_set) const { std::vector<string> names; device_set.ForEach([&](DeviceId device_id) { names.push_back(string(GetNameFor(device_id))); return true; }); return absl::StrCat("[", absl::StrJoin(names, ","), "]"); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 12 06:33:33 UTC 2024 - 7.8K bytes - Viewed (0) -
tensorflow/compiler/jit/test_util.h
#include <map> #include <memory> #include <optional> #include <string> #include <utility> #include <vector> #include "tensorflow/compiler/jit/shape_inference.h" #include "tensorflow/core/common_runtime/device_mgr.h" #include "tensorflow/core/common_runtime/optimization_registry.h" #include "tensorflow/core/common_runtime/process_function_library_runtime.h" #include "tensorflow/core/framework/function.h"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 16 18:03:15 UTC 2023 - 3.1K bytes - Viewed (0) -
tensorflow/compiler/jit/increase_dynamism_for_auto_jit_pass_test.cc
devices.push_back(FakeDevice::Make(kDeviceName, DEVICE_GPU)); devices.push_back(FakeDevice::Make(kHostName, DEVICE_CPU)); std::unique_ptr<DeviceSet> device_set(new DeviceSet()); for (auto& device : devices) { device_set->AddDevice(device.get()); } auto graph = std::make_unique<Graph>(OpRegistry::Global()); SessionOptions session_options; session_options.config.mutable_graph_options()
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 08:47:20 UTC 2024 - 18.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfr/integration/graph_decompose_pass.h
// Whether to run this pass. If this is enabled, the GraphDef will be imported // to MLIR even no tf composition file is found. ::tensorflow::MlirOptimizationPassState GetPassState( const DeviceSet* device_set, const ConfigProto& config_proto, const Graph& graph, const FunctionLibraryDefinition& function_library) const override; // This should be used as a thin mapper around mlir::ModulePass::runOnModule
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Sep 06 19:12:29 UTC 2023 - 2.3K bytes - Viewed (0) -
tensorflow/compiler/jit/build_xla_ops_pass.cc
device_set.Insert(device_id); } TF_ASSIGN_OR_RETURN(jit::DeviceId result, PickDeviceForXla(*device_info_cache, device_set, /*allow_mixing_unknown_and_cpu=*/true)); VLOG(2) << "For " << function_name << " PickDeviceForXla(" << device_info_cache->DebugString(device_set) << ") -> "
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 12 06:33:33 UTC 2024 - 24.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/utils/fake_session.h
#define TENSORFLOW_COMPILER_MLIR_TENSORFLOW_UTILS_FAKE_SESSION_H_ #include <memory> #include <string> #include <utility> #include <vector> #include "tensorflow/core/common_runtime/device_mgr.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/status.h" #include "tensorflow/core/public/session.h" namespace mlir { namespace TF {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 06 04:50:13 UTC 2023 - 3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/utils/session_utils.cc
#include "llvm/ADT/SmallSet.h" #include "llvm/ADT/StringRef.h" #include "mlir/Support/LLVM.h" // from @llvm-project #include "tensorflow/compiler/mlir/utils/string_container_utils.h" #include "tensorflow/core/common_runtime/device_mgr.h" #include "tensorflow/core/framework/device.h" #include "tensorflow/core/framework/resource_var.h" namespace mlir { namespace tf_saved_model { std::string GetVariableName(TF::VarHandleOp var_handle_op) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 3.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/graph_optimization_pass.cc
const std::string& function_name, const ConfigProto& config_proto, ModuleOp module, const Graph& graph, const tensorflow::FunctionLibraryDefinition& function_library) { if (GetPassState(/*device_set=*/nullptr, config_proto, graph, function_library) == ::tensorflow::MlirOptimizationPassState::Disabled) { VLOG(1) << "Skipping MLIR Graph Optimization Pass"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 09:56:53 UTC 2024 - 3K bytes - Viewed (0) -
tensorflow/c/experimental/saved_model/core/ops/variable_ops_test.cc
#include "tensorflow/c/eager/immediate_execution_tensor_handle.h" #include "tensorflow/c/experimental/saved_model/core/test_utils.h" #include "tensorflow/c/tensor_interface.h" #include "tensorflow/core/common_runtime/device_mgr.h" #include "tensorflow/core/common_runtime/eager/context.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/lib/core/status_test_util.h"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Aug 21 19:26:54 UTC 2020 - 3.8K bytes - Viewed (0) -
tensorflow/c/experimental/saved_model/core/constant_loading_test.cc
#include "tensorflow/c/experimental/saved_model/core/saved_model_utils.h" #include "tensorflow/c/experimental/saved_model/core/test_utils.h" #include "tensorflow/core/common_runtime/device_mgr.h" #include "tensorflow/core/common_runtime/eager/context.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor.pb.h" #include "tensorflow/core/framework/tensor_shape.h"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Aug 11 01:20:50 UTC 2021 - 4.2K bytes - Viewed (0)