- Sort by: Score
- Results per page: 10
- Languages All
Results 51 - 60 of 2,196 for Levine (0.49 sec)
-
tensorflow/compiler/mlir/tensorflow/transforms/mark_input_output_aliases.cc
#include "mlir/Pass/Pass.h" // from @llvm-project #include "tensorflow/compiler/mlir/tensorflow/ir/tf_device.h" #include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h" #define DEBUG_TYPE "tf-device-mark-input-output-aliases" namespace mlir { namespace TFDevice { #define GEN_PASS_DEF_MARKINPUTOUTPUTALIASESPASS #include "tensorflow/compiler/mlir/tensorflow/transforms/tf_passes.h.inc" namespace { struct MarkInputOutputAliasesPass
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 05 04:14:26 UTC 2024 - 7.5K bytes - Viewed (0) -
tensorflow/c/experimental/saved_model/core/revived_types/restored_resource.h
// To keep things simple, I've stuck to raw pointers for now. // // Params: // device - The device string associated with the SavedResource // https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/protobuf/saved_object_graph.proto#L182 // Conceptually, this is the same device used in CapturableResource:
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Sep 23 04:49:47 UTC 2020 - 4.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/ir/tf_device.h
// This file defines the tf_device dialect: it contains operations that model // TensorFlow's actions to launch computations on accelerator devices. #ifndef TENSORFLOW_COMPILER_MLIR_TENSORFLOW_IR_TF_DEVICE_H_ #define TENSORFLOW_COMPILER_MLIR_TENSORFLOW_IR_TF_DEVICE_H_ #include "mlir/Dialect/Func/IR/FuncOps.h" // from @llvm-project #include "mlir/IR/Builders.h" // from @llvm-project #include "mlir/IR/BuiltinOps.h" // from @llvm-project
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 22 14:25:57 UTC 2022 - 2.2K bytes - Viewed (0) -
tensorflow/compiler/jit/device_util.cc
}; devices.ForEach([&](jit::DeviceId device) { if (device_info_cache.IsGpu(device)) { if (maybe_gpu_device) { multiple_gpu_devices = is_multiple_devices(device, &maybe_gpu_device); if (multiple_gpu_devices) return false; } else { maybe_gpu_device = device; } } else if (device_info_cache.IsCpu(device)) { if (maybe_cpu_device) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 12 06:33:33 UTC 2024 - 7.8K bytes - Viewed (0) -
tensorflow/compiler/jit/mark_for_compilation_pass_test_helper.h
copy.session_name = std::move(name); return copy; } }; // Runs the MarkForCompilation pass on `graph` after assigning all nodes in // `graph` to the CPU device. To make testing easier, ignores device // registration and _XlaCompile attributes. static Status MarkForCompilation(std::unique_ptr<Graph>* graph, FunctionLibraryDefinition* flib_def,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 09 19:51:48 UTC 2023 - 2.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/tpu_colocate_composite_resource_ops.cc
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h" namespace mlir { namespace TFTPU { namespace { #define GEN_PASS_DEF_TPUCOLOCATECOMPOSITERESOURCEOPSPASS #include "tensorflow/compiler/mlir/tensorflow/transforms/tf_passes.h.inc" // Pass that co-locates resource ops that use composite device resources // (packed tensors) with the underlying physical TPU device. struct TPUColocateCompositeResourceOps : public impl::TPUColocateCompositeResourceOpsPassBase<
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 16 17:41:12 UTC 2023 - 5.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/launch_to_device_attribute.cc
#include "tensorflow/compiler/mlir/tensorflow/utils/attribute_utils.h" namespace mlir { namespace TFDevice { namespace { constexpr char kDeviceAttr[] = "device"; #define GEN_PASS_DEF_LAUNCHTODEVICEATTRIBUTEPASS #include "tensorflow/compiler/mlir/tensorflow/transforms/tf_device_passes.h.inc" struct LaunchToDeviceAttributePass : public impl::LaunchToDeviceAttributePassBase<
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/internal/passes/hoist_broadcast_read.cc
kFuncDeviceAttr)) { std::string device = attr.getValue().str(); tensorflow::DeviceNameUtils::ParsedName parsed_name; if (!tensorflow::DeviceNameUtils::ParseFullName(device, &parsed_name)) { return read->emitOpError() << "invalid device '" << device << "'"; } is_cpu_read = parsed_name.type == kCpuDeviceType; return success(); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.3K bytes - Viewed (0) -
tensorflow/c/eager/immediate_execution_operation.h
virtual ImmediateExecutionContext* GetContext() const = 0; // Following two methods are used to support custom device. // Return true if the inputs contain custom device tensor handle. It means // that the argument need to be handled by a custom device. virtual bool HasCustomDeviceInput() const = 0; virtual const tensorflow::OpDef* OpDef() const = 0;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Sep 26 22:40:32 UTC 2022 - 3.6K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_compile_util.h
#ifndef TENSORFLOW_COMPILER_JIT_XLA_COMPILE_UTIL_H_ #define TENSORFLOW_COMPILER_JIT_XLA_COMPILE_UTIL_H_ #include <memory> #include <string> #include "tensorflow/compiler/tf2xla/xla_argument.h" #include "tensorflow/core/graph/graph.h" namespace tensorflow { // The number of compiler threads to use for asynchronous device compilation. inline constexpr int64_t kNumAsyncDeviceCompilerThreads = 10;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 2.4K bytes - Viewed (0)