- Sort Score
- Result 10 results
- Languages All
Results 41 - 50 of 2,196 for Levine (0.19 sec)
-
tensorflow/c/experimental/grappler/grappler_internal.h
#include "tensorflow/core/platform/status.h" #include "tensorflow/core/protobuf/rewriter_config.pb.h" namespace tensorflow { namespace grappler { // Plugin initialization function that a device plugin // must define. typedef void (*TFInitGraphPluginFn)(TP_OptimizerRegistrationParams* const, TF_Status* const); // Registers Graph optimizers. Status InitGraphPlugin(void* dso_handle);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 08 08:58:23 UTC 2022 - 3.5K bytes - Viewed (0) -
tensorflow/compiler/jit/pjrt_compile_util.h
// OpKernelContext in the above function. // - `device`: the device used to compile the function. // - `rm`: the resource manager for DeviceCompiler to store JIT-compiled XLA // computation. // - `flr`: the FunctionLibraryRuntime for the `function`. Status CompileToPjRtLoadedExecutable( const DeviceBase* device, const XlaPlatformInfo& platform_info, const NameAttrList& function,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Sep 06 19:12:29 UTC 2023 - 2.7K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_device_context.h
} // Returns a device-to-device stream, in round-robin fashion. se::Stream* GetDeviceToDeviceStream(); Status ThenExecute(Device* device, stream_executor::Stream* stream, std::function<void()> func) override; private: bool UseMultipleStreams() const { return stream_ != host_to_device_stream_; } // The main compute stream of the device, used to synchronize the transfer
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Sep 06 19:12:29 UTC 2023 - 5.1K bytes - Viewed (0) -
tensorflow/compiler/jit/get_compiler_ir.h
#ifndef TENSORFLOW_COMPILER_JIT_GET_COMPILER_IR_H_ #define TENSORFLOW_COMPILER_JIT_GET_COMPILER_IR_H_ #include <string> #include <vector> #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "tensorflow/compiler/tf2xla/xla_compiler.h" #include "tensorflow/core/platform/statusor.h" namespace tensorflow { class ProcessFunctionLibraryRuntime; class Device; class Tensor; class TensorHandle;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 06:59:07 UTC 2024 - 2.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/lower_cluster_to_runtime_ops.h
// such as TPUExecute or XlaExecute depending on the device type and specific // host runtime. Also does some optimization. Will return an error if it fails. // The output Runtime ops depends on both Device Type and Runtime Host. // // Input: // Tensorflow Dialect MLIR with tf_device.cluster ops and virtual devices. // xla_device_type - The device type that is being targeted. // Output:
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Oct 31 21:47:17 UTC 2023 - 2.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfrt/ir/mlrt/tf_mlrt_ops.h
}; // The MLIR type represents a tensorflow::Device* class TFDeviceType : public mlir::Type::TypeBase<TFDeviceType, mlir::Type, mlir::TypeStorage> { public: using Base::Base; static constexpr mlir::StringLiteral name = "tensorflow.tf_mlirt.tf_device"; }; } // namespace tf_mlrt } // namespace tensorflow #define GET_OP_CLASSES #include "tensorflow/compiler/mlir/tfrt/ir/mlrt/tf_mlrt_ops.h.inc"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Dec 05 07:17:01 UTC 2023 - 2.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/replica_id_to_device_ordinal.cc
require_device_ordinal_ops.push_back(op); } }); if (require_device_ordinal_ops.size() == 1) { // If there is only one op which requires the device ordinal being set, // set the device ordinal to 0. Note: This is for single device use case // (eg. pf megacore) for which `_xla_replica_id` isn't set via the // replicate_to_islands pass. Operation* op = require_device_ordinal_ops.front();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Oct 05 23:50:19 UTC 2022 - 4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/experimental/tac/common/targets.h
return name; } // Get the target annotation from the op. inline std::optional<std::string> GetTargetAnnotation(Operation* op) { auto device = op->getAttrOfType<StringAttr>(kDevice); if (device == nullptr || device.getValue().empty()) return std::nullopt; return GetCanonicalHardwareName(device.getValue().str()); } // Get inference type attribute from the operation if available.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 06 03:08:33 UTC 2023 - 4.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/device_index_selector.cc
#include "tensorflow/compiler/mlir/tensorflow/transforms/passes.h" namespace mlir { namespace TF { namespace { #define GEN_PASS_DEF_DEVICEINDEXSELECTORPASS #include "tensorflow/compiler/mlir/tensorflow/transforms/tf_passes.h.inc" // Folds the DeviceIndex op to a constant value. The DeviceIndex returns the // index of the device the op should run on. The user can use this to provide // different op specializations. E.g., // // ```mlir
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Nov 03 12:35:38 UTC 2022 - 3.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/tensor_device_copy_conversion.cc
#include "tensorflow/compiler/mlir/tensorflow/utils/convert_tensor.h" namespace mlir { namespace TF { namespace { constexpr const char *kDeviceAttr = "device"; constexpr const char *kTFDeviceAttr = "tf.device"; #define GEN_PASS_DEF_TENSORDEVICECOPYCONVERSIONPASS #include "tensorflow/compiler/mlir/tensorflow/transforms/tf_passes.h.inc" struct TensorDeviceCopyConversionPass
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 4.3K bytes - Viewed (0)