- Sort by: Score
- Results per page: 10
- Languages All
Results 1 - 10 of 23 for xla_run (0.61 sec)
-
tensorflow/compiler/jit/build_xla_ops_pass.cc
// "Strict" compilation: every _XlaCompile invocation must compile the // cluster. ops::_XlaRun xla_run(root.WithOpName("xla_run"), xla_run_args, xla_compile.key, n->output_types()); MoveOutgoingEdges(g, /*old_node=*/n, /*new_node=*/xla_run.operation.node()); g->RemoveNode(n); } else {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 12 06:33:33 UTC 2024 - 24.3K bytes - Viewed (0) -
tensorflow/compiler/jit/build_xla_ops_pass_test.cc
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 08:47:20 UTC 2024 - 12.2K bytes - Viewed (0) -
tensorflow/compiler/jit/ops/xla_ops.cc
REGISTER_OP("_XlaMerge") .Input("partitioned_call: T") .Input("xla_run: T") .Output("output: T") .Attr("T: type") .SetShapeFn([](InferenceContext* c) { c->set_output(0, c->input(0)); return absl::OkStatus(); }) .Doc(R"(XLA Merge Op. For use by the XLA JIT only. Merges the outputs from the PartitionedCall node and the _XlaRun node.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Apr 06 09:08:06 UTC 2024 - 4.5K bytes - Viewed (0) -
tensorflow/compiler/jit/variable_info.h
VariableInfo(const VariableInfo&) = delete; VariableInfo& operator=(const VariableInfo&) = delete; // The index of the DT_RESOURCE input to the _XlaCompile/_XlaRun operator. // Note that the indices can be different between _XlaCompile and _XlaRun. int index() const { return index_; } // A pointer to the resource variable. May be null if this VariableInfo is // "empty", i.e. it does not track a resource variable.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Feb 14 21:57:02 UTC 2023 - 3.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/transforms/tfxla_device_specific_transforms.cc
TF::StatelessRandomGetAlgOp get_alg_op) { if (!device_type_.hasValue()) return failure(); xla::RandomAlgorithm xla_rng = tensorflow::DefaultRngAlgForDeviceType(device_type_); tensorflow::Algorithm tensorflow_rng = tensorflow::ToTensorflowAlgorithm(xla_rng); OpBuilder opbuilder(get_alg_op); auto tf_const = opbuilder.create<TF::ConstOp>(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 03 05:56:39 UTC 2023 - 3.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/ir/tf_side_effects.h
: public ::mlir::SideEffects::Resource::Base<WriteTrainingPredictions> { StringRef getName() final { return "WriteTrainingPredictions"; } }; struct _XlaRun : public ::mlir::SideEffects::Resource::Base<_XlaRun> { StringRef getName() final { return "_XlaRun"; } }; // Returns true iff resource type with given ID is only self-dependent, i.e., // there are no dependencies to other resource types (including unknown resource
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Dec 26 18:45:40 UTC 2023 - 4.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/ir/tf_device.h
// These operations do not map 1-1 to TensorFlow ops and requires a lowering // pass later to transform them into Compile/Run op pairs, like XlaCompile and // XlaRun. class TensorFlowDeviceDialect : public Dialect { public: static StringRef getDialectNamespace() { return "tf_device"; } // Constructing TensorFlowDevice dialect under an non-null MLIRContext.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 22 14:25:57 UTC 2022 - 2.2K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_platform_info.h
private: DeviceType device_type_; se::Platform::Id platform_id_; // xla_device_metadata_ lives in the tensorflow::DeviceBase in which the // XlaLaunch/_XlaCompile/_XlaRun op is placed and thus does not die before the // XlaLaunch/_XlaCompile/_XlaRun OpKernel. const XlaDevice::Metadata* xla_device_metadata_; // pjrt_device_metadata_ lives in tensorflow::PjRtBaseDevice in which the
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 7.2K bytes - Viewed (0) -
tensorflow/compiler/jit/flags.h
} // Allow using Device API (PjRt) for `device_type` in the XlaCompile and // XlaRun ops. Please note that `enabled_for_compile_and_run_` needs to be // true in addition to the `device_type` being allowed in order to use the // Device API for single device compilation and execution in the XlaCompile // and XlaRun ops. void AllowForDeviceInXlaCompileAndRun(const DeviceType& device_type) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 17 18:52:57 UTC 2024 - 14.5K bytes - Viewed (0) -
tensorflow/compiler/jit/kernels/xla_ops.cc
XlaCompileOp); REGISTER_KERNEL_BUILDER(Name("_XlaRun").Device(DEVICE_CPU), XlaRunOp); REGISTER_KERNEL_BUILDER(Name("_XlaRun").Device(DEVICE_GPU).HostMemory("key"), XlaRunOp); REGISTER_KERNEL_BUILDER( Name("_XlaRun").Device(DEVICE_DEFAULT).HostMemory("key"), XlaRunOp);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 22:46:36 UTC 2024 - 41.4K bytes - Viewed (0)