- Sort Score
- Results per page: 10
- Languages All
Results 61 - 70 of 226 for _xla (0.03 sec)
-
tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_to_xla_attribute_utils.h
// This header file defines common utils used when transforming TF ops to XLA // ops. #ifndef TENSORFLOW_COMPILER_MLIR_QUANTIZATION_TENSORFLOW_UTILS_TF_TO_XLA_ATTRIBUTE_UTILS_H_ #define TENSORFLOW_COMPILER_MLIR_QUANTIZATION_TENSORFLOW_UTILS_TF_TO_XLA_ATTRIBUTE_UTILS_H_ #include "mlir/IR/Builders.h" // from @llvm-project namespace mlir::quant { // Calculate padding values for XLA ops.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sun Dec 10 05:52:02 UTC 2023 - 2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/transforms/passes.h
bool prefer_tf2xla = false); // Legalizes TF/XLA communication ops (TF dialect) to HLO dialect communication // ops. std::unique_ptr<OperationPass<ModuleOp>> CreateLegalizeTFCommunicationPass(); // Legalizes TF/XLA collective ops (TF dialect) to HLO dialect collective // ops. std::unique_ptr<OperationPass<ModuleOp>> CreateLegalizeTFCollectivePass(); // Verifies that the TF/XLA ops have all been lowered to MHLO.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 28 21:49:50 UTC 2024 - 4.8K bytes - Viewed (0) -
tensorflow/compiler/jit/kernels/xla_ops.h
// which will be compiled and executed using XLA. The XlaLocalLaunchOp is // responsible for handling interactions with the TensorFlow executor. // Once all inputs are present, and their shapes are known, the op can // use a 'DeviceCompiler' to compile and execute code which is specific // to the shapes of input Tensors. // XlaLocalLaunchOp uses xla::LocalClient::Compile() and
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 16 23:44:26 UTC 2023 - 4.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/internal/legalize_tf_to_hlo.h
#include "mlir/Pass/Pass.h" // from @llvm-project #include "tensorflow/compiler/tf2xla/xla_helpers.h" #include "xla/client/compile_only_client.h" #include "tensorflow/core/tpu/kernels/tpu_compile_op_support.h" #include "tsl/platform/statusor.h" namespace tensorflow { namespace tf2xla { namespace internal { // Legalize the given MLIR module to XLA HLO using a combination of the MLIR // Bridge and XlaBuilder
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sun Apr 14 20:29:34 UTC 2024 - 2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/compile_mlir_util/shape-inference.mlir
// RUN: tf-mlir-translate -mlir-tf-to-hlo-text %s -tf-input-shapes=10,17:17,19 -tf-xla-emit-use-tuple-args -tf-xla-emit-return-tuple | FileCheck %s // RUN: tf-mlir-translate -mlir-tf-to-hlo-text %s -tf-input-shapes=10,17:17,19 | FileCheck -check-prefix=NO_TUPLES %s // RUN: tf-mlir-translate -mlir-tf-to-hlo-text-via-builder %s -tf-input-shapes=10,17:17,19 | FileCheck -check-prefix=NO_TUPLES %s module attributes {tf.versions = {producer = 179 : i32}} {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Mar 23 18:56:13 UTC 2022 - 969 bytes - Viewed (0) -
tensorflow/compiler/jit/pjrt_tensor_buffer_util.cc
return shape.num_elements() * DataTypeSize(dtype); } absl::StatusOr<Tensor> MakeTensorFromPjRtBuffer( const DataType dtype, const TensorShape& shape, std::unique_ptr<xla::PjRtBuffer> pjrt_buffer) { TF_ASSIGN_OR_RETURN(std::unique_ptr<xla::PjRtBuffer::ExternalReference> ref, pjrt_buffer->AcquireExternalReference()); auto* tensor_buffer = new PjRtTensorBuffer(ref->OpaqueDeviceMemoryDataPointer(),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Sep 14 18:14:47 UTC 2023 - 3.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/internal/legalize_tf_to_hlo.cc
std::vector<tpu::ShardingAndIndex>* arg_core_mapping, std::vector<std::vector<xla::Shape>>* per_core_arg_shapes, std::vector<std::unique_ptr<mlir::Pass>>& custom_legalization_passes, xla::CompileOnlyClient* client, XlaCompilationResult* compilation_result) { LOG_FIRST_N(INFO, 1) << "Compiling MLIR computation to XLA HLO using the " "Combined MLIR Tf2Xla Bridge.";
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sun Apr 14 20:29:34 UTC 2024 - 3.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/xla_call_module_round_trip.mlir
// RUN: tf-opt %s -split-input-file -tf-xla-call-module-serialization -tf-xla-call-module-deserialization | FileCheck %s // Tests that running xla-call-module-serialization followed by // xla-call-module-deserialization preserves the original module. // // Note that function names may be different, but arguments, attributes, // results, and function body should be the same. // CHECK-LABEL: module module { // CHECK-LABEL: func @main
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Nov 02 18:38:51 UTC 2023 - 2.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/compile_mlir_util/result-sharding.mlir
// RUN: tf-mlir-translate -mlir-tf-to-hlo-text %s -tf-input-shapes=128,10:10,1024:128,1024 -tf-xla-emit-use-tuple-args -tf-xla-emit-return-tuple | FileCheck %s module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 351 : i32}} {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Mar 23 18:56:13 UTC 2022 - 1.6K bytes - Viewed (0) -
tensorflow/compiler/jit/ops/xla_ops_grad.py
@ops.RegisterGradient("XlaClusterOutput") def _XlaClusterOutputGrad(_, grad): del grad # unused raise RuntimeError("Gradient computation of graph in xla.compile() is " "prohibited because it can cause performance degradation."
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Sep 28 21:37:05 UTC 2021 - 1.1K bytes - Viewed (0)