- Sort Score
- Results per page: 10
- Languages All
Results 91 - 100 of 245 for tpu0 (0.14 sec)
-
tensorflow/compiler/mlir/tf2xla/internal/mlir_bridge_pass_util.h
// _tpu_replicate or _xla_compile_device_type=TPU. The function library will be // skipped if nullptr is provided. bool IsSupportedByReplicatedBridge( const Graph& graph, const FunctionLibraryDefinition* function_library); // Check if an MLIR module has any ops with _tpu_replicate or // _xla_compile_device_type=TPU. bool IsSupportedByReplicatedBridge(mlir::ModuleOp module);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Mar 13 16:33:22 UTC 2024 - 2.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/cannonicalize_ops_outside_compilation.mlir
// RUN: tf-opt %s -tf-replicated-clustering-bridge-v2 -tfrt-lower-cluster-to-runtime-ops-tpu 2>&1 | FileCheck %s // This test verifies that the tail extraction is not terminated prematurely // due to the outside compilation attribute could be removed in // canonicalization of Reshape ops. // Reshape should not be executed on TPU as all are marked by outside // compilation. And there should be no host-device communication. // CHECK: tf._TPUCompileMlir
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Mar 13 21:23:47 UTC 2024 - 2.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/internal/passes/extract_head_tail_outside_compilation.cc
} // Returns a set of ops that are outside compiled and can be extracted to before // the TPU computation. These ops are either connected to the inputs of the TPU // computation or other ops that can be extracted, and have no operands from // other ops in the TPU computation that cannot be extracted. llvm::SmallVector<Operation*, 4> FindOutsideCompiledOpsAtHead(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 20.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/passes.h
// Creates a pass that converts TPU models for CPU by removing TPU related ops // such as TPUPartitionedCall, TPUReplicatedOp, etc. The TF quantizer does not // work with models specifically designed for TPU, so this pass makes the input // TPU model compatible with the TF quantizer by rewriting the TPU ops. The // output model of this pass is expected to be ready for the TF quantizer.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 12.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/canonicalize_compile_and_replicate_attributes.mlir
%control = tf_executor.island wraps "tf.TPUReplicateMetadata"() {_tpu_replicate = "cluster", allow_soft_placement = false, computation_shape = [], device = "", device_assignment = [], host_compute_core...
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 3.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/api/v2/legalize_tf_test.cc
namespace tensorflow { namespace tf2xla { namespace v2 { using ::tensorflow::monitoring::testing::CellReader; using ::testing::Not; using ::testing::TestWithParam; using tpu::FunctionToHloArgs; using tpu::MlirToHloArgs; using tpu::ShardingAndIndex; using tpu::TPUCompileMetadataProto; static constexpr char kCompilationTimeStreamzName[] = "/tensorflow/core/tf2xla/api/v2/phase2_compilation_time";
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 13 23:59:33 UTC 2024 - 16.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/tpu_tail_with_tobool_op.mlir
// RUN: tf-opt %s -tf-replicated-clustering-bridge-v2 -tfrt-lower-cluster-to-runtime-ops-tpu 2>&1 | FileCheck %s // This test verifies that the tail extraction is not terminated prematurely // in handling tf.If op which would end up with excessive host-device // communication. // In this test, all ops other than tf.Rank are marked with outside_compilation // . So the TPU program should contain tf.Rank op and there should be no // host-device communication.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Mar 13 21:23:47 UTC 2024 - 2.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/transforms/infeed_ops_xla_adjust_layout.cc
#include "tensorflow/compiler/mlir/tensorflow/transforms/set_tpu_infeed_layout.h" #include "xla/layout.h" #include "xla/mlir_hlo/mhlo/IR/hlo_ops.h" #include "xla/shape.h" #include "xla/stream_executor/tpu/c_api_conversions.h" #include "xla/stream_executor/tpu/tpu_api.h" #include "xla/translate/mhlo_to_hlo/type_to_shape.h" namespace mlir { namespace mhlo { namespace { #define GEN_PASS_DEF_INFEEDOPSXLAADJUSTLAYOUT
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Sep 06 19:12:29 UTC 2023 - 2.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/testdata/malformed_cluster.mlir
func.func @missing_num_cores_per_replica() { // expected-error@+1 {{requires attribute 'num_cores_per_replica'}}
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Oct 17 16:43:16 UTC 2023 - 742 bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/executor_tpuv1_outline_tpu_island.cc
// Handle replicated TPU case. islands_to_outline.push_back(island_op); break; } auto device_type = op.getAttrOfType<StringAttr>(TF::kCompileDeviceTypeAttr); if (device_type && device_type.getValue() == TF::kTpuDevice && !op.hasAttrOfType<StringAttr>(TF::kReplicationInfoAttr)) { // Handle single-core TPU case (no `TPUReplicateMetadataOp`).
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 8.7K bytes - Viewed (0)