Results 41 - 50 of 110 for tpu0 (0.08 sec)

  1. tensorflow/compiler/mlir/tensorflow/transforms/tensor_device_copy_conversion.cc

                                            const StringAttr &op_device) {
        // In TFRT TPU, tensor transfer is handled specifically by D2H and
        // H2D transfer kernels. So fold the tf.Identity op if:
        // * the identity op is placed on TPU, and
        // * the arg to the identity op is produced by a TPUExecuteOp.
        if (op_device && op_device.getValue().contains("TPU")) {
          return true;
        }
    
        Operation *def_op = arg.getDefiningOp();
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 4.3K bytes
    - Viewed (0)
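The excerpt above breaks off right after fetching the defining op. A minimal sketch of how the full fold predicate could read, assuming a `ShouldFoldIdentity`-style wrapper (the function name and the `TF::TPUExecuteOp` check are illustrative assumptions; only the two conditions come from the comment in the excerpt):

    #include "mlir/IR/BuiltinAttributes.h"  // from @llvm-project
    #include "mlir/IR/Operation.h"          // from @llvm-project
    #include "mlir/IR/Value.h"              // from @llvm-project
    #include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"

    // Sketch only: fold tf.Identity when either condition from the excerpt
    // holds. The helper name is hypothetical, not the TensorFlow source.
    static bool ShouldFoldIdentity(mlir::Value arg,
                                   const mlir::StringAttr &op_device) {
      // Condition 1: the identity op itself is placed on a TPU device.
      if (op_device && op_device.getValue().contains("TPU")) return true;

      // Condition 2: the argument is produced by a TPUExecuteOp, so the
      // dedicated D2H/H2D transfer kernels make the identity copy redundant.
      mlir::Operation *def_op = arg.getDefiningOp();
      return def_op && llvm::isa<mlir::TF::TPUExecuteOp>(def_op);
    }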
  2. tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/tpu_metadata_utils.h

    #include "xla/xla.pb.h"
    #include "xla/xla_data.pb.h"
    #include "tensorflow/core/framework/tensor_shape.pb.h"
    #include "tensorflow/core/framework/types.pb.h"
    #include "tensorflow/core/protobuf/tpu/compile_metadata.pb.h"
    
    namespace mlir {
    namespace TFTPU {
    
    // Populates a TPUCompileMetadataProto from attributes of a
    // `tf_device::ClusterFuncOp`. If any necessary attributes are missing from the
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Dec 12 04:22:33 UTC 2023
    - 1.9K bytes
    - Viewed (0)
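The header comment describes populating a `TPUCompileMetadataProto` from attributes of a `tf_device::ClusterFuncOp`, failing when required attributes are missing. A hedged sketch of that pattern, with a hypothetical `PopulateMetadata` helper and only the `num_replicas` field shown (the real tpu_metadata_utils.h API may differ):

    #include "mlir/IR/BuiltinAttributes.h"   // from @llvm-project
    #include "mlir/Support/LogicalResult.h"  // from @llvm-project
    #include "tensorflow/compiler/mlir/tensorflow/ir/tf_device.h"
    #include "tensorflow/core/protobuf/tpu/compile_metadata.pb.h"

    // Sketch only: helper name and attribute handling are assumptions.
    mlir::LogicalResult PopulateMetadata(
        mlir::tf_device::ClusterFuncOp op,
        tensorflow::tpu::TPUCompileMetadataProto *metadata) {
      // Emit a diagnostic if a required attribute is missing, as the header
      // comment says the real utility does.
      auto num_replicas = op->getAttrOfType<mlir::IntegerAttr>("num_replicas");
      if (!num_replicas)
        return op.emitOpError("requires attribute 'num_replicas'");
      metadata->set_num_replicas(num_replicas.getInt());
      return mlir::success();
    }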
  3. tensorflow/compiler/mlir/tfrt/ir/BUILD

        compatible_with = get_compatible_with_portable(),  # copybara: comment
        visibility = [
            # copybara:uncomment "//learning/brain/experimental/tfrt:__subpackages__",
            # copybara:uncomment "//learning/brain/tfrt/tpu/compiler:__subpackages__",
            "//tensorflow/compiler/mlir/tfrt:__subpackages__",
            "//tensorflow/core/runtime_fallback:internal",
            "//tensorflow/core/tfrt/saved_model:friends",
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Apr 27 20:43:07 UTC 2024
    - 7.4K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/tensorflow/utils/tpu_cluster_util.h

    #include <string>
    
    #include "mlir/Support/LogicalResult.h"  // from @llvm-project
    #include "tensorflow/compiler/mlir/tensorflow/utils/tpu_rewrite_device_util.h"
    
    namespace mlir {
    namespace TFTPU {
    
    // For each TPU cluster in `module`, walk over all ops inside the cluster
    // and all ops reachable in the call graph from the cluster.
    // For each op walked, `callback` is applied to the op, the root cluster, and
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jun 06 04:50:13 UTC 2023
    - 2.1K bytes
    - Viewed (0)
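As a usage sketch of the walk utility this header describes: a callback receives each walked op together with its root cluster. The walker name `WalkReachableFromTpuCluster` and its exact signature are assumptions for illustration:

    #include "mlir/IR/BuiltinOps.h"  // from @llvm-project
    #include "tensorflow/compiler/mlir/tensorflow/ir/tf_device.h"
    #include "tensorflow/compiler/mlir/tensorflow/utils/tpu_cluster_util.h"

    // Count every op walked across all TPU clusters in `module`. The walker
    // name and callback shape below are assumptions based on the comment.
    mlir::LogicalResult CountTpuClusterOps(mlir::ModuleOp module,
                                           int64_t &num_ops) {
      num_ops = 0;
      return mlir::TFTPU::WalkReachableFromTpuCluster(
          module, [&](mlir::Operation *op, mlir::tf_device::ClusterOp cluster) {
            (void)cluster;  // the root cluster the op is reachable from
            ++num_ops;
            return mlir::WalkResult::advance();
          });
    }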
  5. tensorflow/compiler/mlir/tensorflow/transforms/tpu_host_computation_expansion.cc

      return llvm::isa<TF::CastOp, TF::IdentityOp>(op);
    }
    
    // Adds outside compilation attributes to unary ops such as Identity/Cast ops
    // at the head of a TPU computation that are used only by other outside
    // compiled ops. Identity and Cast ops are commonly added to the start of a
    // TPU computation. Adding/expanding outside compilation attributes to these
    // ops ensures that head outside compiled ops are correctly located and moved
    // to the host.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 5.1K bytes
    - Viewed (0)
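A minimal sketch of the expansion the comment describes, assuming the conventional `_xla_outside_compilation` attribute name and a hypothetical "cluster0" id:

    #include "mlir/IR/Builders.h"   // from @llvm-project
    #include "mlir/IR/Operation.h"  // from @llvm-project
    #include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"

    // Mark a head unary op (Identity/Cast) as outside compiled when every one
    // of its users is already outside compiled. Attribute name and cluster id
    // are assumptions for illustration.
    void ExpandHeadOutsideCompilation(mlir::Operation *op,
                                      mlir::OpBuilder &builder) {
      if (!llvm::isa<mlir::TF::CastOp, mlir::TF::IdentityOp>(op)) return;
      bool has_user = false;
      for (mlir::Operation *user : op->getUsers()) {
        has_user = true;
        if (!user->hasAttr("_xla_outside_compilation")) return;
      }
      if (has_user)
        op->setAttr("_xla_outside_compilation",
                    builder.getStringAttr("cluster0"));
    }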
  6. tensorflow/compiler/mlir/tf2xla/internal/mlir_bridge_pass_util.h

    // _tpu_replicate or _xla_compile_device_type=TPU. The function library will be
    // skipped if nullptr is provided.
    
    bool IsSupportedByReplicatedBridge(
        const Graph& graph, const FunctionLibraryDefinition* function_library);
    
    // Check if an MLIR module has any ops with _tpu_replicate or
    // _xla_compile_device_type=TPU.
    bool IsSupportedByReplicatedBridge(mlir::ModuleOp module);
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Mar 13 16:33:22 UTC 2024
    - 2.3K bytes
    - Viewed (0)
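A short usage sketch for the module overload: take the replicated (TPU) bridge only when the module carries `_tpu_replicate` or `_xla_compile_device_type=TPU`. The dispatcher and both bridge runners below are hypothetical placeholders, and the `tensorflow` namespace qualification is an assumption:

    #include "mlir/IR/BuiltinOps.h"  // from @llvm-project
    #include "tensorflow/compiler/mlir/tf2xla/internal/mlir_bridge_pass_util.h"

    void RunReplicatedBridge(mlir::ModuleOp module);     // placeholder
    void RunNonReplicatedBridge(mlir::ModuleOp module);  // placeholder

    // Hypothetical dispatcher built on the query above.
    void DispatchBridge(mlir::ModuleOp module) {
      if (tensorflow::IsSupportedByReplicatedBridge(module)) {
        RunReplicatedBridge(module);     // replicated / TPU path
      } else {
        RunNonReplicatedBridge(module);  // non-replicated path
      }
    }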
  7. tensorflow/compiler/mlir/tensorflow/tests/cannonicalize_ops_outside_compilation.mlir

    // RUN: tf-opt %s -tf-replicated-clustering-bridge-v2 -tfrt-lower-cluster-to-runtime-ops-tpu 2>&1 | FileCheck %s
    
    // This test verifies that tail extraction is not terminated prematurely
    // when the outside compilation attribute could be removed during
    // canonicalization of Reshape ops.
    
    // Reshape ops should not be executed on the TPU, as all of them are marked
    // for outside compilation, and there should be no host-device communication.
    // CHECK: tf._TPUCompileMlir
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Mar 13 21:23:47 UTC 2024
    - 2.6K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/tensorflow/tests/canonicalize_compile_and_replicate_attributes.mlir

        %control = tf_executor.island wraps "tf.TPUReplicateMetadata"() {_tpu_replicate = "cluster", allow_soft_placement = false, computation_shape = [], device = "", device_assignment = [], host_compute_core...
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 3.1K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/tensorflow/transforms/executor_tpuv1_outline_tpu_island.cc

            // Handle replicated TPU case.
            islands_to_outline.push_back(island_op);
            break;
          }
          auto device_type =
              op.getAttrOfType<StringAttr>(TF::kCompileDeviceTypeAttr);
          if (device_type && device_type.getValue() == TF::kTpuDevice &&
              !op.hasAttrOfType<StringAttr>(TF::kReplicationInfoAttr)) {
            // Handle single-core TPU case (no `TPUReplicateMetadataOp`).
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 8.7K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/tensorflow/tests/tpu_tail_with_tobool_op.mlir

    // RUN: tf-opt %s -tf-replicated-clustering-bridge-v2 -tfrt-lower-cluster-to-runtime-ops-tpu 2>&1 | FileCheck %s
    
    // This test verifies that tail extraction is not terminated prematurely
    // when handling a tf.If op, which would otherwise end up with excessive
    // host-device communication.
    
    // In this test, all ops other than tf.Rank are marked with
    // outside_compilation, so the TPU program should contain the tf.Rank op and
    // there should be no host-device communication.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Mar 13 21:23:47 UTC 2024
    - 2.8K bytes
    - Viewed (0)