Results 51 - 60 of 148 for tpu0 (0.09 sec)

  1. tensorflow/compiler/mlir/tf2xla/api/v2/BUILD

            "//tensorflow/core:lib",
            "//tensorflow/core/platform:statusor",
            "//tensorflow/core/tpu:tpu_compile",
            "//tensorflow/core/tpu/kernels:tpu_compile_op_support",
            "//tensorflow/core/tpu/kernels:tpu_compile_proto_cc",
            "//tensorflow/core/tpu/kernels:tpu_util_hdrs",
            "@com_google_absl//absl/log",
            "@com_google_absl//absl/status",
            "@com_google_absl//absl/strings",
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jun 06 23:04:51 UTC 2024
    - 10.7K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/tpu_rewrite_pass.cc

    #include "tensorflow/core/framework/types.pb.h"
    #include "tensorflow/core/lib/core/status.h"
    #include "tensorflow/core/platform/fingerprint.h"
    #include "tensorflow/core/platform/protobuf.h"
    #include "tensorflow/core/protobuf/tpu/compile_metadata.pb.h"
    #include "tensorflow/core/util/device_name_utils.h"
    
    namespace mlir {
    namespace TFTPU {
    
    constexpr char kStepMarkerLocationAttr[] = "step_marker_location";
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Apr 30 21:25:12 UTC 2024
    - 29.7K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/tensorflow/transforms/tpu_device_propagation.cc

      }
    }
    
    // Checks if an operation can have TPU devices propagated through.
    bool IsSupportedOpToSetDevice(Operation& op) {
      return IsSupportedExecutorOp(op) ||
             isa<TF::IdentityOp, TF::IdentityNOp, TF::ShapeOp>(op);
    }
    
    // Finds a non-conflicting TPU device for an operation from its operands. If an
    // operand has no device or a non-TPU device, or if there are conflicting
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 11K bytes
    - Viewed (0)
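The comment and predicate in entry 3 gate which ops may have a TPU device propagated from their operands. Below is a minimal, hypothetical sketch of that operand-based propagation rule in plain standard C++; `Op`, `device`, and `FindNonConflictingTpuDevice` are stand-in names for illustration and are not the real MLIR API used by the pass.

    // Hypothetical, simplified sketch of operand-based TPU device propagation.
    // `Op` and `device` stand in for mlir::Operation and its "device" attribute.
    #include <optional>
    #include <string>
    #include <vector>

    struct Op {
      std::string device;               // empty means "no device assigned"
      std::vector<const Op*> operands;  // defining ops of this op's operands
    };

    bool IsTpuDevice(const std::string& d) {
      return d.find("device:TPU") != std::string::npos;
    }

    // Returns a non-conflicting TPU device derived from the operands, or
    // std::nullopt if any operand has no device, a non-TPU device, or if the
    // operand devices disagree, mirroring the comment in the snippet above.
    std::optional<std::string> FindNonConflictingTpuDevice(const Op& op) {
      std::string found;
      for (const Op* operand : op.operands) {
        if (operand->device.empty() || !IsTpuDevice(operand->device))
          return std::nullopt;
        if (found.empty())
          found = operand->device;
        else if (found != operand->device)
          return std::nullopt;  // conflicting TPU devices
      }
      if (found.empty()) return std::nullopt;
      return found;
    }

A pass built on such a check would combine it with a predicate like IsSupportedOpToSetDevice above, so only identity-like and executor ops ever receive a propagated device.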
  4. tensorflow/compiler/mlir/tensorflow/transforms/passes.h

    CreateTFExecutorIslandCoarseningPass();
    
    // Creates a pass to merge IslandOps for operations marked for execution on TPU.
    // This is for V1 backward compatibility.
    std::unique_ptr<OperationPass<ModuleOp>>
    CreateTFExecutorTPUV1IslandCoarseningPass();
    
    // Creates a pass to outline TPU clusters from a single IslandOp into a nested
    // module suitable for being processed as if it were a V2 module.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 21:18:05 UTC 2024
    - 31.8K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/tfrt/ir/BUILD

        compatible_with = get_compatible_with_portable(),  # copybara: comment
        visibility = [
            # copybara:uncomment "//learning/brain/experimental/tfrt:__subpackages__",
            # copybara:uncomment "//learning/brain/tfrt/tpu/compiler:__subpackages__",
            "//tensorflow/compiler/mlir/tfrt:__subpackages__",
            "//tensorflow/core/runtime_fallback:internal",
            "//tensorflow/core/tfrt/saved_model:friends",
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Apr 27 20:43:07 UTC 2024
    - 7.4K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/tensorflow/transforms/tpu_host_computation_expansion.cc

      return llvm::isa<TF::CastOp, TF::IdentityOp>(op);
    }
    
    // Adds outside compilation attributes to unary ops such as Identity/Cast ops
    // at the head of a TPU computation that are used only by other outside compiled
    // ops. Identity ops and Cast ops are commonly added to the start of TPU
    // computation. Adding/expanding outside compilation attributes to these ops
    // will ensure that head outside compiled ops are correctly located and moved to
    // host.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 5.1K bytes
    - Viewed (0)
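Entry 6 describes expanding the outside compilation attribute onto head Identity/Cast ops whose results feed only other outside compiled ops. A hedged, standalone sketch of that marking step follows; `Node`, `kind`, and `users` are invented stand-ins and do not reflect the actual pass implementation.

    // Hypothetical sketch of expanding outside compilation to head unary ops.
    #include <string>
    #include <vector>

    struct Node {
      std::string kind;               // e.g. "Identity", "Cast", "MatMul"
      bool outside_compiled = false;  // carries the outside compilation attribute
      std::vector<Node*> users;       // ops consuming this op's results
    };

    bool IsHeadUnaryOp(const Node& n) {
      return n.kind == "Identity" || n.kind == "Cast";
    }

    // Marks a head Identity/Cast op as outside compiled when every user is
    // already outside compiled, so head extraction can later move it to the host.
    void ExpandOutsideCompilationToHeadOps(std::vector<Node*>& head_ops) {
      for (Node* n : head_ops) {
        if (!IsHeadUnaryOp(*n) || n->outside_compiled) continue;
        bool all_users_outside = !n->users.empty();
        for (Node* user : n->users) {
          if (!user->outside_compiled) {
            all_users_outside = false;
            break;
          }
        }
        if (all_users_outside) n->outside_compiled = true;
      }
    }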
  7. tensorflow/compiler/mlir/tf2xla/internal/mlir_bridge_pass_util.h

    // _tpu_replicate or _xla_compile_device_type=TPU. The function library will be
    // skipped if nullptr is provided.
    
    bool IsSupportedByReplicatedBridge(
        const Graph& graph, const FunctionLibraryDefinition* function_library);
    
    // Check if an MLIR module has any ops with _tpu_replicate or
    // _xla_compile_device_type=TPU.
    bool IsSupportedByReplicatedBridge(mlir::ModuleOp module);
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Mar 13 16:33:22 UTC 2024
    - 2.3K bytes
    - Viewed (0)
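The header in entry 7 declares checks for whether a graph or MLIR module should go through the replicated bridge, keyed on the _tpu_replicate attribute or _xla_compile_device_type=TPU. A small illustrative sketch of that attribute test is shown below; the map-based `FakeNode` is an assumption for readability and is not the TensorFlow Graph or MLIR API.

    // Sketch: a graph targets the replicated bridge when any node carries
    // _tpu_replicate or _xla_compile_device_type == "TPU".
    #include <map>
    #include <string>
    #include <vector>

    using FakeNode = std::map<std::string, std::string>;  // attr name -> value

    bool NodeTargetsTpu(const FakeNode& attrs) {
      if (attrs.count("_tpu_replicate") > 0) return true;
      auto it = attrs.find("_xla_compile_device_type");
      return it != attrs.end() && it->second == "TPU";
    }

    bool IsSupportedByReplicatedBridgeSketch(const std::vector<FakeNode>& nodes) {
      for (const FakeNode& n : nodes) {
        if (NodeTargetsTpu(n)) return true;
      }
      return false;
    }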
  8. tensorflow/compiler/mlir/tensorflow/tests/cannonicalize_ops_outside_compilation.mlir

    // RUN: tf-opt %s -tf-replicated-clustering-bridge-v2 -tfrt-lower-cluster-to-runtime-ops-tpu 2>&1 | FileCheck %s
    
    // This test verifies that the tail extraction is not terminated prematurely
    // due to the outside compilation attribute being removed during
    // canonicalization of Reshape ops.
    
    // Reshape should not be executed on TPU as all Reshape ops are marked for
    // outside compilation, and there should be no host-device communication.
    // CHECK: tf._TPUCompileMlir
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Mar 13 21:23:47 UTC 2024
    - 2.6K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/tf2xla/internal/passes/extract_head_tail_outside_compilation.cc

    }
    
    // Returns a set of ops that are outside compiled and can be extracted to before
    // the TPU computation. These ops are either connected to the inputs of the TPU
    // computation or other ops that can be extracted, and have no operands from
    // other ops in the TPU computation that cannot be extracted.
    llvm::SmallVector<Operation*, 4> FindOutsideCompiledOpsAtHead(
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 20.6K bytes
    - Viewed (0)
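Entry 9's comment spells out the head-extraction criterion: an outside compiled op can be pulled out before the TPU computation only if all of its operands come from cluster inputs or from ops that are themselves extractable. A hypothetical sketch of that criterion, assuming ops arrive in topological order, is given below; `ClusterOp` and `FindHeadExtractableOps` are illustrative names, not the real implementation of FindOutsideCompiledOpsAtHead.

    // Sketch of the head-extraction criterion for outside compiled ops.
    #include <vector>

    struct ClusterOp {
      bool outside_compiled = false;
      bool extractable = false;
      std::vector<ClusterOp*> operands;  // defining ops inside the cluster only;
                                         // cluster inputs are not listed here
    };

    // Visits ops in topological order and collects outside compiled ops whose
    // in-cluster operands are all already marked extractable.
    std::vector<ClusterOp*> FindHeadExtractableOps(std::vector<ClusterOp*>& ops) {
      std::vector<ClusterOp*> head;
      for (ClusterOp* op : ops) {
        if (!op->outside_compiled) continue;
        bool ok = true;
        for (ClusterOp* def : op->operands) {
          if (!def->extractable) {
            ok = false;
            break;
          }
        }
        if (ok) {
          op->extractable = true;
          head.push_back(op);
        }
      }
      return head;
    }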
  10. tensorflow/compiler/mlir/quantization/tensorflow/passes/passes.h

    // Creates a pass that converts TPU models for CPU by removing TPU related ops
    // such as TPUPartitionedCall, TPUReplicatedOp, etc. The TF quantizer does not
    // work with models specifically designed for TPU, so this pass makes the input
    // TPU model compatible with the TF quantizer by rewriting the TPU ops. The
    // output model of this pass is expected to be ready for the TF quantizer.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 12.3K bytes
    - Viewed (0)
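Entry 10 describes rewriting a TPU model into a CPU-compatible one by removing or replacing TPU-specific ops before quantization. The toy sketch below treats ops as plain strings to show the shape of that rewrite; the specific op names handled, beyond TPUPartitionedCall, are assumptions, and the real pass rewrites MLIR operations rather than strings.

    // Toy illustration: replace or drop TPU-specific ops so the TF quantizer
    // can consume the model on CPU.
    #include <string>
    #include <vector>

    std::vector<std::string> ConvertTpuModelForCpuSketch(
        const std::vector<std::string>& ops) {
      std::vector<std::string> out;
      for (const std::string& op : ops) {
        if (op == "TPUPartitionedCall") {
          // Call the underlying function on CPU instead.
          out.push_back("PartitionedCall");
        } else if (op == "TPUReplicatedInput" || op == "TPUReplicatedOutput" ||
                   op == "TPUReplicateMetadata") {
          // Replication bookkeeping has no CPU equivalent; drop it.
        } else {
          out.push_back(op);
        }
      }
      return out;
    }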