Results 111 - 120 of 245 for tpu0 (0.04 sec)

  1. tensorflow/compiler/mlir/tensorflow/transforms/replica_id_to_device_ordinal.cc

              getOperation()->getParentOfType<ModuleOp>(), &devices)))
        return signalPassFailure();
      for (const auto& device_name : devices.device_names()) {
        if (device_name.has_type && device_name.type == "TPU") ++device_num;
      }
    
      if (device_num == 0) return;
    
      llvm::SmallVector<Operation*, 4> require_device_ordinal_ops;
      getOperation().walk([&](Operation* op) {
        if (RequiresDeviceOrdinalAttribute(op)) {
    - Last Modified: Wed Oct 05 23:50:19 UTC 2022
    - 4K bytes
  2. tensorflow/compiler/mlir/tensorflow/transforms/tpu_dynamic_layout_pass.cc

      void runOnFunction(
          func::FuncOp func,
          const TF::ResourceAliasAnalysis::Info& resource_alias_analysis);
    
      StringRef getArgument() const final { return "tf-tpu-dynamic-layout-pass"; }
    
      StringRef getDescription() const final {
        return "Inserts TPU layout ops to determine layout at run time.";
      }
    };
    
    // Checks if the input producer op is supported in this transform. Right now, we
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 12.7K bytes
  3. tensorflow/compiler/jit/xla_platform_info.cc

        return absl::OkStatus();
      }
    
      // TFRT-TPU is used if device type is `DEVICE_TPU` and platform_info does not
      // have `xla_device_metadata`. This is used for TFRT-TPU when
      // BuildXlaDeviceCompiler() is called in GetCompilerIr(). Currently only
      // lowering to HLO is needed there and xla::LocalClient doesn't support
      // building the executable for TFRT-TPU and hence, is set to nullptr here.
    - Last Modified: Thu May 02 17:23:27 UTC 2024
    - 17.4K bytes
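
The excerpt's comment explains when the TFRT-TPU path is taken: the device type is `DEVICE_TPU` and `platform_info` carries no `xla_device_metadata`, so only lowering to HLO is needed and no xla::LocalClient-backed compiler client is created. A minimal self-contained model of that condition follows; the types and names below are stand-ins for illustration only, not TensorFlow's actual API.

    // Stand-in types modeling the decision described in the comment above;
    // DeviceType and PlatformInfo here are NOT TensorFlow's real classes.
    enum class DeviceType { CPU, GPU, TPU };
    struct PlatformInfo {
      bool has_xla_device_metadata;
    };

    // TFRT-TPU path: a TPU device with no xla_device_metadata. On this path the
    // compiler client would be left as nullptr, since xla::LocalClient cannot
    // build a TFRT-TPU executable and only HLO lowering is required.
    bool UsesTfrtTpuPath(DeviceType device_type, const PlatformInfo& info) {
      return device_type == DeviceType::TPU && !info.has_xla_device_metadata;
    }

    int main() {
      PlatformInfo info{/*has_xla_device_metadata=*/false};
      return UsesTfrtTpuPath(DeviceType::TPU, info) ? 0 : 1;  // 0: TFRT-TPU path applies
    }
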
  4. tensorflow/compiler/mlir/tensorflow/tests/mlir2graphdef/export_main_to_flib.mlir

      func.func @main() attributes {tf.entry_function = {inputs = "", outputs = ""}} {
        tf_executor.graph {
          %0:2 = tf_executor.island wraps "tf.Const"() {device = "TPU:0", name = "const", dtype = "tfdtype$DT_INT32", value = dense<[1, 2]> : tensor<2xi32>} : () -> tensor<2xi32>
          tf_executor.fetch
        }
        func.return
      }
    }
    
    // CHECK-NOT: node
    
    // CHECK: library
    - Last Modified: Mon Mar 28 12:06:33 UTC 2022
    - 699 bytes
  5. tensorflow/compiler/mlir/quantization/tensorflow/tests/convert_tpu_model_to_cpu.mlir

    // RUN: tf-quant-opt %s -quant-convert-tpu-model-to-cpu -inline -quant-cast-bf16-ops-to-f32 -split-input-file | \
    // RUN: FileCheck %s
    
    // Remove TPU related ops.
    func.func @tpu_conv(%arg0: tensor<1x3x4x3xf32>) -> tensor<1x3x2x2xf32> {
      %0 = "tf.TPUOrdinalSelector"() {device = ""} : () -> tensor<?xi32>
      %1 = "tf.TPUPartitionedCall"(%arg0, %0) {autotuner_thresh = 0 : i64, device = "", f = @tpu_func_0_optim0} : (tensor<1x3x4x3xf32>, tensor<?xi32>) -> tensor<1x3x2x2xf32>
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 4.3K bytes
  6. tensorflow/compiler/mlir/tensorflow/ir/tf_ops.td

      let summary = "Op that compiles a computation in MLIR into a TPU program, and loads and executes it on a TPU device.";
    
      let description = [{
    For the internal use of the TPU compiler.
    
    'static_shapes' are tensors specifying the maximum dimension sizes for the tensors specified in `dynamic_operands`.
    'args' are inputs to the TPU computation.
    - Last Modified: Wed Apr 24 04:08:35 UTC 2024
    - 90.5K bytes
  7. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/replace_stablehlo_ops_in_main_function_with_xla_call_module_ops.mlir

      // CHECK: %[[IDENTIFY:.*]] = "tf.Identity"(%[[SUBGRAPH_0]]#1) {device = ""} : (tensor<1024x3xf32>) -> tensor<1024x3xf32>
      // CHECK: %[[SUBGRAPH_1:.*]] = "tf.XlaCallModule"() <{Sout = [#tf_type.shape<1024x3>], {{.*}} ["CPU", "TPU"], {{.*}}}> {_entry_function = @_stablehlo_main_1
    - Last Modified: Thu Apr 25 01:09:50 UTC 2024
    - 39.8K bytes
  8. tensorflow/compiler/mlir/tf2xla/api/v2/cluster_tf.cc

    using mlir::LogicalResult;
    using mlir::ModuleOp;
    using mlir::OpPassManager;
    using mlir::PassManager;
    using mlir::func::FuncOp;
    
    // Run the TF XLA Bridge based on the input pipeline, which can be either TPU
    // bridge pipeline or non TPU bridge pipeline.
    tensorflow::Status RunTFXLABridge(
        ModuleOp module,
        llvm::function_ref<void(OpPassManager &pm)> pipeline_builder,
        llvm::StringRef module_name = llvm::StringRef(),
    - Last Modified: Thu Mar 28 22:25:18 UTC 2024
    - 8.7K bytes
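
The signature above takes the pass pipeline as a pipeline_builder callback rather than a prebuilt PassManager. A hedged usage sketch, assuming the RunTFXLABridge declaration is visible to the caller (in cluster_tf.cc it is an internal helper) and using mlir::createCanonicalizerPass() purely as a stand-in for the real bridge passes; the wrapper name RunWithStandInPipeline is illustrative only:

    #include "mlir/IR/BuiltinOps.h"      // mlir::ModuleOp
    #include "mlir/Pass/PassManager.h"   // mlir::OpPassManager
    #include "mlir/Transforms/Passes.h"  // mlir::createCanonicalizerPass
    #include "tensorflow/core/platform/status.h"  // tensorflow::Status

    // Sketch only: shows the shape of a pipeline_builder callback. The callback
    // merely populates the OpPassManager; RunTFXLABridge owns the PassManager,
    // runs the assembled pipeline on `module`, and returns the status.
    tensorflow::Status RunWithStandInPipeline(mlir::ModuleOp module) {
      return RunTFXLABridge(
          module,
          [](mlir::OpPassManager &pm) {
            pm.addPass(mlir::createCanonicalizerPass());  // stand-in pass
          },
          /*module_name=*/"example_module");
    }
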
  9. tensorflow/compiler/mlir/tfrt/tests/runtime_lowering_gpu.mlir

    // RUN: tf-tfrt-opt -tfrt-lower-cluster-to-runtime-ops-non-tpu -split-input-file -verify-diagnostics %s | FileCheck %s
    
    module attributes {tf.versions = {producer = 888 : i32}, tf.devices = ["/job:worker/replica:0/task:0/device:CPU:0", "/job:worker/replica:0/task:0/device:TPU_SYSTEM:0", "/job:worker/replica:0/task:0/device:GPU:0"]} {
    
      // CHECK-LABEL: @converts_cluster
      func.func @converts_cluster() {
        // CHECK: "tf.XlaLaunch"()
    - Last Modified: Fri Oct 13 17:41:44 UTC 2023
    - 840 bytes
  10. tensorflow/compiler/mlir/g3doc/overview.md

    *   Compiler researchers and implementers looking to optimize performance and
        memory consumption of machine learning models
    *   Hardware makers looking for a way to connect their hardware to TensorFlow,
        such as TPUs, portable neural hardware in phones, and other custom ASICs
    *   People writing language bindings that want to take advantage of optimizing
        compilers and hardware acceleration.
    
    - Last Modified: Fri Feb 21 01:37:38 UTC 2020
    - 1.8K bytes