Results 31 - 40 of 78 for tpu0 (0.09 sec)

  1. tensorflow/compiler/mlir/quantization/tensorflow/passes/passes.h

    // Creates a pass that converts TPU models for CPU by removing TPU-related ops
    // such as TPUPartitionedCall, TPUReplicatedOp, etc. The TF quantizer does not
    // work with models specifically designed for TPU, so this pass makes the input
    // TPU model compatible with the TF quantizer by rewriting the TPU ops. The
    // output model of this pass is expected to be ready for the TF quantizer.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 12.3K bytes
    - Viewed (0)
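
    The comment above describes a standard MLIR module pass. As a rough
    illustration only (not the actual TensorFlow implementation), a pass of
    that shape could be declared as in the following sketch; the class name,
    factory, and flag string are hypothetical.

    // Hypothetical sketch of an MLIR module pass in the shape the header
    // describes; ConvertTpuToCpuPass and CreateConvertTpuToCpuPass are
    // illustrative names, not the real TensorFlow symbols.
    #include <memory>

    #include "mlir/IR/BuiltinOps.h"
    #include "mlir/Pass/Pass.h"

    namespace {
    struct ConvertTpuToCpuPass
        : public mlir::PassWrapper<ConvertTpuToCpuPass,
                                   mlir::OperationPass<mlir::ModuleOp>> {
      llvm::StringRef getArgument() const final { return "convert-tpu-to-cpu"; }
      llvm::StringRef getDescription() const final {
        return "Rewrites TPU ops such as TPUPartitionedCall so the TF "
               "quantizer can process the model on CPU.";
      }
      void runOnOperation() override {
        // A real pass would match TPUPartitionedCall / TPUReplicatedOp here
        // and rewrite them into CPU-compatible equivalents.
        getOperation().walk([](mlir::Operation *op) { (void)op; });
      }
    };
    }  // namespace

    std::unique_ptr<mlir::Pass> CreateConvertTpuToCpuPass() {
      return std::make_unique<ConvertTpuToCpuPass>();
    }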
  2. tensorflow/compiler/mlir/tf2xla/api/v2/legalize_tf_test.cc

    namespace tensorflow {
    namespace tf2xla {
    namespace v2 {
    
    using ::tensorflow::monitoring::testing::CellReader;
    using ::testing::Not;
    using ::testing::TestWithParam;
    using tpu::FunctionToHloArgs;
    using tpu::MlirToHloArgs;
    using tpu::ShardingAndIndex;
    using tpu::TPUCompileMetadataProto;
    
    static constexpr char kCompilationTimeStreamzName[] =
        "/tensorflow/core/tf2xla/api/v2/phase2_compilation_time";
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jun 13 23:59:33 UTC 2024
    - 16.1K bytes
    - Viewed (0)
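
    For context, CellReader (used above) is TensorFlow's monitoring test
    helper for observing streamz metrics. A hedged sketch of the usual
    pattern follows; the metric value type and the label string are
    assumptions, not taken from the actual test.

    // Hedged sketch of the common CellReader pattern; the int64_t value
    // type and the "some_label" label are assumptions for illustration.
    #include <cstdint>

    #include <gtest/gtest.h>
    #include "tensorflow/core/lib/monitoring/cell_reader.h"

    using ::tensorflow::monitoring::testing::CellReader;

    TEST(LegalizeTfSketch, ObservesCompilationStreamz) {
      // The reader snapshots the metric when it is constructed.
      CellReader<int64_t> counter(
          "/tensorflow/core/tf2xla/api/v2/phase2_compilation_time");
      // ... run the legalization under test here ...
      // Delta(labels...) returns the change in the labeled cell since the
      // snapshot was taken.
      EXPECT_GE(counter.Delta("some_label"), 0);
    }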
  3. hack/lib/test.sh

    # limitations under the License.
    
    # shellcheck disable=SC2034 # Variables sourced in other scripts.
    
    # A set of helpers for tests
    
    reset=$(tput sgr0)
    bold=$(tput bold)
    black=$(tput setaf 0)
    red=$(tput setaf 1)
    green=$(tput setaf 2)
    readonly reset bold black red green
    
    kube::test::clear_all() {
      if kube::test::if_supports_resource "rc" ; then
        # shellcheck disable=SC2154
    Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Fri Jul 07 16:46:34 UTC 2023
    - 15.9K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/tpu_variable_runtime_reformatting.cc

        });
        if (replicate)
          reshard_was_inserted |= HandleReplicateOp(while_op, replicate);
      });
      if (reshard_was_inserted)
        VLOG(1) << "tf-tpu-variable-runtime-reformatting inserted at least one "
                   "TPUReshardVariables";
      else
        VLOG(1) << "tf-tpu-variable-runtime-reformatting inserted no "
                   "TPUReshardVariables";
    }
    
    }  // namespace
    
    std::unique_ptr<OperationPass<ModuleOp>>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 21.9K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/tensorflow/utils/tpu_rewrite_device_util_test.cc

                            std::vector<int64_t>(),
                            "number of tasks from available TPU devices must be "
                            "'num_tasks' in 'topology' (1), got 2"),
            std::make_tuple(2, 1, TopologyWithMeshShapeAndTasks({1, 1, 1, 1}, 2, 2),
                            std::vector<int64_t>(),
                            "number of TPU devices available per task must be "
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Apr 26 09:37:10 UTC 2024
    - 46.8K bytes
    - Viewed (0)
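
    The tuples above follow googletest's value-parameterized test pattern:
    each case bundles inputs with the error substring the code under test is
    expected to produce. A self-contained sketch of that pattern, with
    hypothetical fixture and case names:

    // Sketch of a value-parameterized test pairing an input with an
    // expected error substring; all names here are hypothetical.
    #include <string>
    #include <tuple>

    #include <gtest/gtest.h>

    class TopologyErrorTest
        : public ::testing::TestWithParam<std::tuple<int, std::string>> {};

    TEST_P(TopologyErrorTest, ReportsExpectedErrorSubstring) {
      auto [num_tasks, expected_substring] = GetParam();
      (void)num_tasks;  // a real test would build a topology from this
      // A real test would match the returned error against the substring.
      EXPECT_FALSE(expected_substring.empty());
    }

    INSTANTIATE_TEST_SUITE_P(
        BadTopologies, TopologyErrorTest,
        ::testing::Values(
            std::make_tuple(1, std::string("number of tasks from available "
                                           "TPU devices must be")),
            std::make_tuple(2, std::string("number of TPU devices available "
                                           "per task must be"))));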
  6. tensorflow/compiler/mlir/tensorflow/transforms/tpu_dynamic_layout_pass.cc

      void runOnFunction(
          func::FuncOp func,
          const TF::ResourceAliasAnalysis::Info& resource_alias_analysis);
    
      StringRef getArgument() const final { return "tf-tpu-dynamic-layout-pass"; }
    
      StringRef getDescription() const final {
        return "Inserts TPU layout ops to determine layout at run time.";
      }
    };
    
    // Checks if the input producer op is supported in this transform. Right now, we
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 12.7K bytes
    - Viewed (0)
  7. tensorflow/compiler/jit/xla_platform_info.cc

        return absl::OkStatus();
      }
    
      // TFRT-TPU is used if device type is `DEVICE_TPU` and platform_info does not
      // have `xla_device_metadata`. This is used for TFRT-TPU when
      // BuildXlaDeviceCompiler() is called in GetCompilerIr(). Currently only
      // lowering to HLO is needed there and xla::LocalClient doesn't support
    // building the executable for TFRT-TPU and hence is set to nullptr here.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 17:23:27 UTC 2024
    - 17.4K bytes
    - Viewed (0)
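
    The comment above reduces to a simple predicate: treat the device as
    TFRT-TPU when it is a TPU without xla_device_metadata, in which case no
    xla::LocalClient is built. A hedged stand-in with simplified parameter
    types (the real code inspects TF's DeviceType and XlaPlatformInfo):

    // Hedged stand-in for the check described above; the string parameter
    // and bool flag are simplifications of the real TF types.
    #include <string>

    bool IsTfrtTpu(const std::string& device_type,
                   bool has_xla_device_metadata) {
      // TFRT-TPU path: a TPU device without XLA device metadata. Only
      // lowering to HLO is needed there, so the xla::LocalClient stays
      // nullptr.
      return device_type == "TPU" && !has_xla_device_metadata;
    }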
  8. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/replace_stablehlo_ops_in_main_function_with_xla_call_module_ops.mlir

      // CHECK: %[[IDENTIFY:.*]] = "tf.Identity"(%[[SUBGRAPH_0]]#1) {device = ""} : (tensor<1024x3xf32>) -> tensor<1024x3xf32>
      // CHECK: %[[SUBGRAPH_1:.*]] = "tf.XlaCallModule"() <{Sout = [#tf_type.shape<1024x3>], {{.*}} ["CPU", "TPU"], {{.*}}}> {_entry_function = @_stablehlo_main_1
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 01:09:50 UTC 2024
    - 39.8K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/tensorflow/utils/xla_sharding_util.cc

      return builder->create<mlir::TF::ConcatOp>(
          location, output_type, concat_dimension_op.getOutput(), inputs);
    }
    
    // For tile sharded inputs to TPU computation, inject split op between the
    // input values and TPU computation so that tiled input values are passed in
    // as inputs to TPU computations. If more than one dimension is sharded, then
    // a tree of connected split ops is added before the tf_device.parallel_execute op.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 22 21:28:13 UTC 2024
    - 34K bytes
    - Viewed (0)
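
    The splitting scheme described above composes multiplicatively: sharding
    along several dimensions produces a tree of split ops whose leaf count is
    the product of the per-dimension split counts. A small standalone
    illustration of that arithmetic (plain C++, no MLIR types, hypothetical
    names):

    // Standalone illustration of the split-tree arithmetic: each sharded
    // dimension multiplies the number of leaf slices fed to the TPU
    // computation. CountLeafSlices is a hypothetical helper.
    #include <cstdio>
    #include <vector>

    int CountLeafSlices(const std::vector<int>& splits_per_dim) {
      int leaves = 1;
      for (int n : splits_per_dim)
        if (n > 1) leaves *= n;  // unsharded dims (0 or 1) contribute nothing
      return leaves;
    }

    int main() {
      // A tensor sharded 2-way on dim 0 and 4-way on dim 1 yields a split
      // tree with 2 * 4 = 8 leaves before tf_device.parallel_execute.
      std::printf("%d\n", CountLeafSlices({2, 4}));  // prints 8
      return 0;
    }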
  10. tensorflow/compiler/mlir/tensorflow/tests/embedding_sequencing.mlir

        return %0 : tensor<i1>
      }
      // Generated functions
      // non_tpu should have no TPU ops - just identity and return (in this test).
      // CHECK: func.func private @_func_non_tpu
      // CHECK-NEXT: tf.Identity
      // CHECK-NEXT: return
    
      // sc_forward should have TPU ops including replicated outputs but not inputs
      // CHECK: func.func private @_func_sc_forward
      // CHECK-NOT: TPUReplicatedInput
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Aug 01 21:27:49 UTC 2023
    - 19.1K bytes
    - Viewed (0)