- Sort Score
- Results per page: 10
- Languages All
Results 101 - 110 of 215 for tpu0 (0.23 sec)
-
tensorflow/compiler/mlir/quantization/tensorflow/tests/convert_tpu_model_to_cpu.mlir
// RUN: tf-quant-opt %s -quant-convert-tpu-model-to-cpu -inline -quant-cast-bf16-ops-to-f32 -split-input-file | \ // RUN: FileCheck %s // Remove TPU related ops. func.func @tpu_conv(%arg0: tensor<1x3x4x3xf32>) -> tensor<1x3x2x2xf32> { %0 = "tf.TPUOrdinalSelector"() {device = ""} : () -> tensor<?xi32> %1 = "tf.TPUPartitionedCall"(%arg0, %0) {autotuner_thresh = 0 : i64, device = "", f = @tpu_func_0_optim0} : (tensor<1x3x4x3xf32>, tensor<?xi32>) -> tensor<1x3x2x2xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 4.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/replace_stablehlo_ops_in_main_function_with_xla_call_module_ops.mlir
// CHECK: %[[IDENTIFY:.*]] = "tf.Identity"(%[[SUBGRAPH_0]]#1) {device = ""} : (tensor<1024x3xf32>) -> tensor<1024x3xf32> // CHECK: %[[SUBGRAPH_1:.*]] = "tf.XlaCallModule"() <{Sout = [#tf_type.shape<1024x3>], {{.*}} ["CPU", "TPU"], {{.*}}}> {_entry_function = @_stablehlo_main_1
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 01:09:50 UTC 2024 - 39.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/api/v2/cluster_tf.cc
using mlir::LogicalResult; using mlir::ModuleOp; using mlir::OpPassManager; using mlir::PassManager; using mlir::func::FuncOp; // Run the TF XLA Bridge based on the input pipeline, which can be either TPU // bridge pipeline or non TPU bridge pipeline. tensorflow::Status RunTFXLABridge( ModuleOp module, llvm::function_ref<void(OpPassManager &pm)> pipeline_builder, llvm::StringRef module_name = llvm::StringRef(),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Mar 28 22:25:18 UTC 2024 - 8.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfrt/tests/runtime_lowering_gpu.mlir
// RUN: tf-tfrt-opt -tfrt-lower-cluster-to-runtime-ops-non-tpu -split-input-file -verify-diagnostics %s | FileCheck %s module attributes {tf.versions = {producer = 888 : i32}, tf.devices = ["/job:worker/replica:0/task:0/device:CPU:0", "/job:worker/replica:0/task:0/device:TPU_SYSTEM:0", "/job:worker/replica:0/task:0/device:GPU:0"]} { // CHECK-LABEL: @converts_cluster func.func @converts_cluster() { // CHECK: "tf.XlaLaunch"()
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Oct 13 17:41:44 UTC 2023 - 840 bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/cc/weight_only_ptq.cc
WeightOnlyPtqComponent::kName, *function_aliases, *ctx, *module)); // Remove the `tpu` tag for exporting because the output quantized model is // essentially a CPU model. tags.erase("tpu"); py_function_library.SaveExportedModel( dst_saved_model_path, post_calibrated_exported_model, src_saved_model_path, tags, signature_def_map);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 02:59:01 UTC 2024 - 5.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/utils/xla_sharding_util.cc
return builder->create<mlir::TF::ConcatOp>( location, output_type, concat_dimension_op.getOutput(), inputs); } // For tile sharded inputs to TPU computation, inject split op between the // input values and TPU computation so that tiled input values are passed in // as inputs to TPU computations. If more than one dimension is sharded, then // a tree of connected split ops are added before tf_device.parallel_execute op.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 22 21:28:13 UTC 2024 - 34K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/cc/static_range_ptq.cc
PostCalibrationComponent::kName, *function_aliases, *ctx, *module)); // Remove the `tpu` tag for exporting because the output quantized model is // essentially a CPU model. tags.erase("tpu"); py_function_library.SaveExportedModel( dst_saved_model_path, post_calibrated_exported_model, src_saved_model_path, tags, signature_def_map);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 24 12:49:45 UTC 2024 - 6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/testdata/spmd.mlir
"tf_device.cluster_func"(%arg0) <{func = @empty_func}> {_dynamic_arg_index = [], _replication_info = "cluster", _xla_compile_device_type = "TPU", allow_soft_placement = false, computation_shape = [], device = "", device_assignment = [0, 0, 0, 0, 0, 0, 0, 1], host_compute_core = [], input_sharding_configuration = ["{devices=[2,1]0,1}"], num_cores_per_replica = 2 : i64, output_sharding_configuration...
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Dec 12 04:22:33 UTC 2023 - 1.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/tpu_colocate_composite_resource_ops.mlir
// RUN: tf-opt %s -tf-tpu-colocate-composite-resource-ops | FileCheck %s // Tests ReadVariable op using composite device resource is wrapped inside // tf_device.Cluster. // CHECK-LABEL: func @testReadVariableOpColocated // CHECK-SAME: (%[[ARG0:.*]]: tensor<*x!tf_type.resource<tensor<4xf32>>>) func.func @testReadVariableOpColocated(%arg0: tensor<*x!tf_type.resource<tensor<4xf32>>>) { // CHECK: tf_device.replicate
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Oct 31 08:59:10 UTC 2023 - 6.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/move_tpu_compile_to_front.mlir
// RUN: tf-opt %s -allow-unregistered-dialect --tf-move-tpu-compile-to-front --split-input-file | FileCheck %s module { // CHECK-LABEL: does_basic_reordering func.func @does_basic_reordering() -> () { // CHECK: _TPUCompileMlir // CHECK-SAME: X // CHECK: _TPUCompileMlir // CHECK-SAME: Y // CHECK: OpA // CHECK: OpB // CHECK: OpC "tf.OpA"() : () -> ()
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Oct 17 00:26:18 UTC 2023 - 1.8K bytes - Viewed (0)