Results 1 - 10 of 13 for tpu0 (0.47 sec)
tensorflow/compiler/mlir/tensorflow/tests/tpu_cluster_formation.mlir
%3 = "tf.ReadVariableOp"(%0) {_xla_compile_device_type = "TPU", _replication_info = "a"} : (!tf_res) -> tensor<f32> %4 = "tf.ReadVariableOp"(%1) {_xla_compile_device_type = "TPU", _replication_info = "b"} : (!tf_res) -> tensor<f32> %5 = "tf.ReadVariableOp"(%2) {_xla_compile_device_type = "TPU", _replication_info = "c"} : (!tf_res) -> tensor<f32> %6 = "tf.Identity"(%3) {_xla_compile_device_type = "TPU", _replication_info = "a"} : (tensor<f32>) -> tensor<f32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 22:03:30 UTC 2024 - 53.9K bytes
.bazelrc
# ARM64 WHEEL
test:linux_arm64_wheel_test_filters --test_tag_filters=-no_oss,-no_aarch64,-oss_excluded,-oss_serial,-gpu,-tpu,-benchmark-test,-v1only,-no_oss_py38,-no_oss_py39,-no_oss_py310
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 11 17:12:54 UTC 2024 - 52.9K bytes
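The `test:linux_arm64_wheel_test_filters` prefix in the snippet above makes this a named config: in a .bazelrc, `test:<name> <flags>` applies the flags only when bazel is invoked with `--config=<name>`. A minimal sketch of such an invocation, where the `//tensorflow/...` target pattern is an assumption and not from the snippet:

    # Hypothetical invocation; the target pattern is an assumption.
    bazel test --config=linux_arm64_wheel_test_filters //tensorflow/...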
tensorflow/compiler/mlir/tensorflow/ir/tf_ops.td
let summary = "Op that compiles a computation in MLIR into a TPU program, and loads and executes it on a TPU device."; let description = [{ For the internal use of the TPU compiler. 'static_shapes' are tensors specifying the maximum dimension sizes for the tensors specified in `dynamic_operands`. 'args' are inputs to the TPU computation.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 24 04:08:35 UTC 2024 - 90.5K bytes
tensorflow/compiler/mlir/tensorflow/transforms/sparsecore/embedding_pipelining.cc
// For all other inputs that go from TPU op to TPU op, insert the
// TPUOutput/Input pair.
// Add TPUReplicatedInput/TPUReplicatedOutput pairs along each edge.
// TODO(bfontain): Should be merged with the above loop.
llvm::SetVector<Value> values_to_add_nodes;
for (Operation* op : backward_pass_ops) {
  // TODO(bfontain): Should validate that all the TPU ops are in the same
  // replication region.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 92.9K bytes
src/runtime/mbitmap.go
        return
    }
    tp0 := s.typePointersOfType(typ, addr)
    tp1 := s.typePointersOf(addr, size)
    failed := false
    for {
        var addr0, addr1 uintptr
        tp0, addr0 = tp0.next(addr + size)
        tp1, addr1 = tp1.next(addr + size)
        if addr0 != addr1 {
            failed = true
            break
        }
        if addr0 == 0 {
            break
        }
    }
    if failed {
        tp0 := s.typePointersOfType(typ, addr)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 00:18:55 UTC 2024 - 60K bytes
tensorflow/compiler/jit/BUILD
    ]),
    alwayslink = 1,
)

cc_library(
    name = "xla_tpu_jit",
    visibility = ["//visibility:public"],
    deps = if_libtpu([
        "//tensorflow/core/tpu/graph_rewrite:tpu_rewrite_pass_registration",
        "//tensorflow/core/tpu/graph_rewrite:configure_tpu_embedding_rewrite_registration",
        "@local_xla//xla/stream_executor/tpu:tpu_transfer_manager",
    ]),
    alwayslink = 1,
)

cc_library(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 31 00:41:19 UTC 2024 - 61.5K bytes
hack/local-up-cluster.sh
    ETCD_PID=
  fi
}

function print_color {
  message=$1
  prefix=${2:+$2: } # add colon only if defined
  color=${3:-1} # default is red
  echo -n "$(tput bold)$(tput setaf "${color}")"
  echo "${prefix}${message}"
  echo -n "$(tput sgr0)"
}

function warning_log {
  print_color "$1" "W$(date "+%m%d %H:%M:%S")]" 1
}

function start_etcd {
  echo "Starting etcd"
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Sat May 25 02:33:52 UTC 2024 - 53.3K bytes
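In the snippet above, print_color wraps tput to emit a bold, colored message and then reset the terminal, and warning_log prefixes the message with a timestamped W-tag in red (tput color 1). A minimal standalone sketch that reuses the two functions exactly as shown:

    #!/usr/bin/env bash
    # Sketch: exercise print_color/warning_log from the snippet above.
    function print_color {
      message=$1
      prefix=${2:+$2: } # add colon only if a prefix was given
      color=${3:-1}     # tput color index; 1 is red
      echo -n "$(tput bold)$(tput setaf "${color}")"
      echo "${prefix}${message}"
      echo -n "$(tput sgr0)" # reset bold/color
    }
    function warning_log {
      print_color "$1" "W$(date "+%m%d %H:%M:%S")]" 1
    }
    warning_log "etcd not responding" # e.g. "W0616 05:45:23]: etcd not responding" in red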
tensorflow/compiler/mlir/tf2xla/internal/passes/extract_outside_compilation.cc
// _XlaSendFromHost but the _XlaCompileMlir has not yet been created for device
// cluster that contains the outside compiled ops. This placeholder should be
// replaced by the TPU cluster _XlaCompileMlir in a subsequent pass.
mlir::TF::_XlaCompileMlirPlaceholderProgramKeyOp CreateCompilationKeyPlaceholder(
    Location loc, OpBuilder& builder) {
  auto result_type =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 21:25:12 UTC 2024 - 68.3K bytes
tensorflow/BUILD
config_setting(
    name = "disable_mlir_bridge",
    define_values = {"enable_mlir_bridge": "false"},
    visibility = ["//visibility:public"],
)

# This flag enables experimental TPU support
bool_flag(
    name = "enable_tpu_support",
    build_setting_default = False,
)

config_setting(
    name = "with_tpu_support_define",
    define_values = {"with_tpu_support": "true"},
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 31 16:51:59 UTC 2024 - 53.5K bytes
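The `with_tpu_support_define` config_setting above matches when a build passes the corresponding --define, which is how select() branches in the BUILD files can toggle experimental TPU support. A hedged usage sketch, where the target label is an assumption and not from the snippet:

    # Hypothetical build; --define=with_tpu_support=true is what the
    # config_setting above matches on. The target label is an assumption.
    bazel build --define=with_tpu_support=true //tensorflow:tensorflow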
tensorflow/compiler/mlir/tensorflow/tests/decompose_resource_ops.mlir
"tf.ResourceApplyFtrl"(%var, %accum, %linear, %grad, %lr, %l1, %l2, %lr_power) {_xla_compile_device_type = "TPU", _replication_info = "cluster_train_function", device = "", multiply_linear_by_lr = false, use_locking = true} : (tensor<*x!tf_type.resource>, tensor<*x!tf_type.resource>, tensor<*x!tf_type.resource>, tensor<*xf32>, tensor<*xf32>, tensor<*xf32>, tensor<*xf32>,...
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 22 19:47:48 UTC 2024 - 51.3K bytes