- Sort Score
- Results 10 results
- Languages All
Results 1 - 10 of 17 for _XlaHostComputeMlir (0.34 sec)
-
tensorflow/compiler/mlir/tensorflow/tests/extract_outside_compilation.mlir
// CHECK: %[[HOST_OUTPUT:[0-9]*]] = "tf._XlaHostComputeMlir"(%[[A_OUTPUT]]) // CHECK: %[[D_OUTPUT:[0-9]*]] = "tf.D"(%[[HOST_OUTPUT]]) // CHECK: %[[HOST_OUTPUT2:[0-9]*]] = "tf._XlaHostComputeMlir"(%[[B_OUTPUT]]) // CHECK: %[[HOST_OUTPUT3:[0-9]*]] = "tf._XlaHostComputeMlir"() // CHECK: %[[HOST_OUTPUT4:[0-9]*]] = "tf._XlaHostComputeMlir"(%[[HOST_OUTPUT3]], %[[HOST_OUTPUT2]])
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Oct 31 08:59:10 UTC 2023 - 129.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/prepare_tpu_computation_for_tf_export.mlir
} // ----- // _XlaHostComputeMlir with manual_sharding should not fall back to // XlaHostCompute, because XlaHostCompute does not support manual_sharding. // Instead, it is skipped and the MlirXlaOpKernel is expected to handle it. func.func @HostComputeManualNoFallback(%arg0: tensor<i32>) -> () { // CHECK: "tf._XlaHostComputeMlir"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 14 18:46:36 UTC 2024 - 9.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/tests/legalize-tf-communication.mlir
// CHECK: "mhlo.recv"([[INIT_TOKEN]]) %0 = "tf._XlaHostComputeMlir"() {recv_key = "host_compute_channel_recv", send_key = "host_compute_channel_send", host_mlir_module = ""} : () -> tensor<i32> func.return } // ----- // Tests `tf._XlaHostComputeMlir` with no results simply forwards its token from // the generated `mhlo.send`. // CHECK-LABEL: func @host_compute_one_operand_no_results
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 08 18:24:20 UTC 2024 - 38.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/ir/tf_remaining_ops.cc
if (op->getNumOperands() != func.getFunctionType().getNumInputs()) return op.emitError() << "'host_func' has " << func.getFunctionType().getNumInputs() << " inputs and '_XlaHostComputeMlir' has " << op->getNumOperands() << " operands. Number of operands/inputs should be the same."; if (op->getNumResults() != func.getFunctionType().getNumResults())
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Apr 14 20:05:58 UTC 2023 - 7.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/api/v1/compile_tf_graph_test.cc
%control_4 = tf_executor.island wraps "tf._XlaHostComputeMlir"(%outputs_2) {host_mlir_module = "", manual_sharding = true, recv_key = "host_compute_channel_0_retvals", send_key = "host_compute_channel_0_args"} : (tensor<1x2xf32>) -> ()
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Apr 13 08:08:57 UTC 2024 - 11.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/cannonicalize_ops_outside_compilation.mlir
// Reshape should not be executed on TPU as all are marked by outside // compilation. And there should be no host-device communication. // CHECK: tf._TPUCompileMlir // CHECK-NOT: tf.Reshape // CHECK-NOT: tf._XlaHostComputeMlir
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Mar 13 21:23:47 UTC 2024 - 2.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/api/v1/testdata/prepare_to_library.mlir
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jan 31 23:44:50 UTC 2024 - 2.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/tpu_tail_with_tobool_op.mlir
// . So the TPU program should contain tf.Rank op and there should be no // host-device communication. // CHECK: tf._TPUCompileMlir // CHECK-SAME: tf.Rank // CHECK-NOT: tf._XlaHostComputeMlir // CHECK-NOT: tf._XlaRecvAtHost // CHECK-NOT: tf._XlaSendFromHost
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Mar 13 21:23:47 UTC 2024 - 2.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/internal/passes/extract_outside_compilation.cc
} // Create an `_XlaHostComputeMlir` for the map_outside_compilation case. Inputs // are converted from split sharding to MANUAL sharding and outputs are // converted from MANUAL sharding to split sharding. Set `common_split_sharding` // if it has not yet been set. Output `full_outputs`, which is the outputs of // the `_XlaHostComputeMlir` and add the // `_XlaHostComputeMlir` to `host_compute_out_ops`.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 21:25:12 UTC 2024 - 68.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/internal/passes/clustering_passes.td
tf_device.return }, { %1 = "tf_device.cluster"() ( { %2 = "tf.Const"() {value = dense<1.000000e+00> : tensor<f32>} : () -> tensor<f32> %3 = "tf._XlaHostComputeMlir"(%2) {recv_key = "host_compute_channel_0_0_retvals", send_key = "host_compute_channel_0_0_args", tpu_core = 0 : i64} : (tensor<f32>) -> tensor<f32> %4 = "tf.AddV2"(%2, %3) : (tensor<f32>, tensor<f32>) -> tensor<f32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 02:01:13 UTC 2024 - 19.8K bytes - Viewed (0)