- Sort Score
- Results per page: 10
- Languages All
Results 21 - 30 of 53 for StatefulPartitionedCall (0.25 sec)
-
tensorflow/compiler/jit/encapsulate_xla_computations_pass.h
// When add_edges_to_output_of_downstream_nodes is true, the output edges of
// the xla_launch_node's immediate downstream nodes would be attached to the
// generated xla node. For example, if the original graph is
// StatefulPartitionedCall{_xla_compile_id=1} -> XlaClusterOutput -> NodeA
// The output graph of this function would look like the following when
// add_edges_to_output_of_downstream_nodes is true:
// XlaLaunch -> NodeA
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 06:59:07 UTC 2024 - 3.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/end-to-end-tpu-reshard-variables.mlir
tf_executor.graph { %control = tf_executor.island { "tf.StatefulPartitionedCall"(%arg0) <{config = "", config_proto = "", executor_type = "", f = @partitioned}> : (tensor<*x!tf_type.resource>) -> () tf_executor.yield } tf_executor.fetch %control : !tf_executor.control }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Mar 13 21:23:47 UTC 2024 - 4.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/tests/components/tf_to_stablehlo.mlir
// ----- // This test makes sure functions with tf._noinline=true are not inlined. module { func.func @stateful_partitioned_call(%arg0: tensor<1x2x2x3xf32>) -> (tensor<1x2x2x3xf32>) { %0 = "tf.StatefulPartitionedCall"(%arg0) <{ config = "", config_proto = "", executor_type = "", f = @some_func }> { _collective_manager_ids = [], device = "" } : (tensor<1x2x2x3xf32>) -> tensor<1x2x2x3xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Apr 08 20:05:12 UTC 2024 - 13.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model_freeze_assets.mlir
// CHECK: func @f(%arg0 func.func @f(%arg0: tensor<!tf_type.string> {tf_saved_model.bound_input = @v}) attributes {tf_saved_model.exported_names = ["f"]} { "tf.StatefulPartitionedCall"(%arg0) {config = "", config_proto = "", executor_type = "", f = @f_callee} : (tensor<!tf_type.string>) -> () func.return } func.func private @f_callee(%arg0: tensor<!tf_type.string>) { func.return
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 4.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/internal/passes/clustering_passes.td
def XlaClusterFormationPass : Pass<"tf-xla-cluster-formation", "ModuleOp"> { let summary = "Encapsulate partitioned calls within a Cluster op"; let description = [{ This pass clusters `tf.PartitionedCall` and `tf.StatefulPartitionedCall` with `_xla_compile_device_type` attribute into a `tf_device.cluster`. Notice this pass will only rewrite the outermost call if there are nested
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 02:01:13 UTC 2024 - 19.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/tensor_list_ops_decomposition.mlir
%tl = "tf.EmptyTensorList"(%elem_shape, %max_size) : (tensor<0xi32>, tensor<i32>) -> tensor<!tf_type.variant<tensor<f32>>> // CHECK: "tf.StatefulPartitionedCall"(%[[INIT]], // CHECK-SAME: f = @callee_tensorlist_decomposed %call = "tf.StatefulPartitionedCall"(%tl, %arg0) {f = @callee, config = "", config_proto = "", executor_type = ""}
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 38.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/tensor_array_ops_decomposition.mlir
// CHECK: %[[GVAR2:.*]] = "tf.MlirLocalVarOp"() : () -> tensor<!tf_type.resource<tensor<5x3xf32>>> // CHECK: "tf.StatefulPartitionedCall"(%[[VAR]], %[[GVAR1]], %[[GVAR2]]) // CHECK-SAME: f = @callee_tensorarray_decomposed %call = "tf.StatefulPartitionedCall"(%ta#0) {f = @callee, config = "", config_proto = "", executor_type = ""} : (tensor<!tf_type.resource>) -> tensor<!tf_type.resource>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 49K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/tf_device_passes.td
let summary = "Rewrites partition calls into Xla launch ops to make the attached function run on XLA."; let description = [{ This pass rewrites `tf.PartitionedCall` and `tf.StatefulPartitionedCall` operations with `_xla_compile_device_type` attribute in a `tf_device.cluster` into `tf.XlaLaunch` operations. This makes the attached function execute with XLA. `tf.XlaLaunch` requires resource-type arguments
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 17 18:52:57 UTC 2024 - 12.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/resource_op_lifting.mlir
// CHECK: %[[CLUSTER:.*]] = "tf_device.cluster"() "tf_device.cluster"() ({ // CHECK: %[[PC0:.*]] = "tf.StatefulPartitionedCall"(%[[READ0]], %[[READ1]], %[[CONST]]) // CHECK-SAME: f = @callee_resource_lifted %3 = "tf.StatefulPartitionedCall"(%0, %1, %2) {f = @callee, config = "", config_proto = "", executor_type = ""}
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 74K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/internal/mlir_bridge_pass_util.cc
bool IsNonReplicatedGraph(const Graph& graph, const FunctionLibraryDefinition* function_library) { auto predicate = [](const Graph& graph) { const std::string kStatefulPartitionedCallOp = "StatefulPartitionedCall"; for (const Node* node : graph.nodes()) { auto node_op = node->type_string(); if (node_op == kStatefulPartitionedCallOp) { // Functions called by StatefulPartitionedCall ops with
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 07 12:22:33 UTC 2024 - 8.9K bytes - Viewed (0)