Results 41 - 50 of 71 for StatefulPartitionedCall (0.4 sec)
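
For orientation, here is a minimal sketch of how the op being searched for typically appears in the TF MLIR dialect, modeled on the excerpts below; the function names and tensor types are hypothetical.

      // Hypothetical caller: a stateful call that forwards its operand to a private callee.
      func.func @caller(%arg0: tensor<f32>) -> tensor<f32> {
        // The callee symbol lives in the `f` attribute next to the usual config attributes.
        %0 = "tf.StatefulPartitionedCall"(%arg0) {config = "", config_proto = "", executor_type = "", f = @callee} : (tensor<f32>) -> tensor<f32>
        func.return %0 : tensor<f32>
      }

      func.func private @callee(%arg0: tensor<f32>) -> tensor<f32> {
        func.return %arg0 : tensor<f32>
      }
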

  1. tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model_freeze_assets.mlir

      // CHECK: func @f(%arg0
      func.func @f(%arg0: tensor<!tf_type.string> {tf_saved_model.bound_input = @v})
      attributes {tf_saved_model.exported_names = ["f"]} {
        "tf.StatefulPartitionedCall"(%arg0) {config = "", config_proto = "", executor_type = "", f = @f_callee} : (tensor<!tf_type.string>) -> ()
        func.return
      }
    
      func.func private @f_callee(%arg0: tensor<!tf_type.string>) {
        func.return
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 4.5K bytes
  2. tensorflow/compiler/mlir/tf2xla/internal/passes/clustering_passes.td

    def XlaClusterFormationPass : Pass<"tf-xla-cluster-formation", "ModuleOp"> {
      let summary = "Encapsulate partitioned calls within a Cluster op";
      let description = [{
        This pass clusters `tf.PartitionedCall` and `tf.StatefulPartitionedCall`
        with `_xla_compile_device_type` attribute into a `tf_device.cluster`.
        Notice this pass will only rewrite the outermost call if there are nested
    - Last Modified: Tue Apr 30 02:01:13 UTC 2024
    - 19.8K bytes
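
A small sketch, in the style of the test files in these results, of the kind of IR the clustering pass description above refers to. The names, types, and CHECK expectations are assumptions for illustration, not taken from the pass's own tests.

      // Hypothetical input: a stateful call tagged for XLA compilation.
      // Expected result shape: the call ends up wrapped in a tf_device.cluster.
      // CHECK: "tf_device.cluster"()
      // CHECK: "tf.StatefulPartitionedCall"
      // CHECK: tf_device.return
      func.func @main(%arg0: tensor<i32>) -> tensor<i32> {
        %0 = "tf.StatefulPartitionedCall"(%arg0) {_xla_compile_device_type = "CPU", config = "", config_proto = "", executor_type = "", f = @stateful_fn} : (tensor<i32>) -> tensor<i32>
        func.return %0 : tensor<i32>
      }

      func.func private @stateful_fn(%arg0: tensor<i32>) -> tensor<i32> {
        func.return %arg0 : tensor<i32>
      }
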
  3. tensorflow/compiler/mlir/tensorflow/tests/tensor_list_ops_decomposition.mlir

      %tl = "tf.EmptyTensorList"(%elem_shape, %max_size) : (tensor<0xi32>, tensor<i32>) -> tensor<!tf_type.variant<tensor<f32>>>
      // CHECK: "tf.StatefulPartitionedCall"(%[[INIT]],
      // CHECK-SAME: f = @callee_tensorlist_decomposed
      %call = "tf.StatefulPartitionedCall"(%tl, %arg0) {f = @callee, config = "", config_proto = "", executor_type = ""}
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 38.6K bytes
  4. tensorflow/compiler/mlir/tensorflow/tests/tensor_array_ops_decomposition.mlir

      // CHECK: %[[GVAR2:.*]] = "tf.MlirLocalVarOp"() : () -> tensor<!tf_type.resource<tensor<5x3xf32>>>
      // CHECK: "tf.StatefulPartitionedCall"(%[[VAR]], %[[GVAR1]], %[[GVAR2]])
      // CHECK-SAME: f = @callee_tensorarray_decomposed
      %call = "tf.StatefulPartitionedCall"(%ta#0) {f = @callee, config = "", config_proto = "", executor_type = ""}
        : (tensor<!tf_type.resource>) -> tensor<!tf_type.resource>
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 49K bytes
  5. tensorflow/compiler/mlir/tensorflow/transforms/tf_device_passes.td

      let summary = "Rewrites partitioned calls into Xla launch ops to make the attached function run on XLA.";
    
      let description = [{
        This pass rewrites `tf.PartitionedCall` and `tf.StatefulPartitionedCall`
        operations with `_xla_compile_device_type` attribute in a
        `tf_device.cluster` into `tf.XlaLaunch` operations. This makes the attached
        function execute with XLA. `tf.XlaLaunch` requires resource-type arguments
    - Last Modified: Wed Apr 17 18:52:57 UTC 2024
    - 12.5K bytes
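
As a companion to the description above, a sketch of the input pattern it names: a tagged stateful call already wrapped in a tf_device.cluster. Names and types are hypothetical, and the CHECK line only records the general expectation (a tf.XlaLaunch op taking over the call); the pass's exact output form is not reproduced here.

      // CHECK: "tf.XlaLaunch"
      func.func @main(%arg0: tensor<f32>) -> tensor<f32> {
        %0 = "tf_device.cluster"() ({
          %1 = "tf.StatefulPartitionedCall"(%arg0) {_xla_compile_device_type = "GPU", config = "", config_proto = "", executor_type = "", f = @launched_fn} : (tensor<f32>) -> tensor<f32>
          tf_device.return %1 : tensor<f32>
        }) : () -> tensor<f32>
        func.return %0 : tensor<f32>
      }

      func.func private @launched_fn(%arg0: tensor<f32>) -> tensor<f32> {
        func.return %arg0 : tensor<f32>
      }
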
  6. tensorflow/compiler/mlir/tensorflow/tests/resource_op_lifting.mlir

      // CHECK: %[[CLUSTER:.*]] = "tf_device.cluster"()
      "tf_device.cluster"() ({
        // CHECK: %[[PC0:.*]] = "tf.StatefulPartitionedCall"(%[[READ0]], %[[READ1]], %[[CONST]])
        // CHECK-SAME: f = @callee_resource_lifted
        %3 = "tf.StatefulPartitionedCall"(%0, %1, %2) {f = @callee, config = "", config_proto = "", executor_type = ""}
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 74K bytes
  7. tensorflow/compiler/mlir/tf2xla/internal/mlir_bridge_pass_util.cc

    bool IsNonReplicatedGraph(const Graph& graph,
                              const FunctionLibraryDefinition* function_library) {
      auto predicate = [](const Graph& graph) {
        const std::string kStatefulPartitionedCallOp = "StatefulPartitionedCall";
        for (const Node* node : graph.nodes()) {
          auto node_op = node->type_string();
          if (node_op == kStatefulPartitionedCallOp) {
            // Functions called by StatefulPartitionedCall ops with
    - Last Modified: Tue May 07 12:22:33 UTC 2024
    - 8.9K bytes
  8. tensorflow/compiler/mlir/quantization/tensorflow/quantize_preprocess.cc

      pm.addNestedPass<mlir::func::FuncOp>(
          mlir::quant::stablehlo::CreateConvertTFQuantOpsToMHLOPass());
      pm.addPass(mlir::createCanonicalizerPass());
    
      // TF -> StableHLO legalization.
      // Skip StatefulPartitionedCall to preserve aliased functions.
      mlir::odml::AddLegalizeTFToStablehloPasses(pm, /*skip_quantization_ops=*/true,
                                                 /*skip_resize=*/false,
    - Last Modified: Wed Apr 24 12:49:45 UTC 2024
    - 9.8K bytes
  9. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/replace_stablehlo_ops_in_main_function_with_xla_call_module_ops.mlir

      // CHECK: return %[[SUBGRAPH_1]] : tensor<1024x3xf32>
      // CHECK: }
    }
    
    // -----
    
    // The main function contains PartitionedCall and StatefulPartitionedCall ops, which
    // are used to preserve aliased functions. This test makes sure stablehlo ops in
    // each PartitionedCall function are lifted.
    
    - Last Modified: Thu Apr 25 01:09:50 UTC 2024
    - 39.8K bytes
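
A rough sketch of the structure the test comment above describes, with hypothetical names and shapes: the main function keeps a stateful call (preserving the aliased function), while the callee body holds the StableHLO ops that the pass is expected to lift into XlaCallModule subgraphs.

      func.func @main(%arg0: tensor<1024x3xf32>) -> tensor<1024x3xf32> {
        %0 = "tf.StatefulPartitionedCall"(%arg0) {config = "", config_proto = "", executor_type = "", f = @aliased_fn} : (tensor<1024x3xf32>) -> tensor<1024x3xf32>
        func.return %0 : tensor<1024x3xf32>
      }

      // StableHLO ops in here are the ones the pass should move into an XlaCallModule op.
      func.func private @aliased_fn(%arg0: tensor<1024x3xf32>) -> tensor<1024x3xf32> {
        %0 = stablehlo.add %arg0, %arg0 : tensor<1024x3xf32>
        func.return %0 : tensor<1024x3xf32>
      }
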
  10. tensorflow/compiler/jit/build_xla_ops_pass.cc

      // we don't have any evidence that choosing a stateless partitioned call helps
      // for performance.
      ops::StatefulPartitionedCall call(
          root.WithOpName("stateful_partitioned_call"), args, n->output_types(),
          func, ops::StatefulPartitionedCall::Attrs{}.ConfigProto(config_string));
    
      for (const Edge* e : n->in_edges()) {
        if (e->IsControlEdge()) {
    - Last Modified: Tue Mar 12 06:33:33 UTC 2024
    - 24.3K bytes