Results 1 - 7 of 7 for XlaSpmdShardToFullShape (0.5 sec)

  1. tensorflow/compiler/jit/xla_ops_on_regular_devices.cc

                                  .Device(DEVICE),                                 \
                              XlaCompileOnDemandOp);                               \
      REGISTER_KERNEL_BUILDER(Name("XlaSpmdShardToFullShape").Device(DEVICE),      \
                              XlaCompileOnDemandOp);                               \
      REGISTER_KERNEL_BUILDER(Name("XlaSharding").Device(DEVICE),                  \
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Aug 19 19:55:14 UTC 2022
    - 8.8K bytes
  2. tensorflow/compiler/mlir/tf2xla/api/v1/compile_tf_graph_test.cc

              %outputs_7, %control_8 = tf_executor.island wraps "tf.XlaSpmdShardToFullShape"(%outputs_5) {dim = -1 : i64, full_shape = #tf_type.shape<2x2>, manual_sharding = "\08\03\1A\02\02\01\22\02\00\01", unspecified_dims = []} : (tensor<1x2xf32>) -> tensor<2x2xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Apr 13 08:08:57 UTC 2024
    - 11.7K bytes
  3. tensorflow/compiler/jit/compilability_check_util.cc

    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 12 06:33:33 UTC 2024
    - 30.3K bytes
  4. tensorflow/compiler/mlir/tf2xla/tests/legalize-tf-with-tf2xla-hlo-importer.mlir

        // CHECK: %[[FULL:.*]] = mhlo.custom_call @SPMDShardToFullShape(%[[SHARDING]]) {backend_config = "", mhlo.sharding = "{devices=[2,1]0,1}"} : (tensor<1x2xi64>) -> tensor<2x2xi64>
        // CHECK: return %[[FULL]]
        %0 = "tf.XlaSpmdShardToFullShape"(%arg0) {dim = -1 : i64, full_shape = #tf_type.shape<2x2>, manual_sharding = "\08\03\1A\02\02\01\22\02\00\01", unspecified_dims = []} : (tensor<1x2xi64>) -> tensor<2x2xi64>
        func.return %0 : tensor<2x2xi64>
      }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Apr 06 15:32:52 UTC 2024
    - 38.6K bytes
  5. tensorflow/compiler/mlir/tensorflow/tests/extract_outside_compilation.mlir

        // CHECK-SAME:          recv_key = "host_compute_channel_0_retvals"
        // CHECK-SAME:          send_key = "host_compute_channel_0_args"
        // CHECK:             %[[B_FULL:.+]] = "tf.XlaSpmdShardToFullShape"(%[[B]]) <{dim = -1 : i64, full_shape = #tf_type.shape<2x2>, manual_sharding = "\08\03\1A\02\02\01\22\02\00\01", unspecified_dims = []}> : (tensor<1x2xi64>) -> tensor<2x2xi64>
        // CHECK:             "tf.OpC"(%[[B_FULL]])
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Oct 31 08:59:10 UTC 2023
    - 129.6K bytes
  6. tensorflow/compiler/jit/mark_for_compilation_pass.cc

    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 21 12:19:41 UTC 2024
    - 85.3K bytes
  7. tensorflow/compiler/mlir/tensorflow/ir/tf_generated_ops.td

      );
    
      let results = (outs
        TF_Tensor:$output
      );
    
      TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
    }
    
    def TF_XlaSpmdShardToFullShapeOp : TF_Op<"XlaSpmdShardToFullShape", [Pure]> {
      let summary = [{
    An op used by XLA SPMD partitioner to switch from manual partitioning to
      }];
    
      let description = [{
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jun 11 23:24:08 UTC 2024
    - 793K bytes
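
The op definition in result 7 and the attributes visible in the test snippets above (dim, full_shape, manual_sharding, unspecified_dims) are enough to sketch how the op might be invoked from Python. This is a minimal sketch, assuming a standard TensorFlow build exposes the registered op through tf.raw_ops under the same name and attribute names; it only traces a graph, since the op becomes meaningful once the XLA SPMD partitioner rewrites it (see the mhlo.custom_call @SPMDShardToFullShape lowering in result 4).

    # Minimal sketch: build (but do not run) a graph containing XlaSpmdShardToFullShape.
    # Assumption: tf.raw_ops exposes the op with the attribute names shown in the
    # snippets above; this is not an official usage example.
    import tensorflow as tf

    # Serialized xla.OpSharding proto copied verbatim from the tests above:
    # type=OTHER, tile_assignment_dimensions=[2,1], tile_assignment_devices=[0,1],
    # i.e. the "{devices=[2,1]0,1}" sharding seen in the mhlo lowering (result 4).
    MANUAL_SHARDING = b"\x08\x03\x1a\x02\x02\x01\x22\x02\x00\x01"


    @tf.function
    def shard_to_full(local_shard):
        # Each partition holds a 1x2 shard; the SPMD partitioner reassembles the
        # full 2x2 tensor when it replaces this op during XLA compilation.
        return tf.raw_ops.XlaSpmdShardToFullShape(
            input=local_shard,
            manual_sharding=MANUAL_SHARDING,
            full_shape=[2, 2],
            dim=-1,
            unspecified_dims=[],
        )


    # Tracing is enough to see the op appear in the graph; executing it outside an
    # SPMD-partitioned XLA computation is not meaningful.
    concrete = shard_to_full.get_concrete_function(tf.TensorSpec([1, 2], tf.int64))
    print([op.type for op in concrete.graph.get_operations()])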