Results 1 - 5 of 5 for replicate_on_last_tile_dim (0.39 sec)

  1. tensorflow/compiler/mlir/tensorflow/utils/xla_sharding_util.h

    // argument index.
    llvm::SmallVector<llvm::SmallVector<int64_t, 4>, 4> GetMetadataArgumentMapping(
        const tpu::TPUCompileMetadataProto& metadata);
    
    // Gets the proper tensor dimension from XLA OpSharding.
    // "replicate_on_last_tile_dim" and "last_tile_dims" should be deducted from the
    // real Tensor dimensions when tiled.
    // For example:
    // f32[8,512](sharding={devices=[1,1,2]0,1 last_tile_dims={REPLICATED})
    - Last Modified: Thu Mar 28 22:18:34 UTC 2024
    - 6K bytes
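
    The header comment above means that the trailing tile-assignment entries
    consumed by replicate_on_last_tile_dim and last_tile_dims describe
    replication rather than tensor dimensions, so they have to be subtracted
    before the tile assignment is compared with the tensor's real rank. A
    minimal sketch of that arithmetic, using the protobuf-generated accessors
    on xla::OpSharding; the include path and the helper name are assumptions,
    not code from the repository:

        #include "xla/xla_data.pb.h"  // assumed include path for OpSharding

        // Number of tile-assignment entries that actually split tensor
        // dimensions (hypothetical helper, for illustration only).
        int TiledTensorRank(const xla::OpSharding& sharding) {
          int rank = sharding.tile_assignment_dimensions_size();
          // The last entry only encodes replication when this flag is set.
          if (sharding.replicate_on_last_tile_dim()) --rank;
          // Entries listed in last_tile_dims likewise encode replication.
          rank -= sharding.last_tile_dims_size();
          return rank;
        }

    For the f32[8,512] example quoted above, devices=[1,1,2] carries three
    entries and one of them is a REPLICATED last tile dim, so 3 - 1 = 2 tiled
    dimensions remain, matching the tensor's real rank of 2.
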
  2. tensorflow/compiler/mlir/tensorflow/utils/xla_sharding_util.cc

      for (auto split_op : split_ops_for_tiled_input) {
        for (auto split_op_output : split_op.getResults()) {
          int64_t repeat_count =
              input_sharding.replicate_on_last_tile_dim()
                  ? *input_sharding.tile_assignment_dimensions().rbegin()
                  : 1;
          for (int64_t i = 0; i < repeat_count; ++i) {
            tiled_inputs->push_back(split_op_output);
          }
        }
    - Last Modified: Wed May 22 21:28:13 UTC 2024
    - 34K bytes
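
    In the loop above, every result of a split op is pushed repeat_count
    times, where repeat_count is the last entry of tile_assignment_dimensions
    whenever replicate_on_last_tile_dim is set. A standalone sketch of that
    duplication pattern, with plain integers standing in for the mlir::Value
    results handled by the real code:

        #include <cstdint>
        #include <vector>

        // Duplicate every tile so that each replica along the trailing
        // "replication" entry of the tile assignment receives a copy
        // (sketch only; names and container types are illustrative).
        std::vector<int> ExpandTilesForReplication(
            const std::vector<int>& tiles,
            const std::vector<int64_t>& tile_assignment_dimensions,
            bool replicate_on_last_tile_dim) {
          const int64_t repeat_count =
              replicate_on_last_tile_dim ? tile_assignment_dimensions.back() : 1;
          std::vector<int> expanded;
          for (int tile : tiles) {
            for (int64_t i = 0; i < repeat_count; ++i) expanded.push_back(tile);
          }
          return expanded;
        }
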
  3. tensorflow/compiler/mlir/tensorflow/tests/tpu_sharding_identification.mlir

    // -----
    
    // Tests TPIv2 with a "partially tiled" XLA annotation where:
    //   type: OTHER
    //   tile_assignment_dimensions: [4, 1, 1, 1, 2]
    //   tile_assignment_devices: [0, 1, 2, 3, 4, 5, 6, 7]
    //   replicate_on_last_tile_dim: true
    // Serialized string:
    //   "\08\03\1A\05\04\01\01\01\02\22\08\00\01\02\03\04\05\06\070\01"
    
    // CHECK-LABEL: func @partial_tile_partitioned_variable
    - Last Modified: Tue Feb 20 19:07:52 UTC 2024
    - 47.5K bytes
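
    The escaped string in that test is a serialized xla::OpSharding proto
    whose fields match the debug listing above. A sketch of how an equivalent
    message could be built with the standard protobuf-generated setters; the
    field values come from the test comment, while the include path and the
    function name are assumptions:

        #include <cstdint>
        #include <string>
        #include "xla/xla_data.pb.h"  // assumed include path for OpSharding

        std::string BuildPartialTileSharding() {
          xla::OpSharding sharding;
          sharding.set_type(xla::OpSharding::OTHER);
          // tile_assignment_dimensions: [4, 1, 1, 1, 2]
          const int64_t dims[] = {4, 1, 1, 1, 2};
          for (int64_t dim : dims) sharding.add_tile_assignment_dimensions(dim);
          // tile_assignment_devices: [0, 1, 2, 3, 4, 5, 6, 7]
          for (int64_t device = 0; device < 8; ++device) {
            sharding.add_tile_assignment_devices(device);
          }
          // replicate_on_last_tile_dim: true
          sharding.set_replicate_on_last_tile_dim(true);
          // The MLIR test embeds this binary string, escaped, as an attribute.
          return sharding.SerializeAsString();
        }
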
  4. tensorflow/compiler/mlir/tf2xla/internal/passes/tpu_sharding_identification_pass.cc

        // have one or more dimension(s) than its rank; so, we subtract them to
        // determine which rank the sharding is compatible with.
        tile_assignment_rank -= (int)sharding->replicate_on_last_tile_dim();
        tile_assignment_rank -= sharding->last_tile_dims_size();
    
        if (tensor_rank < tile_assignment_rank) {
          if (partitioned_op) {
            partitioned_op->emitError()
    - Last Modified: Tue Apr 30 02:01:13 UTC 2024
    - 28.9K bytes
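
    The pass above applies the same deduction shown earlier and then rejects
    shardings whose adjusted tile-assignment rank exceeds the tensor's rank.
    A sketch of that check in isolation; the function name is hypothetical,
    and the real pass emits an MLIR diagnostic instead of returning a bool:

        #include <cstdint>
        #include "xla/xla_data.pb.h"  // assumed include path for OpSharding

        // True when the sharding can apply to a tensor of this rank: the
        // replication-only entries are removed before the comparison.
        bool ShardingFitsTensorRank(const xla::OpSharding& sharding,
                                    int64_t tensor_rank) {
          int64_t tile_assignment_rank =
              sharding.tile_assignment_dimensions_size();
          tile_assignment_rank -=
              static_cast<int64_t>(sharding.replicate_on_last_tile_dim());
          tile_assignment_rank -= sharding.last_tile_dims_size();
          return tensor_rank >= tile_assignment_rank;
        }
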
  5. tensorflow/compiler/mlir/tensorflow/tests/tpu_rewrite.mlir

        func.return %4, %3 : tensor<*xi32>, tensor<*xi1>
      }
    }
    
    // -----
    
    // Tests inputs to TPUComputation that are tiled in multiple dimensions with
    // replicate_on_last_tile_dim set.
    
    // The following OpSharding is used for TPU computation inputs in below test:
    // Proto debug string:
    //  input 0
    //   type: OTHER
    //   tile_assignment_dimensions: 2
    - Last Modified: Thu May 02 22:03:30 UTC 2024
    - 172.9K bytes