Results 1 - 10 of 162 for tranpose (0.49 sec)

  1. tensorflow/compiler/mlir/lite/transforms/push_transpose_through_ewise.cc

        return false;
      }
    
      return true;
    }
    
    // In some cases, transposes may commute with elementwise operations. In order
    // to make as many transposes redundant as possible, we can "push" transposes
    // back so that they fuse later on. These patterns handle two such cases in
    // a conservative fashion; on net they will never add to the number of transposes
    // in the graph.
    
    // ewise(tpose(x), tpose(y)) -> tpose(ewise(x, y))
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 12.5K bytes
    - Viewed (0)
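
    The comment above describes a commutation identity; a minimal NumPy sketch of it
    (an illustration with an assumed permutation and shapes, not code from the pass):

      import numpy as np

      # ewise(tpose(x), tpose(y)) == tpose(ewise(x, y)) when both inputs share a perm.
      perm = (0, 3, 1, 2)                                  # assumed example permutation
      x = np.random.rand(1, 4, 4, 8)
      y = np.random.rand(1, 4, 4, 8)

      lhs = np.transpose(x, perm) + np.transpose(y, perm)  # ewise(tpose(x), tpose(y))
      rhs = np.transpose(x + y, perm)                      # tpose(ewise(x, y))
      assert np.allclose(lhs, rhs)
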
  2. tensorflow/compiler/mlir/lite/transforms/optimize.cc

      // available in TFLite.
      //
      // - tfl.gather_nd -> tfl.transpose -> tfl.gather_nd -> tfl.transpose
      //   where ...
      //     - all tfl.gather_nd op instances take [0, 0, 1, 1, ..., n-1, n-1] as
      //       the indices argument,
      //     - first transpose op takes perm [2, 1, 0, 3], and
      //     - second transpose op takes perm [1, 2, 0, 3].
      //
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Apr 30 00:40:15 UTC 2024
    - 102.3K bytes
    - Viewed (0)
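
    The listed perms can be sanity-checked outside the pass; a NumPy sketch of how the
    two transposes compose (assumed input shape, and only the perm arithmetic, not the
    gather_nd pattern itself):

      import numpy as np

      p = [2, 1, 0, 3]                 # perm of the first transpose in the pattern
      q = [1, 2, 0, 3]                 # perm of the second transpose in the pattern
      x = np.random.rand(2, 3, 4, 5)   # assumed rank-4 input

      composed = [p[i] for i in q]     # transpose(transpose(x, p), q) == transpose(x, composed)
      assert composed == [1, 0, 2, 3]  # net effect: the first two axes are swapped
      assert np.array_equal(np.transpose(np.transpose(x, p), q), np.transpose(x, composed))
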
  3. tensorflow/compiler/mlir/tensorflow/tests/transpose-op.mlir

      %1 = "tf.Const"() {value = dense<[0, 0x4141, 3, 1]> : tensor<4xi32>} : () -> tensor<4xi32>
      %2 = "tf.Transpose"(%arg0, %0) : (tensor<1x4x4x8xf32>, tensor<4xi32>) -> tensor<1x8x4x4xf32>
      // expected-error @+1 {{'tf.Transpose' op perm[1]=16705 must be in range [-4, 4)}}
      %3 = "tf.Transpose"(%2, %1) : (tensor<1x8x4x4xf32>, tensor<4xi32>) -> tensor<1x4x4x8xf32>
      func.return %3 : tensor<1x4x4x8xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Mar 23 05:52:37 UTC 2023
    - 634 bytes
    - Viewed (0)
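
    The test above expects the verifier to reject a permutation entry outside [-4, 4)
    for a rank-4 tensor; a rough NumPy analogy (not the tf.Transpose verifier itself):

      import numpy as np

      x = np.zeros((1, 8, 4, 4))              # rank 4, so valid axes lie in [-4, 4)
      try:
          np.transpose(x, (0, 0x4141, 3, 1))  # 0x4141 == 16705, out of range
      except ValueError as err:               # NumPy raises AxisError, a ValueError subclass
          print("rejected:", err)
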
  4. tensorflow/compiler/mlir/quantization/stablehlo/passes/fold_constant_transpose.cc

        // `TransposeOp`'s permutation attribute.
        const DenseElementsTransposer transposer(original_shape,
                                                 op.getPermutation());
        SmallVector<float> transposed_values =
            transposer.TransposeValues(original_values);
    
        // Create a new constant op with the transposed values.
        const Location combined_loc =
            rewriter.getFusedLoc({const_op.getLoc(), op.getLoc()});
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 7.7K bytes
    - Viewed (0)
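
    Conceptually, folding a constant transpose means materializing the permuted values
    as a new constant; a loose NumPy analogy (assumed operand and permutation, not the
    DenseElementsTransposer implementation):

      import numpy as np

      original_values = np.arange(24, dtype=np.float32).reshape(2, 3, 4)  # assumed constant operand
      permutation = (2, 0, 1)                                             # assumed permutation attribute

      # Replace transpose(const) with a new constant holding the transposed values.
      folded_constant = np.ascontiguousarray(np.transpose(original_values, permutation))
      assert folded_constant.shape == (4, 2, 3)
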
  5. tensorflow/compiler/mlir/tensorflow/transforms/layout_optimization.cc

          transpose.setOperand(1, permutation_op);
          transpose.getResult().setType(mlir::cast<TensorType>(original_type[idx]));
        } else {
          transpose = builder.create<TransposeOp>(loc, result, permutation_op);
        }
    
        // Forward all users to the transpose operation.
        result.replaceAllUsesWith(transpose);
        transpose.setOperand(0, result);
      }
    
      // Remove unused transpose operations.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 19.3K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/scatter.h

            return rewriter.notifyMatchFailure(
                scatter_op, "unsupported scatter_dims_to_operand_dims");
          }
    
          // Transpose the operand so that the trailing dimensions of the
          // operand are being updated. Then apply a tf.scatter op and transpose
          // the result back to get the same shape as the original operand.
    
          SmallVector<int64_t, 4> permutation_array;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 10.1K bytes
    - Viewed (0)
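
    A small NumPy sketch of the strategy in the comment above (assumed shapes, with a
    plain indexed assignment standing in for the tf.scatter op): move the updated
    dimensions to the back, update, then undo the transpose with the inverse permutation:

      import numpy as np

      operand = np.zeros((4, 5, 6))                    # assumed operand shape
      permutation = [1, 2, 0]                          # assumed permutation_array
      transposed = np.transpose(operand, permutation)  # updated dims now trail

      transposed[0, 1] = 7.0                           # stand-in for the tf.scatter update

      inverse_perm = np.argsort(permutation)           # transpose the result back
      result = np.transpose(transposed, inverse_perm)
      assert result.shape == operand.shape
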
  7. tensorflow/compiler/mlir/lite/transforms/prepare_patterns.td

        (TF_SubOp $beta, (TF_MulOp $m, $mul)))>;
    
    class TFi32<int v> : ConstantAttr<I32ElementsAttr, !cast<string>(v)>;
    
    // Convert a MatMul without transpose on b into a MatMul with an explicit
    // transpose op and a transposed b.
    def ConvertMatmulWithoutTransposeToWithTranspose :
          Pat<(TF_MatMulOp $a, $b, ConstBoolAttrFalse:$at, ConstBoolAttrFalse, $grad_a, $grad_b),
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Apr 30 00:40:15 UTC 2024
    - 10.5K bytes
    - Viewed (0)
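
    The identity behind that pattern can be checked in NumPy (a sketch with assumed
    shapes; the transpose_b=True form is modeled by transposing the second operand
    back before multiplying):

      import numpy as np

      a = np.random.rand(2, 3)
      b = np.random.rand(3, 4)

      b_transposed = b.T  # the explicit transpose op inserted by the pattern
      # matmul(a, b, transpose_b=False) == matmul(a, b_transposed, transpose_b=True)
      assert np.allclose(a @ b, a @ b_transposed.T)
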
  8. tensorflow/compiler/mlir/quantization/stablehlo/tests/pipelines/process_nchw_tensor.mlir

    // CHECK: %[[TRANSPOSE_1:.+]] = stablehlo.transpose %[[CONV]], dims = [0, 3, 1, 2] : (tensor<1x4x4x8xf32>) -> tensor<1x8x4x4xf32>
    // CHECK: return %[[TRANSPOSE_1]]
    
    // -----
    
    // Tests that an `add(convolution(%activation, %weight), %bias)` with the
    // activation tensor in NCHW format is converted to an NHWC convolution + add
    // operation. Transpose ops are inserted for activations and outputs to match the
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 18 20:32:46 UTC 2024
    - 12.6K bytes
    - Viewed (0)
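
    A NumPy sketch of the layout round trip implied by the CHECK line (shapes taken
    from the test types; the convolution itself is omitted): NCHW data is transposed
    to NHWC, and dims = [0, 3, 1, 2] restores NCHW:

      import numpy as np

      nchw = np.random.rand(1, 8, 4, 4)        # NCHW activation, as in tensor<1x8x4x4xf32>
      nhwc = np.transpose(nchw, (0, 2, 3, 1))  # to NHWC for the converted convolution
      back = np.transpose(nhwc, (0, 3, 1, 2))  # TRANSPOSE_1 restores the NCHW layout

      assert nhwc.shape == (1, 4, 4, 8)
      assert np.array_equal(back, nchw)
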
  9. tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_move_transposes_end.mlir

      // CHECK: %[[RES_TRANSPOSE_0:[0-9]*]] = "tf.Transpose"(%[[ADD]], %[[RES_PERM]])
      // CHECK: %[[RES_TRANSPOSE_1:[0-9]*]] = "tf.Transpose"(%[[RES_TRANSPOSE_0]], %[[RES_PERM]])
      // CHECK: return %[[RES_TRANSPOSE_1]]
    
      %0 = "tf.Const"() {value = dense<[0, 3, 1, 2]> : tensor<4xi32>} : () -> tensor<4xi32>
      %1 = "tf.Transpose"(%arg0, %0) : (tensor<1x4x4x8xf32>, tensor<4xi32>) -> tensor<1x8x4x4xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 9.5K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/tensorflow/g3doc/space_to_depth.md

    disable this feature as well.
    
    ### Fuse SpaceToDepth with Automatic Double Transpose
    
    The transpose and reshape ops in SpaceToDepthOp on TPU hosts may cause an
    image model to become infeed-bound. To reduce host time, the space-to-depth
    transform can be fused with `automatic double transpose` to reduce extra
    overhead on the host.
    
    ### Extend from Conv2D to Conv3D
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Oct 24 02:51:43 UTC 2020
    - 8.3K bytes
    - Viewed (0)
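
    For reference, the space-to-depth rearrangement itself is a reshape/transpose/reshape;
    a NumPy sketch with an assumed NHWC input and block size 2, intended to mirror the
    tf.nn.space_to_depth output ordering:

      import numpy as np

      def space_to_depth(x, block):
          # NHWC input: fold each (block x block) spatial tile into the channel dimension.
          n, h, w, c = x.shape
          x = x.reshape(n, h // block, block, w // block, block, c)
          x = x.transpose(0, 1, 3, 2, 4, 5)
          return x.reshape(n, h // block, w // block, block * block * c)

      x = np.random.rand(1, 224, 224, 3)
      assert space_to_depth(x, 2).shape == (1, 112, 112, 12)
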