Results 21 - 30 of 162 for tranpose (0.1 sec)

  1. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/defer_activation_transpose.mlir

    // RUN: stablehlo-quant-opt %s -stablehlo-defer-activation-transpose \
    // RUN:   -split-input-file -verify-diagnostics | FileCheck %s
    
    // Tests that an `add(transpose(arg0), arg1)` pattern is converted to
    // `transpose(add(arg0, transpose(arg1)))`. The transpose in the activation is
    // deferred to the output of `stablehlo.add` and an extra transpose op is
    // inserted to the RHS to match the shape of the operand.
    
    // CHECK-LABEL: add_with_activation_transpose
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 18 20:32:46 UTC 2024
    - 14.6K bytes
    - Viewed (0)
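    As a quick illustration of the equivalence this test exercises, here is a minimal NumPy sketch (illustrative only; the shapes and variable names below are assumptions, not taken from the test file):

    import numpy as np

    perm = (0, 2, 3, 1)                 # NCHW -> NHWC
    inv_perm = tuple(np.argsort(perm))  # inverse permutation, NHWC -> NCHW
    x = np.random.rand(1, 3, 4, 5)      # activation in NCHW
    y = np.random.rand(1, 4, 5, 3)      # RHS operand, already in NHWC

    before = np.transpose(x, perm) + y                          # add(transpose(x), y)
    after = np.transpose(x + np.transpose(y, inv_perm), perm)   # transpose(add(x, transpose(y)))
    assert np.allclose(before, after)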
  2. tensorflow/compiler/mlir/lite/tests/push-tpose-through-ewise.mlir

    // RUN: tf-opt %s --push-transpose-through-ewise --split-input-file | FileCheck %s
    
    // CHECK-LABEL: pushTposeAfterAddSimple
    func.func @pushTposeAfterAddSimple(%arg0: tensor<2x3x4x5xf32>) -> tensor<5x2x3x4xf32> {
      %perm = arith.constant dense<[3, 0, 1, 2]> : tensor<4xi32>
      %0 = "tfl.transpose"(%arg0, %perm) : (tensor<2x3x4x5xf32>, tensor<4xi32>) -> tensor<5x2x3x4xf32>
      %cst = arith.constant dense<1.0> : tensor<5x2x3x4xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 8.9K bytes
    - Viewed (0)
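    A minimal NumPy sketch of the splat-constant case this test covers (assumed shapes; not part of the test file): because the added constant is a scalar splat, the element-wise add commutes with the transpose, so the transpose can be pushed below it unchanged:

    import numpy as np

    x = np.random.rand(2, 3, 4, 5)
    perm = (3, 0, 1, 2)                  # the permutation used in the test
    lhs = np.transpose(x, perm) + 1.0    # transpose first, then add the splat
    rhs = np.transpose(x + 1.0, perm)    # add the splat first, then transpose
    assert np.allclose(lhs, rhs)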
  3. tensorflow/compiler/mlir/lite/transforms/optimize_batch_matmul.cc

            return failure();
          }
        }
    
        // Input rhs must be a constant with rank 2.
        if (constant.getType().getRank() != 2) return failure();
    
        // Create a tfl.transpose op that performs ZX transpose on `input`.
        auto create_z_x_transpose_op = [&](Value input) -> Value {
          RankedTensorType input_type =
              mlir::cast<RankedTensorType>(input.getType());
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 9.6K bytes
    - Viewed (0)
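    A rough Python sketch of what the lambda above appears to build (an assumption based on the comment, not the real pass): a "ZX transpose" that swaps the two trailing dimensions, which for the rank-2 constant is just a matrix transpose:

    import numpy as np

    def z_x_transpose(value: np.ndarray) -> np.ndarray:
        # Swap the last two axes; for a rank-2 tensor this is the ordinary transpose.
        perm = list(range(value.ndim))
        perm[-2], perm[-1] = perm[-1], perm[-2]
        return np.transpose(value, perm)

    rhs = np.random.rand(3, 4)
    assert z_x_transpose(rhs).shape == (4, 3)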
  4. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/fold_constant_transpose.mlir

    }
    // CHECK: transpose
    
    // -----
    
    // Tests that transposing an argument cannot be folded.
    
    // CHECK-LABEL: transpose_arg
    func.func @transpose_arg(%arg0: tensor<2x3xf32>) -> tensor<3x2xf32> {
      %0 = stablehlo.transpose %arg0, dims = [1, 0] : (tensor<2x3xf32>) -> tensor<3x2xf32>
      return %0 : tensor<3x2xf32>
    }
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 12 08:06:02 UTC 2024
    - 2.2K bytes
    - Viewed (0)
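    A small NumPy analogue of the distinction above (illustrative only): a transpose of a constant can be folded by materializing the permuted values ahead of time, while a transpose of a function argument must remain a runtime op because its values are unknown:

    import numpy as np

    const = np.arange(6, dtype=np.float32).reshape(2, 3)
    folded = const.T.copy()        # values known -> can be precomputed ("folded")

    def transpose_arg(arg: np.ndarray) -> np.ndarray:
        return arg.T               # values unknown until runtime -> cannot be folded

    assert folded.shape == (3, 2)
    assert transpose_arg(np.ones((2, 3))).shape == (3, 2)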
  5. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/nchw_convolution_to_nhwc.mlir

      return %2 : tensor<1x8x4x4xf32>
    }
    
    // CHECK-DAG: %[[CONST:.+]] = stablehlo.constant {{.*}} : tensor<8x8x3x3xf32>
    // CHECK-DAG: %[[TRANSPOSE_0:.+]] = stablehlo.transpose %[[ARG]], dims = [0, 2, 3, 1] : (tensor<1x8x4x4xf32>) -> tensor<1x4x4x8xf32>
    // CHECK-DAG: %[[TRANSPOSE_1:.+]] = stablehlo.transpose %[[CONST]], dims = [2, 3, 1, 0] : (tensor<8x8x3x3xf32>) -> tensor<3x3x8x8xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Mar 25 23:00:47 UTC 2024
    - 5.5K bytes
    - Viewed (0)
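    A NumPy sketch of the two layout permutations in the CHECK lines above (the NCHW/NHWC and OIHW/HWIO labels are assumptions about the intended layouts):

    import numpy as np

    lhs = np.zeros((1, 8, 4, 4))               # input, NCHW
    kernel = np.zeros((8, 8, 3, 3))            # kernel, OIHW
    nhwc = np.transpose(lhs, (0, 2, 3, 1))     # dims = [0, 2, 3, 1]
    hwio = np.transpose(kernel, (2, 3, 1, 0))  # dims = [2, 3, 1, 0]
    assert nhwc.shape == (1, 4, 4, 8)
    assert hwio.shape == (3, 3, 8, 8)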
  6. guava/src/com/google/common/graph/Graphs.java

        public int inDegree(N node) {
          return delegate().outDegree(node); // transpose
        }
    
        @Override
        public int outDegree(N node) {
          return delegate().inDegree(node); // transpose
        }
    
        @Override
        public boolean hasEdgeConnecting(N nodeU, N nodeV) {
          return delegate().hasEdgeConnecting(nodeV, nodeU); // transpose
        }
    
        @Override
    Registered: Wed Jun 12 16:38:11 UTC 2024
    - Last Modified: Mon Apr 01 16:15:01 UTC 2024
    - 21.7K bytes
    - Viewed (0)
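    A toy Python analogue of the transposed view shown above (class and method names are illustrative, not Guava's API): every query is answered by delegating to the original graph with the edge direction reversed:

    class TransposedGraphView:
        def __init__(self, delegate):
            self.delegate = delegate

        def in_degree(self, node):
            return self.delegate.out_degree(node)   # transpose

        def out_degree(self, node):
            return self.delegate.in_degree(node)    # transpose

        def has_edge_connecting(self, node_u, node_v):
            return self.delegate.has_edge_connecting(node_v, node_u)  # transpose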
  7. android/guava/src/com/google/common/graph/Graphs.java

        public int inDegree(N node) {
          return delegate().outDegree(node); // transpose
        }
    
        @Override
        public int outDegree(N node) {
          return delegate().inDegree(node); // transpose
        }
    
        @Override
        public boolean hasEdgeConnecting(N nodeU, N nodeV) {
          return delegate().hasEdgeConnecting(nodeV, nodeU); // transpose
        }
    
        @Override
    Registered: Wed Jun 12 16:38:11 UTC 2024
    - Last Modified: Mon Apr 01 16:15:01 UTC 2024
    - 21.2K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/quantization/stablehlo/passes/defer_activation_transpose.cc

          input.getLoc(), input, rewriter.getDenseI64ArrayAttr(permutation));
    }
    
    // Defers the transpose of the left-hand side (LHS) to the right-hand side and
    // the result of a binary operation. In detail, this rewrites
    // `op(transpose(%lhs), %rhs)` to `transpose(op(%lhs, transpose(%rhs)))`. The
    // LHS transpose permutation must be an NCHW->NHWC permutation.
    template <typename OpT>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 11.5K bytes
    - Viewed (0)
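    A tiny Python sketch of the precondition mentioned above (the helper name is made up for illustration): the rewrite only applies when the activation's transpose permutation is exactly the NCHW -> NHWC permutation:

    NCHW_TO_NHWC = (0, 2, 3, 1)

    def is_nchw_to_nhwc(permutation) -> bool:
        # Hypothetical check mirroring the pass's stated precondition.
        return tuple(permutation) == NCHW_TO_NHWC

    assert is_nchw_to_nhwc([0, 2, 3, 1])
    assert not is_nchw_to_nhwc([0, 3, 1, 2])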
  9. tensorflow/compiler/mlir/lite/stablehlo/tests/optimize_layout.mlir

    // CHECK:           %[[TPOS:.*]] = stablehlo.transpose %[[PAD]], dims = [0, 3, 1, 2]
    // CHECK:               : (tensor<1x114x114x64xf32>) -> tensor<1x64x114x114xf32>
    // CHECK:           return %[[TPOS]] : tensor<1x64x114x114xf32>
    
    func.func @commute_transpose_pad(
          %arg0: tensor<1x112x112x64xf32>, %padding_val: tensor<f32>)
          -> tensor<1x64x114x114xf32> {
      %tspos = stablehlo.transpose %arg0, dims = [0, 3, 1, 2]
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 21:59:06 UTC 2024
    - 2.8K bytes
    - Viewed (0)
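    A NumPy sketch of the commutation this test checks (shapes taken from the snippet; the zero padding value is an assumption): padding before the transpose, with the pad amounts permuted back to the input layout, matches transposing and then padding:

    import numpy as np

    x = np.random.rand(1, 112, 112, 64)           # NHWC input, as in the test
    perm = (0, 3, 1, 2)                           # NHWC -> NCHW
    inv_perm = tuple(np.argsort(perm))            # (0, 2, 3, 1)
    pad_nchw = ((0, 0), (0, 0), (1, 1), (1, 1))   # pad H and W after the transpose
    pad_nhwc = tuple(pad_nchw[i] for i in inv_perm)

    before = np.pad(np.transpose(x, perm), pad_nchw)
    after = np.transpose(np.pad(x, pad_nhwc), perm)
    assert before.shape == after.shape == (1, 64, 114, 114)
    assert np.allclose(before, after)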
  10. tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/dot_general.h

    namespace mlir {
    namespace odml {
    // Converts mhlo.dot_general to tfl.BatchMatMul. Reshape and Transpose ops will
    // be inserted to convert to well-formed matrix multiply; i.e., mhlo.dot_general
    // -> tfl.batch_matmul(mhlo.transpose(mhlo.reshape(operand)), ...).
    // Note:
    // 1) Reshape/transpose are inserted because tfl.BatchMatMul requires
    // size(contracting_dimensions) = 1 and size(output_dim) = 1, whereas
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Oct 04 19:00:01 UTC 2023
    - 2.3K bytes
    - Viewed (0)
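    A rough NumPy sketch of the note above (the shapes and dimension roles are assumptions): multiple contracting dimensions are flattened into one, and the RHS output dimension is transposed forward, so a single batch matmul applies:

    import numpy as np

    lhs = np.random.rand(2, 3, 4, 5)   # batch, output dim, two contracting dims
    rhs = np.random.rand(2, 4, 5, 6)   # batch, two contracting dims, output dim

    lhs2 = lhs.reshape(2, 3, 20)                               # fold contracting dims
    rhs2 = np.transpose(rhs, (0, 3, 1, 2)).reshape(2, 6, 20)   # move output dim forward, then fold
    out = np.einsum('bik,bjk->bij', lhs2, rhs2)                # single batch matmul
    ref = np.einsum('bimn,bmnj->bij', lhs, rhs)                # original dot_general semantics
    assert np.allclose(out, ref)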