Results 61 - 70 of 229 for transposes (0.12 sec)
- tensorflow/compiler/mlir/lite/tests/push-tpose-through-ewise.mlir

// RUN: tf-opt %s --push-transpose-through-ewise --split-input-file | FileCheck %s

// CHECK-LABEL: pushTposeAfterAddSimple
func.func @pushTposeAfterAddSimple(%arg0: tensor<2x3x4x5xf32>) -> tensor<5x2x3x4xf32> {
  %perm = arith.constant dense<[3, 0, 1, 2]> : tensor<4xi32>
  %0 = "tfl.transpose"(%arg0, %perm) : (tensor<2x3x4x5xf32>, tensor<4xi32>) -> tensor<5x2x3x4xf32>
  %cst = arith.constant dense<1.0> : tensor<5x2x3x4xf32>

Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 8.9K bytes - Viewed (0)
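The pass exercised by this test pushes a transpose past an elementwise op. A minimal NumPy sketch of why the rewrite preserves values (the permutation [3, 0, 1, 2] and the shapes come from the snippet; everything else is illustrative):

import numpy as np

perm = [3, 0, 1, 2]                 # permutation used by tfl.transpose above
inv_perm = list(np.argsort(perm))   # its inverse, [1, 2, 3, 0]

x = np.random.rand(2, 3, 4, 5).astype(np.float32)
c = np.ones((5, 2, 3, 4), dtype=np.float32)

# Before the rewrite: transpose first, then add the constant.
before = np.transpose(x, perm) + c
# After the rewrite: add in the original layout (with the constant permuted
# back), then apply the transpose once at the end.
after = np.transpose(x + np.transpose(c, inv_perm), perm)

assert np.allclose(before, after)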
- tensorflow/compiler/mlir/lite/transforms/legalize_tf.cc

  // The rhs matrix must be 2D for fully connected op.
  return (constant.getType().getRank() == 2);
};
auto op = cast<BatchMatMulOpType>(bmm_op);
// Create a tfl.transpose op that performs ZX transpose on `input`.
auto create_z_x_transpose_op = [&](Value input) -> Value {
  RankedTensorType input_type = mlir::cast<RankedTensorType>(input.getType());
  const int input_rank = input_type.getRank();

Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon May 20 20:06:54 UTC 2024 - 45.2K bytes - Viewed (0)
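The "ZX transpose" built here feeds the batch-matmul-to-fully-connected rewrite. A hedged NumPy sketch of that permutation (that it swaps only the two innermost dimensions is an assumption based on the surrounding comments):

import numpy as np

def z_x_transpose(x: np.ndarray) -> np.ndarray:
    # Swap the two innermost dimensions, leaving any leading batch dims alone.
    perm = list(range(x.ndim - 2)) + [x.ndim - 1, x.ndim - 2]
    return np.transpose(x, perm)

rhs = np.arange(6.0).reshape(2, 3)    # a 2-D constant rhs, as the rank check requires
print(z_x_transpose(rhs).shape)       # (3, 2): for rank 2 this is a plain matrix transpose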
- tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/fold_constant_transpose.mlir

}

// CHECK: transpose

// -----

// Tests that transposing an argument cannot be folded.
// CHECK-LABEL: transpose_arg
func.func @transpose_arg(%arg0: tensor<2x3xf32>) -> tensor<3x2xf32> {
  %0 = stablehlo.transpose %arg0, dims = [1, 0] : (tensor<2x3xf32>) -> tensor<3x2xf32>
  return %0 : tensor<3x2xf32>
}

Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 12 08:06:02 UTC 2024 - 2.2K bytes - Viewed (0)
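The distinction the test draws is between transposing a compile-time constant, which can be folded, and transposing a function argument, which cannot. A small NumPy analogy:

import numpy as np

# A constant operand can be transposed once ahead of time and replaced by a
# new constant ...
const = np.array([[1.0, 2.0, 3.0],
                  [4.0, 5.0, 6.0]], dtype=np.float32)   # tensor<2x3xf32>
folded = np.ascontiguousarray(const.T)                  # materialized 3x2 constant

# ... while an argument has no value to fold, so the transpose must stay a
# runtime operation, exactly as the CHECK-LABEL above verifies.
def transpose_arg(arg: np.ndarray) -> np.ndarray:
    return np.transpose(arg, (1, 0))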
- guava/src/com/google/common/graph/Graphs.java

public int inDegree(N node) {
  return delegate().outDegree(node); // transpose
}

@Override
public int outDegree(N node) {
  return delegate().inDegree(node); // transpose
}

@Override
public boolean hasEdgeConnecting(N nodeU, N nodeV) {
  return delegate().hasEdgeConnecting(nodeV, nodeU); // transpose
}

@Override

Registered: Wed Jun 12 16:38:11 UTC 2024 - Last Modified: Mon Apr 01 16:15:01 UTC 2024 - 21.7K bytes - Viewed (0)
- android/guava/src/com/google/common/graph/Graphs.java

public int inDegree(N node) {
  return delegate().outDegree(node); // transpose
}

@Override
public int outDegree(N node) {
  return delegate().inDegree(node); // transpose
}

@Override
public boolean hasEdgeConnecting(N nodeU, N nodeV) {
  return delegate().hasEdgeConnecting(nodeV, nodeU); // transpose
}

@Override

Registered: Wed Jun 12 16:38:11 UTC 2024 - Last Modified: Mon Apr 01 16:15:01 UTC 2024 - 21.2K bytes - Viewed (0)
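Both copies show how Guava's transposed-graph view answers every query by delegating to the original graph with the edge direction reversed. A rough Python sketch of the same delegation idea (not Guava's API, just the pattern):

class TransposedGraph:
    """A read-only view of a directed graph with every edge reversed."""

    def __init__(self, delegate):
        self._delegate = delegate

    def in_degree(self, node):
        return self._delegate.out_degree(node)            # transpose

    def out_degree(self, node):
        return self._delegate.in_degree(node)             # transpose

    def has_edge_connecting(self, u, v):
        return self._delegate.has_edge_connecting(v, u)   # transpose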
- tensorflow/compiler/mlir/tensorflow/transforms/einsum.cc

  return output;
}

// Computes the transpositions required to convert dnums to one supported by
// tf.BatchMatmulV2 and returns the new set of dimension numbers with them.
// Transposed LHS shape will be B0,...,Bn,L0,...,Ln,C0,...,Cn and,
// transposed RHS shape will be B0,...,Bn,C0,...,Cn,R0,...,Rn respectively.
LogicalResult transposeForBatchMatmul(
    const Location& loc, EinsumDimensionNumbers& dnums, Value* lhs, Value* rhs,

Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 33.3K bytes - Viewed (0)
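A worked NumPy example of the layout the comment describes, for an assumed equation "bij,bkj->bik": the RHS is transposed so its axes read (batch, contracting, free) before the batch matmul.

import numpy as np

lhs = np.random.rand(2, 3, 5)          # (B, L, C): already batch, free, contracting
rhs = np.random.rand(2, 4, 5)          # (B, R, C): contracting dim is last
rhs_t = np.transpose(rhs, (0, 2, 1))   # (B, C, R), the layout BatchMatMulV2 needs

out = np.matmul(lhs, rhs_t)            # (B, L, R) = (2, 3, 4)
assert np.allclose(out, np.einsum("bij,bkj->bik", lhs, rhs))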
- tensorflow/compiler/mlir/lite/stablehlo/tests/optimize_layout.mlir

// CHECK: %[[TPOS:.*]] = stablehlo.transpose %[[PAD]], dims = [0, 3, 1, 2]
// CHECK:   : (tensor<1x114x114x64xf32>) -> tensor<1x64x114x114xf32>
// CHECK: return %[[TPOS]] : tensor<1x64x114x114xf32>
func.func @commute_transpose_pad(
    %arg0: tensor<1x112x112x64xf32>, %padding_val: tensor<f32>) -> tensor<1x64x114x114xf32> {
  %tspos = stablehlo.transpose %arg0, dims = [0, 3, 1, 2]

Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 21:59:06 UTC 2024 - 2.8K bytes - Viewed (0)
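The test checks that a transpose can be commuted past a pad. A NumPy sketch of why that is legal (the shapes and the [0, 3, 1, 2] perm come from the snippet; the pad widths and the zero padding value are assumptions):

import numpy as np

x = np.random.rand(1, 112, 112, 64)         # NHWC input
perm = [0, 3, 1, 2]                         # NHWC -> NCHW
pad = [(0, 0), (1, 1), (1, 1), (0, 0)]      # pad H and W by one on each side

# Transpose first, then pad in the permuted layout ...
a = np.pad(np.transpose(x, perm), [pad[p] for p in perm])
# ... or pad first and transpose afterwards: the results agree as long as the
# pad widths are permuted along with the axes.
b = np.transpose(np.pad(x, pad), perm)

assert np.allclose(a, b) and a.shape == (1, 64, 114, 114)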
- tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/dot_general.h

namespace mlir {
namespace odml {

// Converts mhlo.dot_general to tfl.BatchMatMul. Reshape and Transpose ops will
// be inserted to convert to well-formed matrix multiply; i.e., mhlo.dot_general
// -> tfl.batch_matmul(mhlo.transpose(mhlo.reshape(operand)), ...).
// Note:
// 1) Reshape/transpose are inserted because tfl.BatchMatMul requires
//    size(contracting_dimensions) = 1 and size(output_dim) = 1, whereas

Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Oct 04 19:00:01 UTC 2023 - 2.3K bytes - Viewed (0)
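Because tfl.BatchMatMul accepts only a single contracting dimension, a dot_general with several contracting dims is transposed and reshaped first. A NumPy sketch of that flattening (the shapes and the choice of two contracting dims are made up for illustration):

import numpy as np

lhs = np.random.rand(2, 3, 4, 5)    # (B, L, C0, C1)
rhs = np.random.rand(2, 6, 4, 5)    # (B, R, C0, C1)

# Transpose the RHS to (B, C0, C1, R), then reshape both sides so the two
# contracting dims fuse into a single one of size 4 * 5 = 20.
rhs_t = np.transpose(rhs, (0, 2, 3, 1)).reshape(2, 20, 6)
lhs_r = lhs.reshape(2, 3, 20)

ref = np.einsum("blcd,brcd->blr", lhs, rhs)
assert np.allclose(np.matmul(lhs_r, rhs_t), ref)   # single-contraction batch matmul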
- tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_layout_assignment_to_nchw.mlir

// CHECK-SAME: src_format = "NHWC"
// CHECK: %[[ARG_PERM:.*]] = "tf.Const"() <{value = dense<[0, 3, 1, 2]> : tensor<4xi64>}>
// CHECK: %[[IN_TRANSPOSE:[0-9]*]] = "tf.Transpose"(%arg0, %[[ARG_PERM]])
// CHECK: %[[OUT_BP_TRANSPOSE:[0-9]*]] = "tf.Transpose"(%arg2, %[[ARG_PERM]])
// CHECK: %[[CONV2D_BACKPROP:[0-9]*]] = "tf.Conv2DBackpropFilter"
// CHECK-SAME: (%[[IN_TRANSPOSE]], %[[FILTER_PERM]], %[[OUT_BP_TRANSPOSE]])

Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 9K bytes - Viewed (0)
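The dense<[0, 3, 1, 2]> constant is the NHWC-to-NCHW permutation, and the CHECK lines expect it to be applied to both the input and the output gradient before tf.Conv2DBackpropFilter runs in NCHW. In NumPy terms (the concrete shapes are illustrative):

import numpy as np

nhwc_to_nchw = (0, 3, 1, 2)
x = np.random.rand(8, 32, 32, 3)               # NHWC activations
dy = np.random.rand(8, 30, 30, 16)             # NHWC output gradient

print(np.transpose(x, nhwc_to_nchw).shape)     # (8, 3, 32, 32)
print(np.transpose(dy, nhwc_to_nchw).shape)    # (8, 16, 30, 30)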
- tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_layout_assignment_to_nhwc.mlir

// CHECK: %[[CST:.*]] = "tf.Const"() <{value = dense<[0, 2, 3, 1]> : tensor<4xi64>}>
// CHECK: %[[R0:.*]] = "tf.Transpose"(%[[ARG0]], %[[CST]])
// CHECK: %[[R1:.*]] = "tf.BiasAdd"(%[[R0]], %[[ARG1]]) <{data_format = "NHWC"}> {device = ""}
// CHECK: %[[CST_0:.*]] = "tf.Const"() <{value = dense<[0, 3, 1, 2]> : tensor<4xi64>}>
// CHECK: "tf.Transpose"(%[[R1]], %[[CST_0]])

Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 4.5K bytes - Viewed (0)
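Here the op is bracketed by the two perms: [0, 2, 3, 1] takes NCHW to NHWC for the NHWC-only BiasAdd, and [0, 3, 1, 2] converts the result back. The two permutations are inverses, as a quick NumPy check shows:

import numpy as np

nchw_to_nhwc = (0, 2, 3, 1)
nhwc_to_nchw = (0, 3, 1, 2)

x = np.random.rand(8, 3, 32, 32)    # NCHW
roundtrip = np.transpose(np.transpose(x, nchw_to_nhwc), nhwc_to_nchw)
assert np.allclose(roundtrip, x)    # the two perms undo each other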