Results 1 - 10 of 68 for "tranpose" (2.23 sec)
tensorflow/compiler/mlir/lite/transforms/push_transpose_through_ewise.cc
    return false;
  }
  return true;
}

// In some cases, transposes may commute with elementwise operations. In order
// to make as many transposes redundant as possible, we can "push" transposes
// back so that they fuse later on. These patterns handle 2 such cases in
// a conservative fashion; on net they will never add to the number of transposes
// in the graph.
//
// ewise(tpose(x), tpose(y)) -> tpose(ewise(x, y))
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 12.5K bytes - Viewed (0)
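The rewrite named in the comment, ewise(tpose(x), tpose(y)) -> tpose(ewise(x, y)), trades two transposes for one. A minimal numeric sketch of why the identity holds (plain C++ on arbitrary 2x3 matrices, not the MLIR pattern itself):

#include <array>
#include <cassert>

using Mat23 = std::array<std::array<int, 3>, 2>;
using Mat32 = std::array<std::array<int, 2>, 3>;

Mat32 Tpose(const Mat23& m) {
  Mat32 t{};
  for (int i = 0; i < 2; ++i)
    for (int j = 0; j < 3; ++j) t[j][i] = m[i][j];
  return t;
}

int main() {
  Mat23 x = {{{1, 2, 3}, {4, 5, 6}}};
  Mat23 y = {{{7, 8, 9}, {10, 11, 12}}};

  // Before the rewrite: transpose each input, then add (two transposes).
  Mat32 tx = Tpose(x), ty = Tpose(y);
  Mat32 lhs{};
  for (int i = 0; i < 3; ++i)
    for (int j = 0; j < 2; ++j) lhs[i][j] = tx[i][j] + ty[i][j];

  // After the rewrite: add first, transpose once (one transpose).
  Mat23 sum{};
  for (int i = 0; i < 2; ++i)
    for (int j = 0; j < 3; ++j) sum[i][j] = x[i][j] + y[i][j];
  Mat32 rhs = Tpose(sum);

  assert(lhs == rhs);  // Same result, one fewer transpose in the graph.
}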
tensorflow/compiler/mlir/tensorflow/transforms/layout_optimization.cc
    transpose.setOperand(1, permutation_op);
    transpose.getResult().setType(mlir::cast<TensorType>(original_type[idx]));
  } else {
    transpose = builder.create<TransposeOp>(loc, result, permutation_op);
  }

  // Forward all users to the transpose operation.
  result.replaceAllUsesWith(transpose);
  transpose.setOperand(0, result);
}

// Remove unused transpose operations.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 19.3K bytes - Viewed (0)
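The replaceAllUsesWith/setOperand pair above hides a subtlety: forwarding all users of `result` to the new transpose also rewrites the transpose's own use of `result`, so the pass must immediately point operand 0 back at `result`. A toy sketch of that dance, with hypothetical ToyValue/ToyOp types standing in for the MLIR API:

#include <cassert>
#include <vector>

struct ToyOp;

struct ToyValue {
  std::vector<ToyOp*> users;  // ops currently reading this value
};

struct ToyOp {
  std::vector<ToyValue*> operands;
  ToyValue result;
};

// Redirect every use of `from` to `to`'s result, like replaceAllUsesWith.
void ReplaceAllUsesWith(ToyValue& from, ToyOp& to) {
  for (ToyOp* user : from.users) {
    for (ToyValue*& operand : user->operands)
      if (operand == &from) operand = &to.result;
    to.result.users.push_back(user);
  }
  from.users.clear();
}

int main() {
  ToyOp producer;   // op whose result the pass wraps in a transpose
  ToyOp consumer;   // a pre-existing user of producer's result
  consumer.operands = {&producer.result};
  producer.result.users = {&consumer};

  ToyOp transpose;  // newly created transpose(producer)
  transpose.operands = {&producer.result};
  producer.result.users.push_back(&transpose);

  // Forward all users -- including the transpose itself, which now
  // (bogusly) consumes its own result ...
  ReplaceAllUsesWith(producer.result, transpose);
  // ... so restore its real input, as transpose.setOperand(0, result) does.
  transpose.operands[0] = &producer.result;
  producer.result.users.push_back(&transpose);
  std::erase(transpose.result.users, &transpose);  // drop the stale self-use

  assert(consumer.operands[0] == &transpose.result);  // consumer re-targeted
  assert(transpose.operands[0] == &producer.result);  // chain restored
}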
tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/scatter.h
    return rewriter.notifyMatchFailure(
        scatter_op, "unsupported scatter_dims_to_operand_dims");
  }

  // Transpose the operand so that the trailing dimensions of the
  // operand are being updated. Then apply a tf.scatter op and transpose
  // back the result to get the same shape as the original operand.
  SmallVector<int64_t, 4> permutation_array;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 10.1K bytes - Viewed (0)
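A sketch of the permutation bookkeeping the comment describes: move the updated dimensions to the trailing positions, then invert the permutation to transpose the scatter result back. Plain C++ with made-up dimension numbers, not the scatter.h code:

#include <cassert>
#include <cstdint>
#include <set>
#include <vector>

int main() {
  const int64_t rank = 4;
  const std::set<int64_t> updated_dims = {0, 2};  // dims touched by the scatter

  // Non-updated dims first, updated dims last (trailing, as required).
  std::vector<int64_t> permutation;
  for (int64_t d = 0; d < rank; ++d)
    if (!updated_dims.count(d)) permutation.push_back(d);
  for (int64_t d : updated_dims) permutation.push_back(d);

  // Inverse permutation: transposing by it undoes the first transpose.
  std::vector<int64_t> inverse(rank);
  for (int64_t i = 0; i < rank; ++i) inverse[permutation[i]] = i;

  assert((permutation == std::vector<int64_t>{1, 3, 0, 2}));
  assert((inverse == std::vector<int64_t>{2, 0, 3, 1}));
}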
tensorflow/compiler/mlir/lite/transforms/prepare_patterns.td
          (TF_SubOp $beta, (TF_MulOp $m, $mul)))>;

class TFi32<int v> : ConstantAttr<I32ElementsAttr, !cast<string>(v)>;

// Matmul without transpose on b to matmul with explicit transpose op and
// transposed b.
def ConvertMatmulWithoutTransposeToWithTranspose :
    Pat<(TF_MatMulOp $a, $b, ConstBoolAttrFalse:$at, ConstBoolAttrFalse, $grad_a, $grad_b),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 00:40:15 UTC 2024 - 10.5K bytes - Viewed (0)
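The pattern rewrites matmul(a, b) into a matmul over an explicitly transposed b with transpose_b = true, so the two transposes cancel. A numeric sketch of that equivalence (plain C++ on 2x2 matrices, with a hand-rolled MatMul standing in for TF_MatMulOp):

#include <array>
#include <cassert>

using Mat2 = std::array<std::array<int, 2>, 2>;

Mat2 Transpose(const Mat2& m) {
  return {{{m[0][0], m[1][0]}, {m[0][1], m[1][1]}}};
}

// Matmul with a transpose_b flag, mirroring the op attribute.
Mat2 MatMul(const Mat2& a, const Mat2& b, bool transpose_b) {
  Mat2 bb = transpose_b ? Transpose(b) : b;
  Mat2 out{};
  for (int i = 0; i < 2; ++i)
    for (int j = 0; j < 2; ++j)
      for (int k = 0; k < 2; ++k) out[i][j] += a[i][k] * bb[k][j];
  return out;
}

int main() {
  Mat2 a = {{{1, 2}, {3, 4}}};
  Mat2 b = {{{5, 6}, {7, 8}}};
  // Before: matmul(a, b) with transpose_b = false.
  // After:  matmul(a, transpose(b)) with transpose_b = true.
  assert(MatMul(a, b, /*transpose_b=*/false) ==
         MatMul(a, Transpose(b), /*transpose_b=*/true));
}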
tensorflow/compiler/mlir/quantization/stablehlo/tests/pipelines/process_nchw_tensor.mlir
// CHECK: %[[TRANSPOSE_1:.+]] = stablehlo.transpose %[[CONV]], dims = [0, 3, 1, 2] : (tensor<1x4x4x8xf32>) -> tensor<1x8x4x4xf32>
// CHECK: return %[[TRANSPOSE_1]]

// -----

// Tests that an `add(convolution(%activation, %weight), %bias)` with the
// activation tensor in NCHW format is converted to an NHWC convolution + add
// operation. Transpose ops are inserted on activations and outputs to match the
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 18 20:32:46 UTC 2024 - 12.6K bytes - Viewed (0)
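For reference, stablehlo.transpose with dims = [0, 3, 1, 2] maps an NHWC shape to NCHW (output_shape[i] = input_shape[dims[i]]), and [0, 2, 3, 1] is its inverse. A small sketch checking both against the shapes in the CHECK line:

#include <array>
#include <cassert>

std::array<int, 4> Permute(const std::array<int, 4>& shape,
                           const std::array<int, 4>& dims) {
  std::array<int, 4> out{};
  for (int i = 0; i < 4; ++i) out[i] = shape[dims[i]];
  return out;
}

int main() {
  const std::array<int, 4> nhwc = {1, 4, 4, 8};     // 1x4x4x8, as in the test
  const std::array<int, 4> to_nchw = {0, 3, 1, 2};
  const std::array<int, 4> to_nhwc = {0, 2, 3, 1};

  std::array<int, 4> nchw = Permute(nhwc, to_nchw);
  assert((nchw == std::array<int, 4>{1, 8, 4, 4}));  // matches the CHECK line
  assert(Permute(nchw, to_nhwc) == nhwc);            // the two perms round-trip
}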
tensorflow/compiler/mlir/quantization/stablehlo/passes/passes.td
let summary = "Merges stablehlo.transpose for activations.";
let description = [{
  Defers activation transposes (e.g. LHS of `stablehlo.add`) to the output
  and optionally inserts `stablehlo.transpose`s to match the shape of operands.
  This is useful when recursively pushing down the extra `stablehlo.transpose`
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 14 06:31:57 UTC 2024 - 10.3K bytes - Viewed (0)
tensorflow/cc/framework/gradients_test.cc
  auto dz = Const(scope, {{1.0, 1.0}, {1.0, 1.0}});
  auto dx = MatMul(scope, dz, y, MatMul::TransposeB(true));
  auto dy = MatMul(scope, x, dz, MatMul::TransposeA(true));
  auto du = MatMul(scope, dx, v, MatMul::TransposeB(true));
  auto dv = MatMul(scope, u, dx, MatMul::TransposeA(true));
} else {
  // Call AddSymbolicGradients.
  auto dz = Const(scope, {{1.0, 1.0}, {1.0, 1.0}});
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Apr 15 15:13:38 UTC 2023 - 25K bytes - Viewed (0)
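The test wires up the standard MatMul gradients for z = x * y: dx = dz * y^T and dy = x^T * dz. A sketch that spot-checks the dx formula against a finite-difference estimate (plain C++ with an all-ones dz, matching the test's Const; no TensorFlow):

#include <array>
#include <cassert>
#include <cmath>

using Mat2 = std::array<std::array<double, 2>, 2>;

// Sum of all entries of x * y, the scalar whose gradient is taken.
double SumOfProduct(const Mat2& x, const Mat2& y) {
  double s = 0.0;
  for (int i = 0; i < 2; ++i)
    for (int j = 0; j < 2; ++j)
      for (int k = 0; k < 2; ++k) s += x[i][k] * y[k][j];
  return s;
}

int main() {
  Mat2 x = {{{1.0, 2.0}, {3.0, 4.0}}};
  Mat2 y = {{{5.0, 6.0}, {7.0, 8.0}}};

  // With dz = ones, dx = dz * y^T gives dx[i][k] = sum_j y[k][j].
  double dx00 = y[0][0] + y[0][1];

  // Finite-difference estimate of d(sum(x*y))/dx[0][0] for comparison.
  const double eps = 1e-6;
  double base = SumOfProduct(x, y);
  x[0][0] += eps;
  assert(std::abs((SumOfProduct(x, y) - base) / eps - dx00) < 1e-4);
}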
tensorflow/cc/gradients/linalg_grad.cc
  //
  // If we're not dealing with repeated labels, and the non-reduced labels
  // don't need to be transposed, then just tiling is enough and there is no
  // need to call another einsum. For example, tiling is sufficient for
  // "abcd->ac". But for equations like "aabbcd->ac" (generalized traces) or
  // "abc->ca" (transpose), we'd need another einsum operation after tiling.
  if (!has_repeated_labels &&
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Mar 07 23:11:54 UTC 2022 - 20.4K bytes - Viewed (0)
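One way to read the "needs to be transposed" condition: after reduction, tiling alone suffices only if the surviving labels keep their relative input order. A simplified subsequence check (an illustrative assumption; the real linalg_grad.cc logic also deals with repeated labels):

#include <cassert>
#include <string>

// True if every output label appears in the input in the same relative order.
bool IsOrderPreserved(const std::string& input, const std::string& output) {
  size_t pos = 0;
  for (char label : output) {
    pos = input.find(label, pos);
    if (pos == std::string::npos) return false;  // out of order (or missing)
    ++pos;
  }
  return true;
}

int main() {
  assert(IsOrderPreserved("abcd", "ac"));  // tiling alone is enough
  assert(!IsOrderPreserved("abc", "ca"));  // needs another einsum after tiling
}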
tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/defer_activation_transpose.mlir
// RUN: stablehlo-quant-opt %s -stablehlo-defer-activation-transpose \
// RUN:   -split-input-file -verify-diagnostics | FileCheck %s

// Tests that an `add(transpose(arg0), arg1)` pattern is converted to
// `transpose(add(arg0, transpose(arg1)))`. The transpose on the activation is
// deferred to the output of `stablehlo.add`, and an extra transpose op is
// inserted on the RHS to match the shape of the operand.

// CHECK-LABEL: add_with_activation_transpose
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 18 20:32:46 UTC 2024 - 14.6K bytes - Viewed (0)
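A numeric spot-check of the rewrite this test asserts, add(transpose(arg0), arg1) == transpose(add(arg0, transpose(arg1))): the inner and outer transposes on arg1 cancel, and transpose distributes over the elementwise add. Plain C++ on a 2x3/3x2 pair, not StableHLO:

#include <array>
#include <cassert>

using Mat23 = std::array<std::array<int, 3>, 2>;
using Mat32 = std::array<std::array<int, 2>, 3>;

Mat32 Tpose(const Mat23& m) {
  Mat32 t{};
  for (int i = 0; i < 2; ++i)
    for (int j = 0; j < 3; ++j) t[j][i] = m[i][j];
  return t;
}

Mat23 TposeBack(const Mat32& m) {
  Mat23 t{};
  for (int i = 0; i < 3; ++i)
    for (int j = 0; j < 2; ++j) t[j][i] = m[i][j];
  return t;
}

template <typename M>
M Add(const M& a, const M& b) {
  M out = a;
  for (size_t i = 0; i < a.size(); ++i)
    for (size_t j = 0; j < a[0].size(); ++j) out[i][j] += b[i][j];
  return out;
}

int main() {
  Mat23 arg0 = {{{1, 2, 3}, {4, 5, 6}}};
  Mat32 arg1 = {{{7, 8}, {9, 10}, {11, 12}}};

  Mat32 before = Add(Tpose(arg0), arg1);            // pattern as matched
  Mat32 after = Tpose(Add(arg0, TposeBack(arg1)));  // rewritten, deferred form
  assert(before == after);
}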
guava/src/com/google/common/graph/Graphs.java
public int inDegree(N node) {
  return delegate().outDegree(node); // transpose
}

@Override
public int outDegree(N node) {
  return delegate().inDegree(node); // transpose
}

@Override
public boolean hasEdgeConnecting(N nodeU, N nodeV) {
  return delegate().hasEdgeConnecting(nodeV, nodeU); // transpose
}

@Override
Registered: Wed Jun 12 16:38:11 UTC 2024 - Last Modified: Mon Apr 01 16:15:01 UTC 2024 - 21.7K bytes - Viewed (0)
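The same trick in C++ terms: a transposed graph view delegates every query to the underlying graph with endpoints and in/out degrees swapped, so no edges are copied. The types below (Digraph, TransposedView) are hypothetical, not Guava's API:

#include <cassert>
#include <map>
#include <set>

struct Digraph {
  std::map<int, std::set<int>> succ;  // node -> successors

  void AddEdge(int u, int v) { succ[u].insert(v); }
  bool HasEdgeConnecting(int u, int v) const {
    auto it = succ.find(u);
    return it != succ.end() && it->second.count(v) > 0;
  }
  int OutDegree(int n) const {
    auto it = succ.find(n);
    return it == succ.end() ? 0 : static_cast<int>(it->second.size());
  }
  int InDegree(int n) const {
    int d = 0;
    for (const auto& [u, vs] : succ) d += vs.count(n);
    return d;
  }
};

// The transposed view: every query delegates with the roles swapped.
struct TransposedView {
  const Digraph& g;
  int InDegree(int n) const { return g.OutDegree(n); }  // transpose
  int OutDegree(int n) const { return g.InDegree(n); }  // transpose
  bool HasEdgeConnecting(int u, int v) const {
    return g.HasEdgeConnecting(v, u);                   // transpose
  }
};

int main() {
  Digraph g;
  g.AddEdge(1, 2);
  TransposedView t{g};
  assert(t.HasEdgeConnecting(2, 1));
  assert(t.OutDegree(2) == 1 && t.InDegree(1) == 1);
}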