- Sort Score
- Result 10 results
- Languages All
Results 11 - 20 of 229 for transposes (0.16 sec)
-
tensorflow/compiler/mlir/tensorflow/ir/tf_op_interfaces.td
let description = [{ Operation supports folding operand(s) transposes into the operation itself. (1) Operation might have layout dependent operands and results... Example: MaxPool(Transpose($arg, $perm)) -> Transpose(MaxPool($arg, $perm)) (2) ... or it might have only layout dependent operands: Example: Mean(Transpose($arg, $reduction_dims))
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Nov 30 19:07:07 UTC 2022 - 6.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/dot_general.cc
auto lhs_transposed = rewriter.create<mhlo::TransposeOp>( loc, RankedTensorType::get(lhs_transposed_shape, lhs_type.getElementType()), lhs, DenseIntElementsAttr::get( RankedTensorType::get({lhs_rank}, rewriter.getI64Type()), lhs_permutation)); // Transposes rhs shape to be in the order of {batch_dimensions, contracting
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 19.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/passes.td
let summary = "Merges stablehlo.transpose for activations."; let description = [{ Defers activation transposes (e.g. LHS of `stablehlo.add`) to the output and optionally inserts `stablehlo.transpose`s to match the shape of operands. This is useful when recursively pushing down the extra `stablehlo.transpose`
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 14 06:31:57 UTC 2024 - 10.3K bytes - Viewed (0) -
tensorflow/cc/gradients/linalg_grad.cc
// // If we're not dealing with repeated labels, and the non-reduced labels // doesn't need to be transposed, then just tiling is enough and there is no // need to call another einsum. For example, tiling is sufficient for // "abcd->ac". But for equations like "aabbcd->ac" (generalized traces) or // "abc->ca" (transpose), we'd need another einsum operation after tiling. if (!has_repeated_labels &&
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Mar 07 23:11:54 UTC 2022 - 20.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/passes.h
Option<bool> skip_fold_transpose_in_ops{ *this, "skip-fold-transpose-in-ops", llvm::cl::desc("Skip folding transpose operands in Ops which can support " "different layouts.")}; }; // Layout optimization assigns optimal data layout for layout sensitive // operations, and cancels all redundant transposes. void CreateLayoutOptimizationPipeline(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 21:18:05 UTC 2024 - 31.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/passes.h
// tensor are within the range of the reduced precision. std::unique_ptr<OperationPass<ModuleOp>> CreateReduceTypePrecisionPass(); // Conservatively pushes transposes through elementwise ops to prepare // so redundant ones may be grouped and removed. std::unique_ptr<OperationPass<ModuleOp>> CreatePushTransposeThroughEwisePass();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Mar 07 21:29:34 UTC 2024 - 10.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/uniform_quantized_stablehlo_to_tfl_pass.cc
} return bias; } }; // Rewrites quantized `stablehlo.transpose` to `tfl.transpose`. class RewriteQuantizedTransposeOp : public OpRewritePattern<stablehlo::TransposeOp> { public: using OpRewritePattern<stablehlo::TransposeOp>::OpRewritePattern; LogicalResult match(stablehlo::TransposeOp op) const override { return success(IsOpFullyQuantized(op)); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Apr 22 09:00:19 UTC 2024 - 99.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo.cc
return rewriter.notifyMatchFailure( conv_op, "Not a depthwise transposed convolution"); } if ((kernel_output_channels % feature_group_count != 0) || (kernel_input_channels != 1)) { return rewriter.notifyMatchFailure( conv_op, "Not a supported depthwise transposed convolution"); } // This needs to be checked because the TFLite runtime generated incorrect
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 154.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/tf_passes.td
]; } def MoveTransposesPass : Pass<"tf-move-transposes", "mlir::func::FuncOp"> { let summary = "Move transposes pass."; let constructor = "TF::CreateMoveTransposesPass()"; let options = [ Option<"fold_transpose_in_ops_", "fold-transpose-in-ops", "bool", /*default=*/"true", "Whether to fold transposes in ops which can support folding.">,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 21:18:05 UTC 2024 - 99.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/g3doc/_includes/tf_passes.md
}, { "tf.Yield"() : () -> () }) { is_stateless = true } : (tensor<i1>) -> () ``` ### `-tf-move-transposes` _Move transposes pass._ #### Options ``` -fold-transpose-in-ops : Whether to fold transposes in ops which can support folding. -direction : Move transposes to the beginning or the end of the block where they are defined. ``` ### `-tf-name-anonymous-iterators`
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Aug 02 02:26:39 UTC 2023 - 96.4K bytes - Viewed (0)