- Sort Score
- Result 10 results
- Languages All
Results 1 - 10 of 23 for NCHW (0.04 sec)
-
tensorflow/compiler/mlir/quantization/stablehlo/tests/pipelines/process_nchw_tensor.mlir
// RUN: stablehlo-quant-opt %s -stablehlo-process-nchw-tensor \ // RUN: -split-input-file -verify-diagnostics | FileCheck %s // Tests that a `convolution(%activation, %weight)` with the activation tensor // NCHW format is converted to NHWC convolution. Transpose ops are inserted to // the activation and output to match the function signature. The weight // constant is transposed. // CHECK-LABEL: nchw_conv // CHECK-SAME: %[[ARG:.+]]: tensor<1x8x4x4xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 18 20:32:46 UTC 2024 - 12.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/passes.td
let dependentDialects = ["mlir::stablehlo::StablehloDialect",]; } def NchwConvolutionToNhwcPass : Pass<"stablehlo-nchw-convolution-to-nhwc", "mlir::func::FuncOp"> { let summary = "Converts stablehlo.convolution op of NCHW format to NHWC."; let description = [{ Matches `ConvolutionOp`s with NCHW format and converts it to NHWC format by inserting `TransposeOp`s to input, filter, and output tensors.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 14 06:31:57 UTC 2024 - 10.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/cc/pass_pipeline.cc
// NCHW->NHWC convolution conversion). pm.addNestedPass<func::FuncOp>(createFoldConstantTransposePass()); } void RegisterPassPipelines() { static PassPipelineRegistration<> nchw_tensor_format_processing_pipeline( /*arg=*/"stablehlo-process-nchw-tensor", /*description=*/"Optimizes tensors with NCHW format.", AddProcessNchwTensorPasses); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 8.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/defer_activation_transpose.cc
// LHS transpose permutation must be a NCHW->NHWC permutation. template <typename OpT> void DeferRhsTransposeForBinaryOp(OpT op, PatternRewriter& rewriter) { auto transpose_op = cast<TransposeOp>(op.getOperand(0).getDefiningOp()); Value lhs_pre_transpose = transpose_op.getOperand(); // NCHW -> NHWC for the right-hand side, to match the operand's shape. Value rhs = op.getOperand(1);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 11.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/ir/tf_ops_layout_helper.cc
reverse[permutation[i]] = i; } return reverse; } SmallVector<int64_t, 4> GetDataFormatPermutation(StringRef from, StringRef to) { if (from == "NHWC" && to == "NCHW") { return {0, 3, 1, 2}; } else if (from == "NCHW" && to == "NHWC") { return {0, 2, 3, 1}; } else { return {}; } } // Shuffle elements in the `attr` according to the permutation. Optional
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 3.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/composite_utils.h
const DenseIntElementsAttr& dense_attr, PatternRewriter& builder); // Returns a NHWC shaped type from an NCHW shaped type op. // For example- Given a Composite op that wraps a core.aten.avg_pool2d, this // returns the return type of the tfl.average_pool_2d emitted. Note that the // aten.avg_pool2d works with the NCHW layout while tfl.average_pool_2d assumes // NHWC. ShapedType GetNhwcReturnTypeFromNchw(Operation* old_op);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 29 18:33:05 UTC 2024 - 3.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/attrs_and_constraints.h
inline constexpr StringRef kQuantizationMethodAttr = "_quantization_method"; // Permutation from the NHWC tensor format to NCHW. This is an inverse // permutation of `kNchwToNhwcPermutation`. inline constexpr std::array<int64_t, 4> kNhwcToNchwPermutation = {0, 3, 1, 2}; // Permutation from the NCHW tensor format to NHWC. This is an inverse // permutation of `kNhwcToNchwPermutation`.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 9.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/ir/tf_ops_a_m.cc
// Also FusedBatchNorm in training mode prefers NCHW data format. Check if all // users can efficiently use NHWC data format? if (one_by_one && trivial_strides && trivial_dilations) { return "NHWC"; } // If filter spatial dimensions are unknown or not 1x1 we prefer NCHW, because // it's the fastest option on NVIDIA GPUs with cuDNN library support. return "NCHW"; }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 146.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/composite_utils.cc
ShapedType GetNhwcReturnTypeFromNchw(Operation* old_op) { auto composite_result_shape = mlir::cast<ShapedType>(old_op->getResults().front().getType()).getShape(); std::array<int64_t, 4> output_shape; // NHWC <- NCHW output_shape[0] = composite_result_shape[0]; output_shape[1] = composite_result_shape[2]; output_shape[2] = composite_result_shape[3]; output_shape[3] = composite_result_shape[1];
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 29 18:33:05 UTC 2024 - 3.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/cc/pass_pipeline.h
// through a StableHLO <-> MHLO roundtrip to utilize the MHLOQuantToInt pass. void AddStablehloQuantToIntPasses(OpPassManager& pm); // Processes tensors with NCHW format (== (batch, channel, height, width)) by // converting them to NHWC formats along with extra optimizations such as // constant folding the transpose->convolution pattern. This is useful when
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Apr 15 12:53:33 UTC 2024 - 3.6K bytes - Viewed (0)