- Sort Score
- Result 10 results
- Languages All
Results 1 - 10 of 15 for NCHW (0.04 sec)
-
tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_to_nhwc.mlir
// it to NCHW before padding, and does all computations in NCHW (this is the // default setup for ResNet model trained in fp32 on GPU). // // To be able to use Tensor Cores on latest NVIDIA GPUs this model has to be // converted to NHWC data format. // Padding in spatial dimension (NCHW)
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 7.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/ir/tf_ops_layout_helper.h
// // Example: // %1 = SomeOp(...) {data_format = NHWC} // %2 = Transpose(%1) {permutation = NHWC->NCHW} // %3 = Op(%2) {data_format = NCHW} // // To bypass %2 we have to change data format to shuffle data format from NCHW // to NHWC, which is the reverse of operand permutation (function argument). auto reverse_permutation = GetDataFormatPermutation(op->getDataFormat(), target_data_format);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 08 01:19:25 UTC 2023 - 5.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_move_transposes_end.mlir
// MaxPool operand transpose must be folded into the op and MaxPool // must use NCHW data format with updated kernel size and strides. // CHECK: %[[RES_PERM:.*]] = "tf.Const"() <{value = dense<[0, 2, 3, 1]> : tensor<4xi32>}> // CHECK: %[[MAX_POOL:[0-9]*]] = "tf.MaxPool"(%arg0) <{data_format = "NCHW", ksize = [1, 1, 3, 3], padding = "SAME", strides = [1, 1, 2, 2]}> : (tensor<1x64x112x112xf32>) -> tensor<1x64x56x56xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 9.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_layout_assignment_to_nchw.mlir
// RUN: tf-opt %s -tf-layout-assignment=force-data-format=NCHW -verify-diagnostics | FileCheck %s --dump-input=always // IMPORTANT: In the following Conv2D tests tensor shapes do not match // convolution parameters (stride, dilations, etc...). This test only verifies // that changing convolution data layout will update all the attributes. // CHECK-LABEL: func @transposeConv2D
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/cc/pass_pipeline.cc
// NCHW->NHWC convolution conversion). pm.addNestedPass<func::FuncOp>(createFoldConstantTransposePass()); } void RegisterPassPipelines() { static PassPipelineRegistration<> nchw_tensor_format_processing_pipeline( /*arg=*/"stablehlo-process-nchw-tensor", /*description=*/"Optimizes tensors with NCHW format.", AddProcessNchwTensorPasses); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 8.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_layout_assignment_to_nhwc.mlir
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 4.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/ir/tf_ops_layout_helper.cc
reverse[permutation[i]] = i; } return reverse; } SmallVector<int64_t, 4> GetDataFormatPermutation(StringRef from, StringRef to) { if (from == "NHWC" && to == "NCHW") { return {0, 3, 1, 2}; } else if (from == "NCHW" && to == "NHWC") { return {0, 2, 3, 1}; } else { return {}; } } // Shuffle elements in the `attr` according to the permutation. Optional
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 3.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/composite_utils.h
const DenseIntElementsAttr& dense_attr, PatternRewriter& builder); // Returns a NHWC shaped type from an NCHW shaped type op. // For example- Given a Composite op that wraps a core.aten.avg_pool2d, this // returns the return type of the tfl.average_pool_2d emitted. Note that the // aten.avg_pool2d works with the NCHW layout while tfl.average_pool_2d assumes // NHWC. ShapedType GetNhwcReturnTypeFromNchw(Operation* old_op);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 29 18:33:05 UTC 2024 - 3.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/composite_utils.cc
ShapedType GetNhwcReturnTypeFromNchw(Operation* old_op) { auto composite_result_shape = mlir::cast<ShapedType>(old_op->getResults().front().getType()).getShape(); std::array<int64_t, 4> output_shape; // NHWC <- NCHW output_shape[0] = composite_result_shape[0]; output_shape[1] = composite_result_shape[2]; output_shape[2] = composite_result_shape[3]; output_shape[3] = composite_result_shape[1];
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 29 18:33:05 UTC 2024 - 3.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/cc/pass_pipeline.h
// through a StableHLO <-> MHLO roundtrip to utilize the MHLOQuantToInt pass. void AddStablehloQuantToIntPasses(OpPassManager& pm); // Processes tensors with NCHW format (== (batch, channel, height, width)) by // converting them to NHWC formats along with extra optimizations such as // constant folding the transpose->convolution pattern. This is useful when
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Apr 15 12:53:33 UTC 2024 - 3.6K bytes - Viewed (0)