Results 1 - 10 of 45 for NCHW (0.03 sec)
tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_to_nhwc.mlir
```mlir
// it to NCHW before padding, and does all computations in NCHW (this is the
// default setup for ResNet model trained in fp32 on GPU).
//
// To be able to use Tensor Cores on latest NVIDIA GPUs this model has to be
// converted to NHWC data format.
// Padding in spatial dimension (NCHW)
```
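The layout round trip this comment describes is a pair of inverse transposes. A minimal NumPy sketch (shapes illustrative, not from the file):

```python
import numpy as np

# NCHW activation, as in the fp32-on-GPU ResNet setup described above.
x_nchw = np.random.rand(1, 3, 224, 224).astype(np.float32)

# NCHW -> NHWC: move channels to the last axis (permutation [0, 2, 3, 1]).
x_nhwc = np.transpose(x_nchw, (0, 2, 3, 1))
assert x_nhwc.shape == (1, 224, 224, 3)

# NHWC -> NCHW: the inverse permutation [0, 3, 1, 2] restores the layout.
assert np.array_equal(np.transpose(x_nhwc, (0, 3, 1, 2)), x_nchw)
```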
tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_to_nchw.mlir
```mlir
// RUN: tf-opt %s -tf-layout-optimization=force-data-format=NCHW -verify-diagnostics | FileCheck %s --dump-input=always

// CHECK-LABEL: func @transposeConv2D
func.func @transposeConv2D(%arg0: tensor<1x3x32x32xf32>, %arg1: tensor<1x1x3x8xf32>) -> tensor<1x8x32x32xf32> {
  // Convert input: NCHW -> NHWC
  %0 = "tf.Const"() {value = dense<[0, 2, 3, 1]> : tensor<4xi32>} : () -> tensor<4xi32>
```
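The transpose-wrapped convolution pattern in this test can be written in eager TensorFlow; a hedged sketch using the test's shapes (the `tf.nn.conv2d` call is an assumed equivalent, not taken from the file):

```python
import tensorflow as tf

x_nchw = tf.random.normal([1, 3, 32, 32])  # %arg0: tensor<1x3x32x32xf32>
w_hwio = tf.random.normal([1, 1, 3, 8])    # %arg1: tensor<1x1x3x8xf32>

# Convert input: NCHW -> NHWC, as the tf.Const permutation [0, 2, 3, 1] does.
x_nhwc = tf.transpose(x_nchw, perm=[0, 2, 3, 1])
y_nhwc = tf.nn.conv2d(x_nhwc, w_hwio, strides=1, padding="SAME")

# Convert output back: NHWC -> NCHW, matching tensor<1x8x32x32xf32>.
y_nchw = tf.transpose(y_nhwc, perm=[0, 3, 1, 2])
assert y_nchw.shape == (1, 8, 32, 32)
```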
tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_layout_assignment_gpu_cc_60.mlir
```mlir
func.func @transposeConv2D_3x3_f16(%input: tensor<1x28x28x64xf16>, %filter: tensor<3x3x64x64xf16>) -> tensor<1x26x26x64xf16> {
  // cuDNN prefers NCHW data format for spatial convolutions in f16 before
  // compute capability 7.0 (NVIDIA Tensor Cores).

  // CHECK: "tf.Conv2D"(%[[INPUT_TRANSPOSE:[0-9]*]], %arg1)
  // CHECK-SAME: data_format = "NCHW"
  %0 = "tf.Conv2D"(%input, %filter) {
    data_format = "NHWC",
    padding = "VALID",
```
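The 28 -> 26 spatial shrink in the signature is just the VALID-padding formula; a quick check (formula stated here, not taken from the file):

```python
# VALID padding: out = floor((in - kernel) / stride) + 1
def valid_out(size, kernel, stride=1):
    return (size - kernel) // stride + 1

# 3x3 filter over a 28x28 input at stride 1 -> 26x26, matching
# tensor<1x28x28x64xf16> -> tensor<1x26x26x64xf16>.
assert valid_out(28, 3) == 26
```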
tensorflow/compiler/mlir/tensorflow/ir/tf_ops_layout_helper.h
```c++
//
// Example:
//   %1 = SomeOp(...) {data_format = NHWC}
//   %2 = Transpose(%1) {permutation = NHWC->NCHW}
//   %3 = Op(%2) {data_format = NCHW}
//
// To bypass %2 we have to change data format to shuffle data format from NCHW
// to NHWC, which is the reverse of operand permutation (function argument).
auto reverse_permutation =
    GetDataFormatPermutation(op->getDataFormat(), target_data_format);
```
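The "reverse of operand permutation" mentioned in this comment is a permutation inverse, which NumPy's argsort computes directly (a sketch of the idea, not the header's own API):

```python
import numpy as np

nhwc_to_nchw = np.array([0, 3, 1, 2])  # operand permutation of %2 above

# argsort of a permutation yields the permutation that undoes it.
reverse_permutation = np.argsort(nhwc_to_nchw)
assert reverse_permutation.tolist() == [0, 2, 3, 1]  # NCHW -> NHWC
```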
tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_layout_assignment_gpu_cc_70.mlir
```mlir
func.func @transposeConv2D_3x3_f32(%input: tensor<1x28x28x64xf32>, %filter: tensor<3x3x64x64xf32>) -> tensor<1x26x26x64xf32> {
  // cuDNN prefers NCHW data format for spatial convolutions.

  // CHECK: "tf.Conv2D"(%[[INPUT_TRANSPOSE:[0-9]*]], %arg1)
  // CHECK-SAME: data_format = "NCHW"
  %0 = "tf.Conv2D"(%input, %filter) {
    data_format = "NHWC",
    padding = "VALID",
    strides = [1, 1, 1, 1]
```
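Where a GPU build is available, the layout this pass assigns can also be requested directly in user code; a hedged sketch (assumes a CUDA build, since `data_format="NCHW"` is generally rejected on CPU):

```python
import tensorflow as tf

x = tf.random.normal([1, 64, 28, 28])  # channels-first input
w = tf.random.normal([3, 3, 64, 64])   # filter layout stays HWIO

# Run the convolution natively in NCHW, the format the pass assigns here.
# Note: this call typically fails on CPU-only builds.
y = tf.nn.conv2d(x, w, strides=1, padding="VALID", data_format="NCHW")
assert y.shape == (1, 64, 26, 26)
```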
tensorflow/compiler/mlir/quantization/stablehlo/tests/pipelines/process_nchw_tensor.mlir
```mlir
// RUN: stablehlo-quant-opt %s -stablehlo-process-nchw-tensor \
// RUN:   -split-input-file -verify-diagnostics | FileCheck %s

// Tests that a `convolution(%activation, %weight)` with the activation tensor
// NCHW format is converted to NHWC convolution. Transpose ops are inserted to
// the activation and output to match the function signature. The weight
// constant is transposed.

// CHECK-LABEL: nchw_conv
// CHECK-SAME: %[[ARG:.+]]: tensor<1x8x4x4xf32>
```
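At the shape level, the rewrite this test checks inserts transposes around the convolution and folds one into the weight constant. A NumPy sketch of that bookkeeping (only the activation shape comes from the test; the OIHW weight shape is an illustrative assumption):

```python
import numpy as np

act_nchw = np.zeros((1, 8, 4, 4), dtype=np.float32)  # %[[ARG]]
w_oihw = np.zeros((8, 8, 3, 3), dtype=np.float32)    # hypothetical weight

# Transpose inserted on the activation: NCHW -> NHWC.
act_nhwc = np.transpose(act_nchw, (0, 2, 3, 1))      # (1, 4, 4, 8)

# Weight constant transposed once at compile time: OIHW -> HWIO.
w_hwio = np.transpose(w_oihw, (2, 3, 1, 0))          # (3, 3, 8, 8)

# A final transpose on the output restores NCHW, so the function
# signature is unchanged.
```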
tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_move_transposes_end.mlir
```mlir
// MaxPool operand transpose must be folded into the op and MaxPool
// must use NCHW data format with updated kernel size and strides.

// CHECK: %[[RES_PERM:.*]] = "tf.Const"() <{value = dense<[0, 2, 3, 1]> : tensor<4xi32>}>
// CHECK: %[[MAX_POOL:[0-9]*]] = "tf.MaxPool"(%arg0) <{data_format = "NCHW", ksize = [1, 1, 3, 3], padding = "SAME", strides = [1, 1, 2, 2]}> : (tensor<1x64x112x112xf32>) -> tensor<1x64x56x56xf32>
```
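The result shape in the CHECK line follows from SAME padding: each output spatial dim is the ceiling of input over stride, independent of kernel size (formula stated here, not from the file):

```python
import math

# SAME padding: out = ceil(in / stride).
assert math.ceil(112 / 2) == 56  # (1, 64, 112, 112) -> (1, 64, 56, 56)
```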
tensorflow/compiler/mlir/quantization/stablehlo/passes/passes.td
```tablegen
  let dependentDialects = ["mlir::stablehlo::StablehloDialect",];
}

def NchwConvolutionToNhwcPass : Pass<"stablehlo-nchw-convolution-to-nhwc", "mlir::func::FuncOp"> {
  let summary = "Converts stablehlo.convolution op of NCHW format to -> NHWC.";
  let description = [{
    Matches `ConvolutionOp`s with NCHW format and converts it to NHWC format by
    inserting `TransposeOp`s to input, filter, and output tensors.
```
tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_layout_assignment_to_nchw.mlir
```mlir
// RUN: tf-opt %s -tf-layout-assignment=force-data-format=NCHW -verify-diagnostics | FileCheck %s --dump-input=always

// IMPORTANT: In the following Conv2D tests tensor shapes do not match
// convolution parameters (stride, dilations, etc...). This test only verifies
// that changing convolution data layout will update all the attributes.

// CHECK-LABEL: func @transposeConv2D
```
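"Update all the attributes" means per-dimension attribute lists are shuffled by the same layout permutation as the tensor; a small sketch with illustrative values:

```python
nhwc_to_nchw = [0, 3, 1, 2]

strides_nhwc = [1, 2, 2, 1]
dilations_nhwc = [1, 3, 3, 1]

# Reorder each attribute into the new layout's dimension order.
strides_nchw = [strides_nhwc[i] for i in nhwc_to_nchw]
dilations_nchw = [dilations_nhwc[i] for i in nhwc_to_nchw]

assert strides_nchw == [1, 1, 2, 2]
assert dilations_nchw == [1, 1, 3, 3]
```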
tensorflow/compiler/mlir/quantization/stablehlo/cc/pass_pipeline.cc
```c++
  // NCHW->NHWC convolution conversion).
  pm.addNestedPass<func::FuncOp>(createFoldConstantTransposePass());
}

void RegisterPassPipelines() {
  static PassPipelineRegistration<> nchw_tensor_format_processing_pipeline(
      /*arg=*/"stablehlo-process-nchw-tensor",
      /*description=*/"Optimizes tensors with NCHW format.",
      AddProcessNchwTensorPasses);
}
```