- Sort Score
- Results per page: 10
- Languages All
Results 21 - 30 of 78 for Convolution (0.19 sec)
-
tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.proto
// once available. // // If set to true, enable channel-wise quantization for: // * Convolution ops: When the attached `Method` also specifies per-channel // quantization. // * Non-convolution ops: All // // Default value: true bool enable_per_channel_quantized_weight = 2 [deprecated = true];
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 14.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize_composite_functions.mlir
%0 = stablehlo.convolution(%arg0, %arg1) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {pad = [[0, 1], [1, 1]]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64} : (tensor<1x3x4x3xf32>, tensor<2x3x3x2xf32>) -> tensor<1x3x4x2xf32> return %0 : tensor<1x3x4x2xf32> } // Checks that the entry function is quantized for convolution. Quantized
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 09 05:56:10 UTC 2024 - 91.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/fuse_convolution_pass.cc
: public PassWrapper<FuseMhloConvolutionPass, OperationPass<func::FuncOp>> { public: StringRef getArgument() const final { return "fuse-mhlo-convolution-pass"; } StringRef getDescription() const final { return "Fuses MHLO binary element-wise ops and convolution op"; } void runOnOperation() override { RewritePatternSet patterns(&getContext());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 22:21:19 UTC 2024 - 8.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_layout_assignment_to_nhwc.mlir
// RUN: tf-opt %s -tf-layout-assignment=force-data-format=NHWC -verify-diagnostics | FileCheck %s --dump-input=always // IMPORTANT: Tensor shapes do not match convolution parameters (stride, // dilations, etc...). This test only verifies that changing convolution data // layout will update all the attributes. // CHECK-LABEL: func @transposeConv2D func.func @transposeConv2D(%input: tensor<1x3x32x32xf32>, %filter: tensor<1x1x3x8xf32>) -> tensor<1x8x7x6xf32> {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 4.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/tests/uniform-quantized-stablehlo-to-tfl.mlir
return %0 : tensor<1x3x3x2x!quant.uniform<i8:f32, 4.000000e+0>> } // Confirm that the `stablehlo.convolution` is not converted to `tfl.conv_2d`. // CHECK-LABEL: convolution_upstream_srq_non_const_filter // CHECK-SAME: %[[ARG:.+]]: tensor<1x3x3x4x!quant.uniform<i8:f32, 1.000000e+00:-100>> // CHECK: stablehlo.convolution // CHECK-NOT: tfl.conv_2d // -----
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 14 17:10:32 UTC 2024 - 106.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/tests/compose-uniform-quantized-type.mlir
// CHECK: @uniform_dequantize_0 } // ----- // Tests a variant where there is no stablehlo.convert op in between the // filter constant and the convolution op. // // `filter (f32) -> convolution` // // instead of: // // `filter (i8) -> convert (i8 -> f32) -> convolution` module { // CHECK-LABEL: quantized_conv_op_with_no_filter_convert // CHECK-SAME: %[[ARG:.*]]: tensor<1x3x3x4xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 14 17:10:32 UTC 2024 - 37K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo.cc
.getDimSize(input_feature_dimension); // Check for Group Convolution parameters if (feature_group_count != 1 && feature_group_count != input_channels) { // Group convolution is not supported yet. return rewriter.notifyMatchFailure(conv_op, "doesn't support group convolution"); } auto input_spatial_dimensions = dnums.getInputSpatialDimensions();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 154.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/lift_as_function_call_test.cc
%0 = stablehlo.constant dense<2.000000e+00> : tensor<3x3x4x4xf32> %1 = stablehlo.convolution(%arg0, %0) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {pad = [[1, 1], [1, 1]]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64} : (tensor<1x3x3x4xf32>, tensor<3x3x4x4xf32>) -> tensor<1x3x3x4xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 26.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/prepare_quantize/prepare_quantize_per_channel.mlir
%1 = "quantfork.stats"(%arg0) {layerStats = dense<[1.27501142, 2.824783]> : tensor<2xf32>} : (tensor<1x3x2x3xf32>) -> tensor<1x3x2x3xf32> %2 = stablehlo.convolution(%1, %0) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = { stride = [1, 1], pad = [[0, 0], [1, 1]], lhs_dilate = [1, 1], rhs_dilate = [1, 1] } {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 26 07:48:15 UTC 2024 - 8.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/preprocess_op.mlir
// RUN: tf-quant-opt %s -split-input-file -quant-preprocess-op | FileCheck %s module { // For UniformQuantized depthwise convolution, tensor shape should have // transformed from [H,W,C,M] to [H,W,1,CxM], func.func @depthwise_conv(%arg0: tensor<1x3x4x3xf32>) -> (tensor<*xf32>) { %cst_0 = "tf.Const"() {value = dense<0.000000e+00> : tensor<6xf32>} : () -> tensor<6xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 3K bytes - Viewed (0)