Results 31 - 40 of 118 for conv4 (0.04 sec)

  1. tensorflow/compiler/mlir/lite/tests/optimize-after-quantization.mlir

      // CHECK: %[[weight:.*]] = arith.constant dense<3.000000e+00> : tensor<3x3x3x3xf32>
      // CHECK: %[[bias:.*]] = arith.constant dense<[1.500000e+00, 3.000000e+00, 4.500000e+00]>
      // CHECK: %[[conv:.*]] = "tfl.conv_2d"(%arg0, %[[weight]], %[[bias]])
      // CHECK: return %[[conv]] : tensor<256x8x7x3xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Jan 05 18:35:42 UTC 2024
    - 1.4K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/lite/tests/modify_io_nodes.mlir

    // CHECK-NEXT: %[[cst2:.*]] = "tfl.pseudo_qconst"() <{qtype = tensor<32x!quant.uniform<i32:f32, 1.7052092479439231E-4>>, value = dense<0> : tensor<32xi32>}> : () -> tensor<32x!quant.uniform<i32:f32, 1.7052092479439231E-4>>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 19.9K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/lite/stablehlo/tests/fuse_mhlo_convolution.mlir

      // CHECK: %[[CONV:.+]] = mhlo.convolution(%[[INPUT]], %[[NEW_FILTER]]) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [1, 1], pad = {{\[\[}}0, 0], [0, 0]], rhs_dilate = [1, 1]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64} : (tensor<?x256x256x3xf32>, tensor<1x1x3x2xf32>) -> tensor<?x256x256x2xf32>
      // CHECK: %[[SHAPE:.+]] = shape.shape_of %[[CONV]] : tensor<?x256x256x2xf32> -> tensor<4xindex>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Apr 06 15:32:52 UTC 2024
    - 4.4K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize_composite_functions_weight_only.mlir

    // CHECK: %[[CONV:.+]] = stablehlo.convolution(%[[ARG1]], %[[ARG2]])
    // CHECK-SAME: (tensor<1x3x4x3xf32>, tensor<2x3x3x2x!quant.uniform<i8<-127:127>:f32, 0.0023622048182750312>>) -> tensor<1x3x4x2xf32>
    // CHECK: return %[[CONV]]
    
    // -----
    
    // Test that per-channel weight-only quantized dot_general op is produced when
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 09 05:56:10 UTC 2024
    - 9.4K bytes
    - Viewed (0)
  5. subprojects/core/src/test/groovy/org/gradle/api/internal/project/DefaultProjectTest.groovy

            String expectedValue = 'somevalue'
    
            when:
            project.convention.plugins.test = new TestConvention()
            project.conv = expectedValue
    
            then:
            project.conv == expectedValue
            project.convention.plugins.test.conv == expectedValue
            child1.conv == expectedValue
        }
    
        def setPropertyAndPropertyMissingWithProjectAndConventionProperty() {
            given:
    Registered: Wed Jun 12 18:38:38 UTC 2024
    - Last Modified: Fri Mar 08 13:46:07 UTC 2024
    - 35.1K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/lite/tests/prepare-tf-fake-quant-4bit.mlir

    // CHECK: %[[DEQUANTIZE:.*]] = "tfl.dequantize"(%[[QUANTIZE]])
    // CHECK: %[[CONV:.*]] = "tfl.conv_2d"(%arg0, %[[DEQUANTIZE]], %[[CONSTANT]])
    // CHECK: return %[[CONV]]
    }
    
    // CHECK-LABEL: perChannelFakeQuantWithConv2D
    func.func @perChannelFakeQuantWithConv2D(tensor<256x32x32x3xf32>) -> (tensor<256x8x7x16xf32>) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 22K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/quantization/tensorflow/tests/add_dump_tensor_op.mlir

        %1 = "tf.PartitionedCall"(%arg0, %cst, %cst_0) {_tfl_quant_trait = "fully_quantizable", config = "", config_proto = "", executor_type = "", f = @composite_conv2d_with_bias_and_relu6_fn_1} : (tensor<1x2x2x3xf32>, tensor<2x2x3x2xf32>, tensor<2xf32>) -> tensor<*xf32> loc(callsite("test@conv"("Conv2D_1") at "QuantizationUnit(\12\08Conv2D_1\1a\04conv)"))
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Mar 22 22:55:22 UTC 2024
    - 37.9K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/lite/tests/prepare-tf-fake-quant.mlir

    // CHECK: %[[DEQUANTIZE:.*]] = "tfl.dequantize"(%[[QUANTIZE]])
    // CHECK: %[[CONV:.*]] = "tfl.conv_2d"(%arg0, %[[DEQUANTIZE]], %[[CONSTANT]])
    // CHECK: return %[[CONV]]
    }
    
    // CHECK-LABEL: perChannelFakeQuantWithConv2D
    func.func @perChannelFakeQuantWithConv2D(tensor<256x32x32x3xf32>) -> (tensor<256x8x7x16xf32>) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 20.4K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/lite/transforms/dilated_conv.h

      // dilation rate.
      // TF python library will rewrite dilated conv to
      // "SpaceToBatch->Conv->BatchToSpace" pattern, and the Conv in the middle
      // always has 'VALID' padding. The padding tensor in `SpaceToBatch` has two
      // parts of contributions, one is to reduce padding of CONV from 'SAME' to
      // 'VALID', and another is to make input shape multiples of dilation rate. The
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 20K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/insert_weight_param.mlir

        return %0 : tensor<1x3x4x2xf32>
      }
      // CHECK: func private @composite_conv_fn
      // CHECK: %[[CONV:.+]] = stablehlo.convolution
      // CHECK: return %[[CONV]]
    }
    
    // -----
    
    // Test that q/dq pair with per-channel quantization parameter is inserted
    // between constant and XlaCallModule op with `weight_only_ptq` method of
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 09 05:56:10 UTC 2024
    - 22K bytes
    - Viewed (0)
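
Result 9 (dilated_conv.h) describes how the TF Python library rewrites a dilated convolution into a SpaceToBatch -> Conv (VALID padding) -> BatchToSpace pattern. As a rough illustration of that decomposition (not code from the file above), the following Python/TensorFlow sketch compares a directly dilated conv with the expanded form; the input shape, dilation rate, and padding values are illustrative assumptions chosen so the two agree.

    # Minimal sketch, assuming an 8x8x3 NHWC input, a 3x3 HWIO filter and a
    # dilation rate of 2: the dilated conv and its SpaceToBatch/BatchToSpace
    # expansion should produce the same result.
    import tensorflow as tf

    x = tf.random.normal([1, 8, 8, 3])   # NHWC input
    w = tf.random.normal([3, 3, 3, 4])   # HWIO filter
    rate = [2, 2]                        # dilation rate

    # Direct dilated convolution with SAME padding.
    direct = tf.nn.conv2d(x, w, strides=1, padding="SAME", dilations=rate)

    # Decomposed form: the SpaceToBatch paddings both absorb the SAME padding
    # (effective kernel is 5x5, so 2 on each side) and make the spatial dims
    # multiples of the dilation rate; the inner conv then uses VALID padding.
    stb = tf.space_to_batch_nd(x, block_shape=rate, paddings=[[2, 2], [2, 2]])
    conv = tf.nn.conv2d(stb, w, strides=1, padding="VALID")
    bts = tf.batch_to_space(conv, block_shape=rate, crops=[[0, 0], [0, 0]])

    print(direct.shape, bts.shape)                       # both (1, 8, 8, 4)
    print(float(tf.reduce_max(tf.abs(direct - bts))))    # ~0 (float tolerance)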
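
Results 4 and 10 both concern weight-only quantization, where a quantize/dequantize (q/dq) pair with per-channel parameters is inserted on the weight operand while the op itself still runs in float. Purely as an illustration of that idea (not the pass's implementation), here is a small NumPy sketch of symmetric per-channel q/dq on a filter tensor; the helper name and the symmetric [-127, 127] range mirror the i8<-127:127> type shown in result 4, and everything else is an assumption.

    # Minimal sketch, assuming symmetric int8 quantization with one scale per
    # output channel (last axis): weights are quantized ("q") and immediately
    # dequantized ("dq"), so downstream ops still see float values.
    import numpy as np

    def quantize_dequantize_per_channel(w, axis=-1):
        axis = axis % w.ndim
        reduce_axes = tuple(i for i in range(w.ndim) if i != axis)
        # One scale per channel, mapping the max |w| in that channel to 127.
        scale = np.max(np.abs(w), axis=reduce_axes, keepdims=True) / 127.0
        scale = np.where(scale == 0.0, 1.0, scale)       # avoid divide-by-zero
        q = np.clip(np.round(w / scale), -127, 127).astype(np.int8)
        return q.astype(np.float32) * scale

    weights = np.random.randn(2, 3, 3, 2).astype(np.float32)  # e.g. an HWIO filter
    approx = quantize_dequantize_per_channel(weights)
    print(np.max(np.abs(weights - approx)))  # small per-channel rounding error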