Results 21 - 28 of 28 for 1x1x1x16xi32 (0.13 sec)

  1. tensorflow/compiler/mlir/lite/tests/quantize-dynamic-range.mlir

    // CustomOpNotWeightOnly-LABEL: QuantizeCustomOp
    func.func @QuantizeCustomOp(%arg0: tensor<1x1x1x1xf32>) -> tensor<*xf32> attributes {tf.entry_function = {inputs = "input", outputs = "custom_op"}} {
      %0 = "quantfork.stats"(%arg0) {layerStats = dense<[0.000000e+00, 2.550000e+02]> : tensor<2xf32>} : (tensor<1x1x1x1xf32>) -> tensor<1x1x1x1xf32>
      %w = arith.constant dense<127.0> : tensor<1024x1x1x1xf32>
    - Last Modified: Thu May 23 21:09:00 UTC 2024
    - 23.2K bytes
  2. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize_composite_functions.mlir

    // CHECK-DAG: %[[CONST_1:.+]] = stablehlo.constant() <{value = dense<{{.*}}> : tensor<1x1x1x2xi32>}> : () -> tensor<1x1x1x2x!quant.uniform<i32:f32:3, {{.*}}>
    // CHECK: %[[UNIFORM_QUANTIZE_0:.+]] = stablehlo.uniform_quantize %[[ARG_0]] : (tensor<1x3x4x3xf32>) -> tensor<1x3x4x3x!quant.uniform<i8:f32, {{.*}}>>
    - Last Modified: Thu May 09 05:56:10 UTC 2024
    - 91.6K bytes
  3. tensorflow/compiler/mlir/lite/stablehlo/tests/uniform-quantized-stablehlo-to-tfl.mlir

    func.func @conv_with_bias_and_relu_srq(%arg0: tensor<1x5x5x2x!quant.uniform<i8:f32, 2.000000e+00:0>>) -> (tensor<1x4x4x4x!quant.uniform<i8:f32, 8.000000e+00:-128>>) {
        %0 = stablehlo.constant() {value = dense<5> : tensor<1x1x1x4xi32>} : () -> tensor<1x1x1x4x!quant.uniform<i32:f32:3, {2.000000e+00, 2.000000e+00, 2.000000e+00, 2.000000e+00}>>
    - Last Modified: Tue May 14 17:10:32 UTC 2024
    - 106.2K bytes
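
    An editorial note on the quantized bias constant shown above, assuming the standard TFLite convolution quantization convention (not stated in the snippet itself): the i32 bias scale is the product of the input scale and the per-channel weight scale.

    // Assumed convention: s_bias[c] = s_input * s_weight[c].
    // The input is quantized as !quant.uniform<i8:f32, 2.000000e+00:0>, so s_input = 2.0;
    // per-channel bias scales of {2.0, 2.0, 2.0, 2.0} are consistent with weight scales of 1.0,
    // since 2.0 * 1.0 = 2.0 for each output channel.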
  4. tensorflow/compiler/mlir/tensorflow/tests/shape_inference.mlir

      func.func @simple_folding(%arg0: tensor<1x1x1x1xi32>, %arg1: tensor<1x1x1x1xf32>) -> tensor<?x?x?x?xf32> {
        // CHECK: %[[SHAPE:.*]] = "tf.Shape"
        // CHECK: %[[CONV:.*]] = "tf.Conv2DBackpropInput"(%[[SHAPE]]
        // CHECK-SAME: (tensor<4xi32>, tensor<1x1x1x1xf32>, tensor<1x1x1x1xf32>) -> tensor<1x1x1x1xf32>
        // CHECK: return %[[CONV]] : tensor<1x1x1x1xf32>
        %0 = "tf.Shape"(%arg0) : (tensor<1x1x1x1xi32>) -> tensor<4xi32>
    - Last Modified: Tue Jan 23 17:24:10 UTC 2024
    - 167.4K bytes
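
    The CHECK lines above verify that tf.Shape of the statically shaped %arg0 folds to a constant, which lets shape inference refine the Conv2DBackpropInput result from tensor<?x?x?x?xf32> to tensor<1x1x1x1xf32>. A minimal sketch of what the truncated test body plausibly looks like (an editorial reconstruction, not the verbatim file):

    func.func @simple_folding(%arg0: tensor<1x1x1x1xi32>, %arg1: tensor<1x1x1x1xf32>) -> tensor<?x?x?x?xf32> {
      // Shape of a fully static tensor; folds to the constant [1, 1, 1, 1].
      %0 = "tf.Shape"(%arg0) : (tensor<1x1x1x1xi32>) -> tensor<4xi32>
      // Once the output-shape operand is a known constant, shape inference can
      // refine the dynamic result type to tensor<1x1x1x1xf32>.
      %1 = "tf.Conv2DBackpropInput"(%0, %arg1, %arg1) {padding = "VALID", strides = [1, 1, 1, 1]}
          : (tensor<4xi32>, tensor<1x1x1x1xf32>, tensor<1x1x1x1xf32>) -> tensor<?x?x?x?xf32>
      func.return %1 : tensor<?x?x?x?xf32>
    }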
  5. tensorflow/compiler/mlir/lite/tests/prepare-quantize.mlir

        } : (tensor<1x6x6x16xf32>) -> tensor<1x1x1x16xf32>
      func.return %1 : tensor<1x1x1x16xf32>
    
    // CHECK: %0 = "tfl.dequantize"(%arg0)
    // CHECK: %1 = "tfl.average_pool_2d"(%0)
    // CHECK: %2 = "tfl.quantize"(%1)
    // CHECK: %3 = "tfl.dequantize"(%2)
    // CHECK: return %3 : tensor<1x1x1x16xf32>
    }
    
    // CHECK-LABEL: QuantizeMaximum
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 67.5K bytes
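
    The CHECK sequence above documents prepare-quantize inserting a quantize/dequantize pair after the float average pool so that quantization parameters propagate through it. A minimal sketch of an input function that would yield that output (hypothetical: the function name, scale, and pooling attributes are assumptions, not the actual test):

    func.func @QuantizeAveragePool2D(%arg0: tensor<1x6x6x16x!quant.uniform<u8:f32, 7.812500e-03:128>>) -> tensor<1x1x1x16xf32> {
      // Dequantize the quantized activation back to float (matches CHECK %0).
      %0 = "tfl.dequantize"(%arg0) : (tensor<1x6x6x16x!quant.uniform<u8:f32, 7.812500e-03:128>>) -> tensor<1x6x6x16xf32>
      // A 6x6 global average pool; the pass re-quantizes its result, producing the
      // dequantize -> average_pool_2d -> quantize -> dequantize chain in the CHECKs.
      %1 = "tfl.average_pool_2d"(%0) {
        filter_height = 6 : i32, filter_width = 6 : i32, fused_activation_function = "NONE",
        padding = "VALID", stride_h = 1 : i32, stride_w = 1 : i32
      } : (tensor<1x6x6x16xf32>) -> tensor<1x1x1x16xf32>
      func.return %1 : tensor<1x1x1x16xf32>
    }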
  6. tensorflow/compiler/mlir/lite/transforms/optimize_patterns.td

      // Move binary op batched RHS before reshape:
      // binary(reshape(lhs), rhs) => reshape(binary(lhs, flatten(rhs)))
      // The pattern targeted here is as follows:
      // [input, lhs, rhs] == [<1x1024x128>, <1x1024x8x16>, <1x1x8x16xf32>]
      // This is valid only when:
      // 1. The last dimension of lhs is equal to the number of elements in the constant rhs.
      // 2. The reduced shape of rhs, here <8x16>, is equal to the last dimensions of lhs.
    - Last Modified: Thu May 16 20:31:41 UTC 2024
    - 66.4K bytes
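
    To make the rewrite described in the comment above concrete, here is a hedged before/after sketch (hypothetical IR, splat constants used only for brevity; not taken from the test suite):

    // Before: rhs <1x1x8x16xf32> (128 elements) is added to the reshaped lhs.
    func.func @before(%lhs: tensor<1x1024x128xf32>) -> tensor<1x1024x8x16xf32> {
      %shape = arith.constant dense<[1, 1024, 8, 16]> : tensor<4xi32>
      %rhs = arith.constant dense<1.0> : tensor<1x1x8x16xf32>
      %r = "tfl.reshape"(%lhs, %shape) : (tensor<1x1024x128xf32>, tensor<4xi32>) -> tensor<1x1024x8x16xf32>
      %out = "tfl.add"(%r, %rhs) {fused_activation_function = "NONE"} : (tensor<1x1024x8x16xf32>, tensor<1x1x8x16xf32>) -> tensor<1x1024x8x16xf32>
      func.return %out : tensor<1x1024x8x16xf32>
    }

    // After: rhs is flattened to <128xf32>, whose element count equals the last dimension
    // of lhs, so the add can run on the un-reshaped lhs and the reshape moves below it.
    func.func @after(%lhs: tensor<1x1024x128xf32>) -> tensor<1x1024x8x16xf32> {
      %shape = arith.constant dense<[1, 1024, 8, 16]> : tensor<4xi32>
      %flat_rhs = arith.constant dense<1.0> : tensor<128xf32>
      %sum = "tfl.add"(%lhs, %flat_rhs) {fused_activation_function = "NONE"} : (tensor<1x1024x128xf32>, tensor<128xf32>) -> tensor<1x1024x128xf32>
      %out = "tfl.reshape"(%sum, %shape) : (tensor<1x1024x128xf32>, tensor<4xi32>) -> tensor<1x1024x8x16xf32>
      func.return %out : tensor<1x1024x8x16xf32>
    }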
  7. tensorflow/compiler/mlir/lite/stablehlo/tests/legalize_hlo.mlir

    // CHECK:           %[[VAL_2:.*]] = "tf.Reshape"(%[[VAL_0]], %[[VAL_1]]) : (tensor<3x1x16xf32>, tensor<4xi64>) -> tensor<3x1x1x16xf32>
    // CHECK:           %[[VAL_3:.*]] = arith.constant dense<[3, 8, 8, 16]> : tensor<4xi64>
    // CHECK:           %[[VAL_4:.*]] = "tf.BroadcastTo"(%[[VAL_2]], %[[VAL_3]]) : (tensor<3x1x1x16xf32>, tensor<4xi64>) -> tensor<3x8x8x16xf32>
    // CHECK:           return %[[VAL_4]] : tensor<3x8x8x16xf32>
    // CHECK:         }
    - Last Modified: Wed May 29 07:26:59 UTC 2024
    - 340.2K bytes
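
    A brief editorial note on the context inferred from these CHECK lines (the source op is not shown in the snippet, so this is an assumption): the legalization splits a rank-expanding broadcast into tf.Reshape plus tf.BroadcastTo.

    // Deducible from the Reshape signature (tensor<3x1x16xf32>, tensor<4xi64>) -> tensor<3x1x1x16xf32>,
    // the elided %[[VAL_1]] must be the target shape constant:
    // CHECK: %[[VAL_1:.*]] = arith.constant dense<[3, 1, 1, 16]> : tensor<4xi64>
    // The presumed source is a broadcast_in_dim of %arg0 : tensor<3x1x16xf32> to
    // tensor<3x8x8x16xf32>: the unit dimension is inserted by tf.Reshape and the 8x8
    // expansion is materialized by tf.BroadcastTo.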
  8. tensorflow/compiler/mlir/lite/tests/optimize.mlir

    func.func @InvalidFuseTileAlreadyBroadcastAlongTileDim(%arg0: tensor<1x1x1x1xf32>) -> tensor<1x6x8x1xf32> {
      %cst_1 = arith.constant dense<[1, 6, 8, 1]> : tensor<4xi32>
      %cst_2 = arith.constant dense<[1, 1, 1, 46]> : tensor<4xi32>
      %cst_20 = arith.constant dense<4.600000e+01> : tensor<f32>
      %0 = "tfl.tile"(%arg0, %cst_1) : (tensor<1x1x1x1xf32>, tensor<4xi32>) -> tensor<1x6x8x1xf32>
    - Last Modified: Thu May 16 20:31:41 UTC 2024
    - 284.1K bytes