Results 1 - 10 of 50 for 1x4xf32 (0.15 sec)

  1. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize/quantize.mlir

    // CHECK: return
    
      func.func private @composite_dot_general_fn(%arg0: tensor<1x4xf32>, %arg1: tensor<4x3xf32>) -> tensor<1x3xf32> attributes {_from_xla_call_module} {
          %0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<1x4xf32>, tensor<4x3xf32>) -> tensor<1x3xf32>
          return %0 : tensor<1x3xf32>
      }
    }
    
    // -----
    
    - Last Modified: Thu Apr 18 01:38:40 UTC 2024
    - 6.3K bytes
  2. tensorflow/compiler/mlir/lite/stablehlo/tests/compose-uniform-quantized-type.mlir

        %9 = stablehlo.convert %2 : (tensor<2x3xi8>) -> tensor<2x3xf32>
        %10 = stablehlo.dot_general %8, %9, contracting_dims = [1] x [0] : (tensor<1x2xf32>, tensor<2x3xf32>) -> tensor<1x3xf32>
        %11 = stablehlo.convert %3 : (tensor<1x3xi32>) -> tensor<1x3xf32>
        %12 = stablehlo.subtract %10, %11 : tensor<1x3xf32>  // q1 * q2 - z1 * q2
        %13 = stablehlo.multiply %12, %4 : tensor<1x3xf32>  // s1 * s2
    - Last Modified: Tue May 14 17:10:32 UTC 2024
    - 37K bytes
  3. tensorflow/compiler/mlir/lite/tests/prepare-quantize-post-training.mlir

            tensor<1x5xf32>,
            tensor<2x5xf32>, tensor<2x5xf32>, tensor<2x5xf32>, tensor<2x5xf32>,
            tensor<2x4xf32>, tensor<2x4xf32>, tensor<2x4xf32>, tensor<2x4xf32>,
            tensor<2xf32>, tensor<2xf32>, tensor<2xf32>,
            tensor<2xf32>, tensor<2xf32>, tensor<2xf32>, tensor<2xf32>,
            tensor<4x2xf32>, tensor<4xf32>,
            tensor<1x4xf32>, tensor<1x2xf32>,
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 52.6K bytes
  4. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/insert_calibration_statistics_saver.mlir

        %1 = "tf.BiasAdd"(%0, %arg2) <{data_format = "NHWC"}> {device = ""} : (tensor<1x3xf32>, tensor<3xf32>) -> tensor<1x3xf32>
        return %1 : tensor<1x3xf32>
      }
    
      func.func private @composite_matmul_with_bias_fn_2(%arg0: tensor<1x4xf32>, %arg1: tensor<4x3xf32>, %arg2: tensor<3xf32>) -> tensor<1x3xf32> attributes {tf_quant.composite_function} {
    - Last Modified: Thu Apr 25 01:09:50 UTC 2024
    - 24.3K bytes
  5. tensorflow/compiler/mlir/quantization/tensorflow/tests/insert_custom_aggregation_ops.mlir

        %3 = "tf.Identity"(%2#1) {device = ""} : (tensor<1x3xf32>) -> tensor<1x3xf32>
        return %3 : tensor<1x3xf32>
      }
    
    
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 32.1K bytes
  6. tensorflow/compiler/mlir/lite/tests/prepare-composite-functions-tf.mlir

    // CHECK:           }) : (tensor<1x?xf32>, tensor<1x0xf32>, tensor<1x0xf32>, tensor<1x0xf32>, tensor<1x0xf32>, tensor<1x3xf32>, tensor<1x3xf32>, tensor<1x3xf32>, tensor<1x3xf32>, none, none, none, tensor<1xf32>, tensor<1xf32>, tensor<1xf32>, tensor<1xf32>, tensor<3x1xf32>, tensor<3xf32>, tensor<1x3xf32>, tensor<1x1xf32>, none, none, none, none) -> tensor<1x3xf32>
    // CHECK:           [[VAL_50:%.*]] = tensor.cast [[VAL_51:%.*]] : tensor<1x3xf32> to tensor<1x?xf32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 122.1K bytes
  7. tensorflow/compiler/mlir/quantization/stablehlo/tests/components/pre_calibration_component.mlir

    func.func @main(%arg0: tensor<1x4xf32>) -> tensor<1x3xf32> {
      %0 = stablehlo.constant dense<1.0> : tensor<4x3xf32>
      %1 = stablehlo.dot_general %arg0, %0, contracting_dims = [1] x [0], precision = [DEFAULT, DEFAULT] : (tensor<1x4xf32>, tensor<4x3xf32>) -> tensor<1x3xf32>
      return %1 : tensor<1x3xf32>
    }
    // CHECK: @main(%[[ARG_0:.+]]: tensor<1x4xf32>) -> tensor<1x3xf32>
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 5.1K bytes
  8. tensorflow/compiler/mlir/lite/tests/ops.mlir

      func.return %24 : tensor<1x4xf32>
    }
    - Last Modified: Thu Jun 06 19:09:08 UTC 2024
    - 189.2K bytes
  9. tensorflow/compiler/mlir/lite/tests/quantize-dynamic-range-float16.mlir

          time_major = false} : (
            tensor<1x2x3xf32>,
            tensor<1x1xf32>, tensor<1x1xf32>, tensor<1x1xf32>, tensor<1x1xf32>,
            tensor<1x1xf32>, tensor<1x1xf32>, tensor<1x1xf32>, tensor<1x1xf32>,
            none, none, none,
            tensor<3xf32>, tensor<3xf32>, tensor<3xf32>, tensor<3xf32>,
            none, none,
            tensor<1x3xf32>, tensor<1x3xf32>,
            none, none, none, none) -> tensor<1x2x3xf32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 4.6K bytes
  10. tensorflow/compiler/mlir/lite/tests/prepare-quantize-post-training-16bits.mlir

          time_major = false} : (
            tensor<1x2x3xf32>,
            tensor<1x1xf32>, tensor<1x1xf32>, tensor<1x1xf32>, tensor<1x1xf32>,
            tensor<1x1xf32>, tensor<1x1xf32>, tensor<1x1xf32>, tensor<1x1xf32>,
            none, none, none,
            tensor<3xf32>, tensor<3xf32>, tensor<3xf32>, tensor<3xf32>,
            none, none,
            tensor<1x3xf32>, tensor<1x3xf32>,
            none, none, none, none) -> tensor<1x2x3xf32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 26.1K bytes