Results 11 - 15 of 15 for 3x1x1x2xf32 (0.09 sec)

  1. tensorflow/compiler/mlir/quantization/tensorflow/tests/prepare_lifting.mlir

      %cst_0 = "tf.Const"() {value = dense<0.400000e+00> : tensor<1x1x1x2xf32>} : () -> tensor<1x1x1x2xf32>
      %cst_1 = "tf.Const"() {value = dense<0.200000e+00> : tensor<1x1x1x2xf32>} : () -> tensor<1x1x1x2xf32>
      %cst_2 = "tf.Const"() {value = dense<0.300000e+00> : tensor<1x1x1x2xf32>} : () -> tensor<1x1x1x2xf32>
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 14 03:24:59 UTC 2024
    - 33.3K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/lite/tests/prepare-quantize-dynamic-range.mlir

    // MinElement-LABEL: QuantizeCustomOp
    func.func @QuantizeCustomOp(%arg0: tensor<1x1x1x1xf32>) -> (tensor<*xf32>, tensor<*xf32>, tensor<*xf32>) attributes {tf.entry_function = {inputs = "input", outputs = "custom_op"}} {
      %0 = "quantfork.stats"(%arg0) {layerStats = dense<[0.000000e+00, 2.550000e+02]> : tensor<2xf32>} : (tensor<1x1x1x1xf32>) -> tensor<1x1x1x1xf32>
      %w_1 = arith.constant dense<127.0> : tensor<4096x1x1x1xf32>
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 38.2K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/quantization/stablehlo/tests/bridge/optimize.mlir

      return %2 : tensor<?x2x2x1xi8>
    }
    
    // -----
    
    // CHECK-LABEL: func @convolution_add_add_f32
    func.func @convolution_add_add_f32(
        %lhs: tensor<?x3x2x1xf32>, %rhs: tensor<2x1x1x1xf32>,
        %zp_offset: tensor<?x2x2x1xf32>, %bias: tensor<1xf32>
      ) -> tensor<?x2x2x1xf32> {
      // CHECK-DAG: %[[conv:.*]] = mhlo.convolution
      // CHECK-DAG: %[[combined:.*]] = chlo.broadcast_add %[[conv:.*]], %[[zp_offset:.*]]
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Feb 24 02:26:47 UTC 2024
    - 10.7K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/tests/quantize-dynamic-range.mlir

    // CustomOpNotWeightOnly-LABEL: QuantizeCustomOp
    func.func @QuantizeCustomOp(%arg0: tensor<1x1x1x1xf32>) -> tensor<*xf32> attributes {tf.entry_function = {inputs = "input", outputs = "custom_op"}} {
      %0 = "quantfork.stats"(%arg0) {layerStats = dense<[0.000000e+00, 2.550000e+02]> : tensor<2xf32>} : (tensor<1x1x1x1xf32>) -> tensor<1x1x1x1xf32>
      %w = arith.constant dense<127.0> : tensor<1024x1x1x1xf32>
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 23 21:09:00 UTC 2024
    - 23.2K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/quantization/tensorflow/passes/convert_tf_xla_op_to_tf_op.cc

    //     {0}, then it returns: tensor<1x3x5xi32>.
    //   * If `xla_gather_op_output_type` == tensor<3x5xf32> and `collapsed_dims` ==
    //     {1, 3}, then it returns: tensor<3x1x5x1xf32>.
    Type GetSliceOpOutputType(Type xla_gather_op_output_type,
                              const absl::flat_hash_set<int64_t>& collapsed_dims) {
      if (auto ranked_output_type =
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 13.2K bytes
    - Viewed (0)
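The comment excerpt in result 5 describes how GetSliceOpOutputType derives a slice output type by re-inserting size-1 dimensions at each index listed in `collapsed_dims`. The snippet cuts off before the function body, so the following is only a minimal sketch of that behavior under those assumptions, not the file's actual implementation; the helper name `SliceOutputTypeSketch` is made up for illustration.

    // Sketch only: rebuilds a ranked tensor type whose shape has a 1 inserted
    // at every index in `collapsed_dims`, keeping the original dims in order.
    #include <cstdint>
    #include <vector>

    #include "absl/container/flat_hash_set.h"
    #include "mlir/IR/BuiltinTypes.h"  // mlir::RankedTensorType

    mlir::Type SliceOutputTypeSketch(
        mlir::Type xla_gather_op_output_type,
        const absl::flat_hash_set<int64_t>& collapsed_dims) {
      auto ranked =
          mlir::dyn_cast<mlir::RankedTensorType>(xla_gather_op_output_type);
      if (!ranked) return xla_gather_op_output_type;  // Unranked: leave as-is.

      std::vector<int64_t> new_shape;
      const int64_t new_rank = ranked.getRank() + collapsed_dims.size();
      int src_dim = 0;
      for (int64_t dim = 0; dim < new_rank; ++dim) {
        if (collapsed_dims.contains(dim)) {
          new_shape.push_back(1);  // Re-insert a collapsed, size-1 dimension.
        } else {
          new_shape.push_back(ranked.getDimSize(src_dim++));
        }
      }
      return mlir::RankedTensorType::get(new_shape, ranked.getElementType());
    }

For `tensor<3x5xf32>` with `collapsed_dims == {1, 3}`, the loop produces the shape 3x1x5x1, matching the second example in the comment above.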