Results 1 - 5 of 5 for 1x4x2x2xf32 (2.69 sec)

  1. tensorflow/compiler/mlir/quantization/tensorflow/tests/replace_cast_hacks_with_tf_xla_ops.mlir

        %0 = "tf.Cast"(%arg0) : (tensor<1x3x2x2xi8>) -> tensor<1x3x2x2xi32>
        %1 = "tf.Sub"(%0, %arg2) : (tensor<1x3x2x2xi32>, tensor<i32>) -> tensor<1x3x2x2xi32>
        %2 = "tf.Cast"(%1) : (tensor<1x3x2x2xi32>) -> tensor<1x3x2x2xf32>
        %3 = "tf.Mul"(%2, %arg1) : (tensor<1x3x2x2xf32>, tensor<f32>) -> tensor<1x3x2x2xf32>
        return %3 : tensor<1x3x2x2xf32>
      }
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 81K bytes
  2. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize_composite_functions.mlir

          } : (tensor<1x3x4x3xf32>, tensor<2x3x3x2xf32>, tensor<1x1x1x2xf32>) -> tensor<1x3x4x2xf32>
        %2 = "quantfork.stats"(%1) {layerStats = dense<[5.00000000e-6, 7.00000000e-1]> : tensor<2xf32>} : (tensor<1x3x4x2xf32>) -> tensor<1x3x4x2xf32>
        return %2 : tensor<1x3x4x2xf32>
      }
    // CHECK: func.func private @quantize_conv_with_bias_fn(%[[ARG_0:.+]]: tensor<1x3x4x3xf32>) -> tensor<1x3x4x2xf32> attributes {tf._original_func_name = "main_0"}
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 09 05:56:10 UTC 2024
    - 91.6K bytes
  3. tensorflow/compiler/mlir/tensorflow/tests/lower_tf.mlir

        // CHECK-NEXT:  %[[SELU_VAL:.*]] = "tf.Mul"(%[[ELU_VAL]], %[[SCALED_ALPHA]]) : (tensor<1x4x4x3xf32>, tensor<f32>) -> tensor<1x4x4x3xf32>
        // CHECK-NEXT:  %[[RES:.*]] = "tf.SelectV2"(%[[PRED]], %[[SCALED_FEATURES]], %[[SELU_VAL]]) : (tensor<1x4x4x3xi1>, tensor<1x4x4x3xf32>, tensor<1x4x4x3xf32>) -> tensor<1x4x4x3xf32>
        // CHECK-NEXT:  return %[[RES]] : tensor<1x4x4x3xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Jan 05 18:35:42 UTC 2024
    - 92K bytes
  4. tensorflow/compiler/mlir/lite/tests/prepare-tf.mlir

      // CHECK:  %[[MUL3:.*]] = "tf.Mul"(%[[MEAN]], %[[MUL1]]) : (tensor<2xf32>, tensor<2xf32>) -> tensor<2xf32>
      // CHECK:  %[[SUB:.*]] = "tf.Sub"(%arg2, %[[MUL3]]) : (tensor<2xf32>, tensor<2xf32>) -> tensor<2xf32>
      // CHECK:  %[[ADD0:.*]] = "tf.Add"(%[[MUL2]], %[[SUB]]) : (tensor<1x1x6x2xf32>, tensor<2xf32>) -> tensor<1x1x6x2xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 29 07:26:59 UTC 2024
    - 59.8K bytes
  5. tensorflow/compiler/mlir/lite/tests/prepare-quantize.mlir

      func.return %1 : tensor<1x2x2x5xf32>
    
    // CHECK: %0 = "tfl.dequantize"(%arg0)
    // CHECK: %1 = "tfl.strided_slice"(%0, %arg1, %arg2, %arg3)
    // CHECK: %2 = "tfl.quantize"(%1) <{qtype = tensor<1x2x2x5x!quant.uniform<u8:f32, 1.000000e-01>>}> {volatile}
    // CHECK: %3 = "tfl.dequantize"(%2)
    // CHECK: return %3 : tensor<1x2x2x5xf32>
    }
    
    // CHECK-LABEL: QuantizePad
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 67.5K bytes