Results 11 - 20 of 35 for 32xf32 (0.12 sec)

  1. tensorflow/compiler/mlir/lite/tests/const-fold.mlir

      %7 = "tfl.add"(%2, %1) {fused_activation_function = "NONE"} : (tensor<4xf32>, tensor<f32>) -> tensor<4xf32>
      %8 = "tfl.add"(%2, %3) {fused_activation_function = "NONE"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
      %9 = "tfl.add"(%2, %3) {fused_activation_function = "SIGN_BIT"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 45.8K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/lite/tests/post-quantize.mlir

    // CHECK-NEXT:  %[[quant:.*]] = "tfl.quantize"(%[[split]]#0) <{qtype = tensor<2x!quant.uniform<u8:f32, 1.000000e+00>>}> {volatile} : (tensor<2xf32>) -> tensor<2x!quant.uniform<u8:f32, 1.000000e+00>>
    // CHECK-NEXT:  return %[[quant]] : tensor<2x!quant.uniform<u8:f32, 1.000000e+00>>
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 19.9K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/insert_calibration_statistics_saver.mlir

        %cst = "tf.Const"() <{value = dense<[0, 1]> : tensor<2xi32>}> {device = ""} : () -> tensor<2xi32>
        %cst_0 = "tf.Const"() <{value = dense<1.000000e+01> : tensor<f32>}> {device = ""} : () -> tensor<f32>
        %0 = "tf.Sum"(%arg0, %cst) <{keep_dims = false}> {device = ""} : (tensor<1x4xf32>, tensor<2xi32>) -> tensor<f32>
        %1 = "tf.Greater"(%0, %cst_0) {device = ""} : (tensor<f32>, tensor<f32>) -> tensor<i1>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 01:09:50 UTC 2024
    - 24.3K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/quantization/stablehlo/tests/components/tf_to_stablehlo.mlir

      func.return %1#0 : tensor<1x3x2x2xf32>
    }
    // CHECK: func.func @main(%[[ARG:.+]]: tensor<1x3x4x3xf32>) -> tensor<1x3x2x2xf32> {
    // CHECK-DAG: %[[CONST_0:.*]] = stablehlo.constant dense<[{{.*}}]> : tensor<2xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Apr 08 20:05:12 UTC 2024
    - 13.6K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/lite/stablehlo/tests/tfl_legalize_hlo.mlir

    // CHECK-NEXT:      %8 = mhlo.dynamic_reshape %arg0, %7 : (tensor<4x4x?xf32>, tensor<3xi32>) -> tensor<4x4x?xf32>
    // CHECK-NEXT:      %9 = "tfl.shape"(%arg1) : (tensor<4x?x256xf32>) -> tensor<3xi32>
    // CHECK-NEXT:      %10 = "tfl.pseudo_const"() <{value = dense<[-1, -1, 0]> : tensor<3xi32>}> : () -> tensor<3xi32>
    // CHECK-NEXT:      %11 = "tfl.pseudo_const"() <{value = dense<[-1, 0, -1]> : tensor<3xi32>}> : () -> tensor<3xi32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 40.1K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/lite/tests/prepare-quantize-dynamic-range.mlir

            tensor<20xf32>, tensor<20xf32>, tensor<20xf32>,
            tensor<20xf32>, tensor<20xf32>, tensor<20xf32>, tensor<20xf32>,
            tensor<20x20xf32>, none,
            tensor<1x20xf32>, tensor<1x20xf32>,
            none, none, none, none) -> tensor<1x28x20xf32>
        %1 = "quantfork.stats"(%0) {layerStats = dense<[-1.0, 2.0]> : tensor<2xf32>} : (tensor<1x28x20xf32>) -> tensor<1x28x20xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 38.2K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_xla.mlir

    // -----
    
    // CHECK-LABEL: standalone_avgpool_test
    func.func @standalone_avgpool_test(%arg0: tensor<*xf32>) -> tensor<*xf32> {
      %cst = arith.constant dense<[-1, 144]> : tensor<2xi32>
      %0 = "quantfork.qcast"(%arg0) : (tensor<*xf32>) -> tensor<*x!quant.uniform<i8:f32, 5.000000e-02:-10>>
      %1 = "quantfork.dcast"(%0) : (tensor<*x!quant.uniform<i8:f32, 5.000000e-02:-10>>) -> tensor<*xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 08 19:32:28 UTC 2024
    - 11.4K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/quantization/common/uniform_quantized_types_test.cc

    }
    
    TEST_F(IsOpFullyQuantizedTest, FalseIfOpNotQuantized) {
      constexpr absl::string_view kNotQuantizedAdd = R"mlir(
        func.func @not_quantized_add(%arg0: tensor<2xf32>) -> tensor<2xf32> {
          %0 = stablehlo.add %arg0, %arg0 : tensor<2xf32>
          return %0 : tensor<2xf32>
        }
      )mlir";
    
      OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kNotQuantizedAdd);
      ASSERT_TRUE(module_op);
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 28.8K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/quantization/tensorflow/tests/lift_quantizable_spots_as_functions.mlir

      %7 = "tf.BiasAdd"(%6, %cst) {data_format = "NHWC", device = ""} : (tensor<*xf32>, tensor<2xf32>) -> tensor<*xf32>
      func.return %2, %5, %7 : tensor<*xf32>, tensor<*xf32>, tensor<*xf32>
    
    // CHECK: %[[CONST_0:.*]] = "tf.Const"() <{value = dense<0.000000e+00> : tensor<2xf32>}> : () -> tensor<2xf32>
    // CHECK: %[[PARTITIONEDCALL_0:.*]] = "tf.PartitionedCall"(%arg0, %arg1, %[[CONST_0]])
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 26.5K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/quantization/tensorflow/tests/insert_custom_aggregation_ops.mlir

    module {
      func.func @wrap_composite_func(%arg0: tensor<*xf32>, %arg1: tensor<*xf32>) -> tensor<*xf32> {
        %0 = "tf.PartitionedCall"(%arg0, %arg1) <{f = @composite_conv2d_with_relu6_fn}> {_tfl_quant_trait = "fully_quantizable"}
              : (tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32>
        func.return %0 : tensor<*xf32>
      }
    
      func.func @no_composite_func(%arg0: tensor<*xf32>, %arg1: tensor<*xf32>) -> tensor<*xf32> {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 32.1K bytes
    - Viewed (0)