Results 11 - 20 of 65 for 5x2xf32 (0.11 sec)

  1. tensorflow/compiler/mlir/lite/tests/prepare-quantize-post-training-16bits.mlir

      %0 = "quantfork.stats"(%arg0) {layerStats = dense<[-1.0, 1.0]> : tensor<2xf32>} : (tensor<1x1xf32>) -> tensor<1x1xf32>
      %1 = "tfl.tanh"(%0) : (tensor<1x1xf32>) -> tensor<1x1xf32>
      %2 = "quantfork.stats"(%1) {layerStats = dense<[-1.0, 1.0]> : tensor<2xf32>} : (tensor<1x1xf32>) -> tensor<1x1xf32>
      func.return %2 : tensor<1x1xf32>
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 26.1K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/insert_weight_param.mlir

        version = 5 : i64
      } : (tensor<1x2xf32>, tensor<2x3xf32>) -> tensor<1x3xf32>
      return %0 : tensor<1x3xf32>
    }
    
    // CHECK-LABEL: func.func @qdq_for_dot_general_weight_empty
    // CHECK-SAME: (%[[ARG_0:.+]]: tensor<1x2xf32>) -> tensor<1x3xf32>
    // CHECK: %[[CST:.+]] = "tf.Const"() <{value = dense<3.000000e-01> : tensor<2x3xf32>}> : () -> tensor<2x3xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 09 05:56:10 UTC 2024
    - 22K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/post_quantize.mlir

      %add = stablehlo.add %dq1, %dq2 : (tensor<3x2xf32>, tensor<3x2xf32>) -> tensor<3x2xf32>
      func.return %add : tensor<3x2xf32>
    }
    
    // -----
    
    // CHECK-LABEL: @quantize_constant
    // CHECK-SAME: %[[ARG0:.*]]: tensor<1x3xf32>
    func.func @quantize_constant(%arg0: tensor<1x3xf32>) -> tensor<1x2xf32> {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 18 20:32:46 UTC 2024
    - 4.4K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/tests/legalize-tf.mlir

    func.func @topk_3(%arg0: tensor<?x8xf32>) -> (tensor<?x2xf32>, tensor<?x2xi32>) {
      %0 = "tf.Const"() { value = dense<2> : tensor<i32> } : () -> tensor<i32>
      %1:2 = "tf.TopKV2"(%arg0, %0) : (tensor<?x8xf32>, tensor<i32>) -> (tensor<?x2xf32>, tensor<?x2xi32>)
      func.return %1#0, %1#1: tensor<?x2xf32>, tensor<?x2xi32>
    
    // CHECK-LABEL: topk_3
    // CHECK:  "tfl.topk_v2"(%arg0, %cst) : (tensor<?x8xf32>, tensor<i32>) -> (tensor<?x2xf32>, tensor<?x2xi32>)
    // CHECK:  return
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 05 01:54:33 UTC 2024
    - 153.4K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/tf2xla/tests/legalize-tf.mlir

    func.func @einsum(%arg0: tensor<2x3xf32>, %arg1: tensor<3x4xf32>) -> tensor<2x4xf32> {
      // CHECK:  mhlo.einsum
      %0 = "tf.Einsum"(%arg0, %arg1) {equation = "ab,bc->ac"} : (tensor<2x3xf32>, tensor<3x4xf32>) -> tensor<2x4xf32>
      func.return %0: tensor<2x4xf32>
    }
    
    // -----
    
    // CHECK-LABEL: func @unary_einsum
    func.func @unary_einsum(%arg0: tensor<2x3xf32>) -> tensor<2x2xf32> {
      // CHECK:  mhlo.unary_einsum
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon May 06 18:46:23 UTC 2024
    - 335.5K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/lite/tests/prepare-quantize-post-training.mlir

            tensor<1x1x5xf32>,
            tensor<2x5xf32>, tensor<2x5xf32>, tensor<2x5xf32>, tensor<2x5xf32>,
            tensor<2x4xf32>, tensor<2x4xf32>, tensor<2x4xf32>, tensor<2x4xf32>,
            tensor<2xf32>, tensor<2xf32>, tensor<2xf32>,
            tensor<2xf32>, tensor<2xf32>, tensor<2xf32>, tensor<2xf32>,
            tensor<4x2xf32>, tensor<4xf32>,
            tensor<1x4xf32>, tensor<1x2xf32>,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 52.6K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/tf2xla/tests/legalize-tf-quant.mlir

      } : (tensor<2x!tf_type.qint8>, tensor<f32>, tensor<i32>) -> tensor<2xf32>
      func.return %1 : tensor<2xf32>
    }
    
    // -----
    
    // CHECK-LABEL: func @uniform_quantize_and_dequantize_per_axis
    func.func @uniform_quantize_and_dequantize_per_axis(%arg0 : tensor<2x2xf32>) -> tensor<2x2xf32> {
      %scales = "tf.Const"() { value = dense<[1.0, 2.0]> : tensor<2xf32> } : () -> tensor<2xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 09 01:25:29 UTC 2024
    - 37.3K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize_composite_functions_weight_only.mlir

        return %1 : tensor<1x3xf32>
      }
    
      func.func private @composite_dot_general_fn(%arg0: tensor<1x2xf32>, %arg1: tensor<2x3xf32>) -> tensor<1x3xf32> attributes {_from_xla_call_module} {
        %0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<1x2xf32>, tensor<2x3xf32>) -> tensor<1x3xf32>
        return %0 : tensor<1x3xf32>
      }
    }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 09 05:56:10 UTC 2024
    - 9.4K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/quantization/stablehlo/cc/report_test.cc

          return %1 : tensor<1x3xf32>
        }
    
        func.func private @composite_dot_general_fn(%arg0: tensor<1x2xf32>, %arg1: tensor<2x3xf32>) -> tensor<1x3xf32> {
          %0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<1x2xf32>, tensor<2x3xf32>) -> tensor<1x3xf32>
          return %0 : tensor<1x3xf32>
        }
      )mlir";
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 10:10:34 UTC 2024
    - 18.5K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/quantization.mlir

      func.return %6 : tensor<1x401408xf32>
    }
    
    // CHECK-LABEL: quantized_constant
    func.func @quantized_constant(%arg0: tensor<1x2xf32>) -> tensor<2x2xf32> {
      %1 = "tfl.quantize"(%arg0) {qtype = tensor<1x2x!quant.uniform<u8:f32, 1.0>>, volatile} : (tensor<1x2xf32>) -> tensor<1x2x!quant.uniform<u8:f32, 1.0>>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 4.3K bytes
    - Viewed (0)