Results 1 - 7 of 7 for 1x112x112x64xf32 (0.25 sec)

  1. tensorflow/compiler/mlir/lite/tests/prepare-quantize-dynamic-range.mlir

      %dconv_s = "quantfork.stats"(%dconv) {layerStats = dense<[0.000000e+00, 1.000000e+01]> : tensor<2xf32>} : (tensor<1x112x112x64xf32>) -> tensor<1x112x112x64xf32>
      %bmm = "tfl.batch_matmul"(%conv_s, %dconv_s) {adj_x = false, adj_y = true} : (tensor<1x112x112x64xf32>, tensor<1x112x112x64xf32>) -> tensor<1x112x112x112xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 38.2K bytes
  2. tensorflow/compiler/mlir/lite/tests/quantize-dynamic-range.mlir

      func.return %conv : tensor<1x112x112x64xf32>
    
    // CHECK: %[[b:.*]] = arith.constant dense<-1.23697901> : tensor<64xf32>
    // CHECK: %[[w:.*]] = "tfl.pseudo_qconst"() <{qtype = tensor<64x3x3x3x!quant.uniform<i8<-127:127>:f32:0, {
    // CHECK: %[[conv:.*]] = "tfl.conv_2d"(%arg0, %[[w]], %[[b]]) <{
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 23 21:09:00 UTC 2024
    - 23.2K bytes
  3. tensorflow/compiler/mlir/lite/tests/prepare-quantize-signed.mlir

    func.func @QuantizeFullyConnected(%arg0: tensor<1x224x224x3xf32>) -> tensor<1x112x112x4xf32> {
      %w = arith.constant dense<127.0> : tensor<4x12xf32>
      %b = arith.constant dense<0.0> : tensor<4xf32>
      %fc = "tfl.fully_connected"(%arg0, %w, %b) {fused_activation_function = "NONE", keep_num_dims = false, weights_format = "DEFAULT"} : (tensor<1x224x224x3xf32>, tensor<4x12xf32>, tensor<4xf32>) -> tensor<1x112x112x4xf32>
      func.return %fc : tensor<1x112x112x4xf32>
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 18.4K bytes
  4. tensorflow/compiler/mlir/lite/tests/quantize-numeric-verify.mlir

      %6 = "tfl.quantize"(%5) {qtype = tensor<1x112x112x32x!quant.uniform<u8:f32, 0.023528476789885875>>} : (tensor<1x112x112x32xf32>) -> tensor<1x112x112x32x!quant.uniform<u8:f32, 0.023528476789885875>>
      func.return %6 : tensor<1x112x112x32x!quant.uniform<u8:f32, 0.023528476789885875>>
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 15.1K bytes
  5. tensorflow/compiler/mlir/tensorflow/tests/tpu_space_to_depth_pass.mlir

        %15 = "tf.Conv2D"(%11, %arg2) {data_format = "NHWC", dilations = [1, 1, 1, 1], explicit_paddings = [], padding = "VALID", strides = [1, 2, 2, 1], use_cudnn_on_gpu = true} : (tensor<2x230x230x3xf32>, tensor<7x7x3x64xf32>) -> tensor<2x112x112x64xf32>
        %16 = "tf.Mean"(%15, %8) {keep_dims = false} : (tensor<2x112x112x64xf32>, tensor<2xi32>) -> tensor<2x64xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 37.4K bytes
  6. tensorflow/compiler/mlir/lite/tests/quantize.mlir

      %6 = "tfl.quantize"(%5) {qtype = tensor<1x112x112x32x!quant.uniform<u8:f32, 0.023528476789885875>>} : (tensor<1x112x112x32xf32>) -> tensor<1x112x112x32x!quant.uniform<u8:f32, 0.023528476789885875>>
      func.return %6 : tensor<1x112x112x32x!quant.uniform<u8:f32, 0.023528476789885875>>
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 28 23:10:13 UTC 2024
    - 39.7K bytes
  7. tensorflow/compiler/mlir/lite/tests/decompose-hybrid-quantization.mlir

      func.return %2 : tensor<1x112x112x32xf32>
    }
    
    // -----
    
    // CHECK-LABEL: @test_fullyconnected_replace_float
    func.func @test_fullyconnected_replace_float(%arg0: tensor<4x256x6x6xf32>) -> tensor<4x256x36xf32> {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 13.1K bytes