Results 1 - 7 of 7 for 1x112x112x32xf32 (0.24 sec)

  1. tensorflow/compiler/mlir/lite/tests/quantize-numeric-verify.mlir

      %6 = "tfl.quantize"(%5) {qtype = tensor<1x112x112x32x!quant.uniform<u8:f32, 0.023528476789885875>>} : (tensor<1x112x112x32xf32>) -> tensor<1x112x112x32x!quant.uniform<u8:f32, 0.023528476789885875>>
      func.return %6 : tensor<1x112x112x32x!quant.uniform<u8:f32, 0.023528476789885875>>
    
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 15.1K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/lite/tests/quantize.mlir

      %6 = "tfl.quantize"(%5) {qtype = tensor<1x112x112x32x!quant.uniform<u8:f32, 0.023528476789885875>>} : (tensor<1x112x112x32xf32>) -> tensor<1x112x112x32x!quant.uniform<u8:f32, 0.023528476789885875>>
      func.return %6 : tensor<1x112x112x32x!quant.uniform<u8:f32, 0.023528476789885875>>
    
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 28 23:10:13 UTC 2024
    - 39.7K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/lite/tests/decompose-hybrid-quantization.mlir

      func.return %2 : tensor<1x112x112x32xf32>
    }
    
    // -----
    
    // CHECK-LABEL: @test_fullyconnected_replace_float
    func.func @test_fullyconnected_replace_float(%arg0: tensor<4x256x6x6xf32>) -> tensor<4x256x36xf32> {
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 13.1K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/tests/prepare-quantize-signed.mlir

      func.return %dc : tensor<1x112x112x32xf32>
    
    // CHECK: %[[cst:.*]] = arith.constant dense<1.270000e+02> : tensor<32x3x3x3xf32>
    // CHECK: %[[q:.*]] = "tfl.quantize"(%[[cst]]) <{qtype = tensor<32x3x3x3x!quant.uniform<i8<-127:127>:f32:3
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 18.4K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/lite/tests/prepare-quantize-dynamic-range.mlir

      %dconv_s = "quantfork.stats"(%dconv) {layerStats = dense<[0.000000e+00, 1.000000e+01]> : tensor<2xf32>} : (tensor<1x112x112x64xf32>) -> tensor<1x112x112x64xf32>
      %bmm = "tfl.batch_matmul"(%conv_s, %dconv_s) {adj_x = false, adj_y = true} : (tensor<1x112x112x64xf32>, tensor<1x112x112x64xf32>) -> tensor<1x112x112x112xf32>
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 38.2K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/lite/tests/quantize-dynamic-range.mlir

      %bmm = "tfl.batch_matmul"(%conv, %dconv) {adj_x = false, adj_y = true} : (tensor<1x112x112x64xf32>, tensor<1x112x112x64xf32>) -> tensor<1x112x112x112xf32>
      func.return %bmm, %emb : tensor<1x112x112x112xf32>, tensor<3x3x3x3xf32>
    
    // CHECK-DAG: %[[b:.*]] = arith.constant dense<-1.23697901> : tensor<64xf32>
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 23 21:09:00 UTC 2024
    - 23.2K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/tensorflow/tests/tpu_space_to_depth_pass.mlir

        // CHECK-DAG: %[[SPACETODEPTH0:.*]] = "tf.SpaceToDepth"([[INPUT:.*]]) <{block_size = 2 : i64, data_format = "NHWC"}> : (tensor<2x224x224x3xf32>) -> tensor<2x112x112x12xf32>
        %2 = "tf.AddV2"(%arg2, %arg3) {device = ""} : (tensor<i32>, tensor<i32>) -> tensor<i32>
        %3 = "tf.ReadVariableOp"(%arg6) : (tensor<!tf_type.resource<tensor<7x7x3x64xf32>>>) -> tensor<7x7x3x64xf32>
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 37.4K bytes
    - Viewed (0)