Results 1 - 5 of 5 for 64x3x3x3xf16 (0.11 sec)

  1. tensorflow/compiler/mlir/lite/tests/prepare-quantize-dynamic-range.mlir

    // MinElement: return %[[dconv:.*]]
    
    // Float16-DAG: %[[w:.*]] = arith.constant dense<1.270000e+02> : tensor<64x3x3x3xf16>
    // Float16-DAG: %[[b:.*]] = arith.constant dense<0.000000e+00> : tensor<64xf16>
    // Float16: %[[dq_w:.*]] = "tfl.dequantize"(%[[w]]) : (tensor<64x3x3x3xf16>) -> tensor<64x3x3x3xf32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 38.2K bytes
  2. tensorflow/compiler/mlir/lite/experimental/tac/tests/get-op-cost.mlir

    func.func @testConv2DNoBiasCPU(%arg0: tensor<128x32x32x3xf32>, %arg1: tensor<64x3x3x3xf32>, %arg2: none) -> tensor<128x32x32x64xf32> {
      // CHECK: tac.cost = 0x4DD80000
      %0 = "tfl.conv_2d"(%arg0, %arg1, %arg2) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, padding = "SAME", stride_h = 1 : i32, stride_w = 1 : i32, fused_activation_function = "RELU6", tac.device = "CPU"} : (tensor<128x32x32x3xf32>, tensor<64x3x3x3xf32>, none) -> tensor<128x32x32x64xf32>
    - Last Modified: Thu Mar 24 05:29:10 UTC 2022
    - 5.7K bytes
  3. tensorflow/compiler/mlir/lite/tests/quantize-dynamic-range.mlir

      %w = arith.constant dense<1.270000e+02> : tensor<64x3x3x3xf32>
      %b = arith.constant dense<-1.23697901> : tensor<64xf32>
      %conv = "tfl.conv_2d"(%arg0, %w, %b) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "NONE", padding = "SAME", stride_h = 2 : i32, stride_w = 2 : i32} : (tensor<1x224x224x3xf32>, tensor<64x3x3x3xf32>, tensor<64xf32>) -> tensor<1x112x112x64xf32>
    - Last Modified: Thu May 23 21:09:00 UTC 2024
    - 23.2K bytes
  4. tensorflow/compiler/mlir/lite/tests/optimize_no_verify.mlir

    // TFLite runtime restrictions.
    // RUN: tf-opt %s -tfl-optimize | FileCheck %s
    
    // CHECK-LABEL: fuseScalarAddIntoConv2dHalf
    func.func @fuseScalarAddIntoConv2dHalf(%arg0: tensor<256x32x32x3xf16>, %arg1: tensor<16x3x3x3xf16>) -> tensor<256x8x7x16xf16> {
      %cst = arith.constant dense<1.5> : tensor<f16>
      %cst_0 = arith.constant dense<[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0]> : tensor<16xf16>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 5.8K bytes
  5. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization.td

             qtype = tensor<64x3x3x3x!quant.uniform<i8<-127:127>:f32, 1.000000e+00>>
        %w = "tfl.dequantize"(%q_w) :
             (tensor<64x3x3x3x!quant.uniform<i8<-127:127>:f32, 1.000000e+00>>) ->
             tensor<64x3x3x3xf32>
        %conv = "tfl.conv_2d"(%input_act, %w, %bias)
    
        but if it is supported, it will be rewritten as:
    
        %q_w = "tfl.pseudo_qconst"() {
    - Last Modified: Tue Mar 05 07:39:40 UTC 2024
    - 8.3K bytes
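
The quantization.td excerpt in result 5 breaks off before showing the rewritten pattern it describes. As a schematic sketch only (the quantized axis, the scale list, and the operand names below are assumptions, not quoted from quantization.td), the per-axis form would keep the weights as a quantized constant feeding the convolution directly, with no intervening tfl.dequantize:

    // Schematic sketch, not the actual quantization.td text: axis 0 and the
    // per-channel scale list (64 entries, elided here) are placeholders.
    %q_w = "tfl.pseudo_qconst"() {
         qtype = tensor<64x3x3x3x!quant.uniform<i8<-127:127>:f32:0, {1.000000e+00, ...}>>
    }
    %conv = "tfl.conv_2d"(%input_act, %q_w, %bias)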