Results 1 - 10 of 50 for 3x3x2x1xf32 (0.22 sec)

  1. tensorflow/compiler/mlir/lite/tests/prepare-tf.mlir

    }
    
    func.func @depthwiseConv2D(tensor<256x32x32x3xf32>, tensor<3x3x3x4xf32>, tensor<256x3x32x32xf32>) -> (tensor<256x30x30x12xf32>, tensor<256x12x30x30xf32>, tensor<256x30x30x12xf32>, tensor<256x30x30x12xf32>) {
    ^bb0(%arg0: tensor<256x32x32x3xf32>, %arg1: tensor<3x3x3x4xf32>, %arg2: tensor<256x3x32x32xf32>) :
       // OK
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 29 07:26:59 UTC 2024
    - 59.8K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/quantization/tensorflow/tests/prepare_lifting.mlir

      %1 = "tf.Sub"(%0, %cst_0) {data_format = "NHWC"} : (tensor<1x3x2x2xf32>, tensor<1x1x1x2xf32>) -> tensor<1x3x2x2xf32>
      %2 = "tf.Mul"(%1, %cst_1) : (tensor<1x3x2x2xf32>, tensor<1x1x1x2xf32>) -> tensor<1x3x2x2xf32>
      %3 = "tf.AddV2"(%2, %cst_2) : (tensor<1x3x2x2xf32>, tensor<1x1x1x2xf32>) -> tensor<1x3x2x2xf32>
      func.return %3 : tensor<1x3x2x2xf32>
    }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 14 03:24:59 UTC 2024
    - 33.3K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/quantization/tensorflow/tests/fake_quant_e2e_xla.mlir

        %2 = "tf.Relu"(%1) {device = ""} : (tensor<1x3x2x2xf32>) -> tensor<1x3x2x2xf32>
        %3 = "tf.FakeQuantWithMinMaxArgs"(%2) {device = "", max = 4.000000e-01 : f32, min = -3.000000e-01 : f32, narrow_range = false, num_bits = 8 : i64} : (tensor<1x3x2x2xf32>) -> tensor<1x3x2x2xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 7.2K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composite_functions_xla.mlir

        %6 = "quantfork.qcast"(%5) : (tensor<1x3x2x2xf32>) -> tensor<1x3x2x2x!quant.uniform<i8:f32, 0.0054901962186775953:-19>>
        %7 = "quantfork.dcast"(%6) : (tensor<1x3x2x2x!quant.uniform<i8:f32, 0.0054901962186775953:-19>>) -> tensor<1x3x2x2xf32>
        return %7 : tensor<1x3x2x2xf32>
      }
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Jan 08 01:16:10 UTC 2024
    - 25.2K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/lite/tests/prepare-tf-fake-quant-4bit.mlir

      %fq = "tf.FakeQuantWithMinMaxVarsPerChannel"(%in, %mini, %maxi) {num_bits = 3, narrow_range = false} : (tensor<3x3x3x16xf32>, tensor<16xf32>, tensor<16xf32>) -> tensor<3x3x3x16xf32>
      %rst = "tf.Conv2D"(%arg, %fq) {T = "tfdtype$DT_FLOAT", data_format = "NHWC", dilations = [1, 2, 3, 1], padding = "SAME", strides = [1, 4, 5, 1]} : (tensor<256x32x32x3xf32>, tensor<3x3x3x16xf32>) -> tensor<256x8x7x16xf32>
      func.return %rst : tensor<256x8x7x16xf32>
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 22K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/quantization/stablehlo/tests/bridge/optimize.mlir

        : (tensor<?x3x2x1xf32>, tensor<2x1x1x1xf32>) -> tensor<?x2x2x1xf32>
      %1 = chlo.broadcast_add %0, %zp_offset : (
          tensor<?x2x2x1xf32>, tensor<?x2x2x1xf32>) -> tensor<?x2x2x1xf32>
      %2 = chlo.broadcast_add %1, %bias : (
          tensor<?x2x2x1xf32>, tensor<1xf32>) -> tensor<?x2x2x1xf32>
      return %2 : tensor<?x2x2x1xf32>
    }
    
    // -----
    
    // CHECK-LABEL: func @dot_general_add_add
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Feb 24 02:26:47 UTC 2024
    - 10.7K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/lite/tests/prepare-tf-fake-quant.mlir

      %fq = "tf.FakeQuantWithMinMaxVarsPerChannel"(%in, %mini, %maxi) {num_bits = 5, narrow_range = false} : (tensor<3x3x3x16xf32>, tensor<16xf32>, tensor<16xf32>) -> tensor<3x3x3x16xf32>
      %rst = "tf.Conv2D"(%arg, %fq) {T = "tfdtype$DT_FLOAT", data_format = "NHWC", dilations = [1, 2, 3, 1], padding = "SAME", strides = [1, 4, 5, 1]} : (tensor<256x32x32x3xf32>, tensor<3x3x3x16xf32>) -> tensor<256x8x7x16xf32>
      func.return %rst : tensor<256x8x7x16xf32>
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 20.4K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/lite/stablehlo/tests/composite-lowering.mlir

    // CHECK{LITERAL}: %cst_1 = arith.constant dense<[[[[1.000000e+00], [1.250000e+00]]]]> : tensor<1x1x2x1xf32>
    // CHECK: %3 = tfl.mul %2, %cst_1 {fused_activation_function = "NONE"} : tensor<1x1x2x1xf32>
    // CHECK: %cst_2 = arith.constant dense<[0, 3, 1, 2]> : tensor<4xi32>
    // CHECK: %4 = "tfl.transpose"(%3, %cst_2) : (tensor<1x1x2x1xf32>, tensor<4xi32>) -> tensor<1x1x1x2xf32>
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jun 06 18:45:51 UTC 2024
    - 32.6K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/insert_weight_param.mlir

        has_token_input_output = false, module = "", platforms = [],
        version = 5 : i64
      } : (tensor<1x3x2x3xf32>, tensor<2x3x3x2xf32>) -> tensor<1x2x2x2xf32>
      return %0 : tensor<1x2x2x2xf32>
    }
    
    // CHECK-LABEL: func.func @qdq_for_conv_weight_empty
    // CHECK-SAME: (%[[ARG_0:.+]]: tensor<1x3x2x3xf32>) -> tensor<1x2x2x2xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 09 05:56:10 UTC 2024
    - 22K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/lite/tests/legalize-tf.mlir

      %0 = "tf.Less"(%arg0, %arg1) : (tensor<8x7x6x5x?x3x2x1xf32>, tensor<?x3x2x1xf32>) -> tensor<8x7x6x5x?x3x2x1xi1>
      %1 = "tf.SelectV2"(%0, %arg0, %arg1) : (tensor<8x7x6x5x?x3x2x1xi1>, tensor<8x7x6x5x?x3x2x1xf32>, tensor<?x3x2x1xf32>) -> tensor<8x7x6x5x?x3x2x1xf32>
      func.return %1 : tensor<8x7x6x5x?x3x2x1xf32>
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 05 01:54:33 UTC 2024
    - 153.4K bytes
    - Viewed (0)