Results 11 - 20 of 39 for 1x2x3x5xf32 (0.47 sec)

  11. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/prepare_quantize/prepare_quantize_per_channel.mlir

          -> tensor<1x2x2x2xf32>
        %3 = "quantfork.stats"(%arg2) {layerStats = dense<[7.05456924, 7.11401462]> : tensor<2xf32>} : (tensor<2xf32>) -> tensor<2xf32>
        %4 = "quantfork.stats"(%2) {layerStats = dense<[-1.36523, 3.57373]> : tensor<2xf32>} : (tensor<1x2x2x2xf32>) -> tensor<1x2x2x2xf32>
        %5 = "chlo.broadcast_add"(%4, %3) : (tensor<1x2x2x2xf32>, tensor<2xf32>) -> tensor<1x2x2x2xf32>
    - Last Modified: Tue Mar 26 07:48:15 UTC 2024
    - 8.6K bytes
  12. tensorflow/compiler/mlir/lite/tests/push-tpose-through-ewise.mlir

      %perm = arith.constant dense<[3, 0, 1, 2]> : tensor<4xi32>
      %0 = "tfl.transpose"(%arg0, %perm) : (tensor<2x3x4x1xf32>, tensor<4xi32>) -> tensor<1x2x3x4xf32>
      %cst = arith.constant dense<1.0> : tensor<5x2x3x4xf32>
      %1 = "tfl.add"(%0, %cst) { fused_activation_function = "NONE" } : (tensor<1x2x3x4xf32>, tensor<5x2x3x4xf32>) -> tensor<5x2x3x4xf32>
      func.return %1 : tensor<5x2x3x4xf32>
    }
    
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 8.9K bytes
  13. tensorflow/compiler/mlir/lite/stablehlo/tests/fuse_mhlo_convolution.mlir

      // CHECK-DAG: %[[CST:.+]] = mhlo.constant dense<[1.000000e-01, 2.000000e-01]> : tensor<2xf32>
      // CHECK-DAG: %[[CST_BCAST:.+]] = "mhlo.broadcast_in_dim"(%[[CST]]) <{broadcast_dimensions = dense<3> : tensor<1xi64>}> : (tensor<2xf32>) -> tensor<1x1x3x2xf32>
      // CHECK-DAG: %[[NEW_FILTER:.+]] =  mhlo.multiply %[[CST_BCAST]], %[[FILTER]] : tensor<1x1x3x2xf32>
    - Last Modified: Sat Apr 06 15:32:52 UTC 2024
    - 4.4K bytes
  14. tensorflow/compiler/mlir/lite/stablehlo/tests/compose-uniform-quantized-type.mlir

        %16 = stablehlo.subtract %10, %15 : tensor<1x3x3x4xf32>
        %17 = stablehlo.broadcast_in_dim %4, dims = [0, 1, 2, 3] : (tensor<1x1x1x4xf32>) -> tensor<1x3x3x4xf32>
        %18 = stablehlo.multiply %16, %17 : tensor<1x3x3x4xf32>
    - Last Modified: Tue May 14 17:10:32 UTC 2024
    - 37K bytes
  15. tensorflow/compiler/mlir/quantization/tensorflow/tests/cast_bf16_ops_to_f32.mlir

      %2 = "tf.Cast"(%1) {Truncate = false} : (tensor<1x2x2x6xbf16>) -> tensor<1x2x2x6xf32>
      %3 = "tf.IdentityN"(%2) {device = ""} : (tensor<1x2x2x6xf32>) -> tensor<1x2x2x6xf32>
      return %3 : tensor<1x2x2x6xf32>
    }
    
    // CHECK: func @cast_bf16_depthwise_conv_to_fp32
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 8.4K bytes
  16. tensorflow/compiler/mlir/lite/tests/prepare-quantize-post-training-16bits.mlir

    // CHECK-LABEL: QuantizeUnidirectionalLstmFullPerTensor
    func.func @QuantizeUnidirectionalLstmFullPerTensor(%arg0: tensor<1x2x3xf32>) -> (tensor<1x2x3xf32>) {
      %input = "quantfork.stats"(%arg0) {layerStats = dense<[0.0, 1.0]> : tensor<2xf32>} : (tensor<1x2x3xf32>) -> tensor<1x2x3xf32>
      %1 = "tfl.pseudo_const"() {value = dense<[[0.1]]> : tensor<1x1xf32>} : () -> tensor<1x1xf32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 26.1K bytes
  17. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/insert_calibration_statistics_saver.mlir

      %2 = "tf.Relu6"(%1) {device = ""} : (tensor<1x2x2x2xf32>) -> tensor<1x2x2x2xf32>
      return %2 : tensor<1x2x2x2xf32>
    }
    // CHECK-LABEL: @composite_conv2d_with_bias_and_relu6_fn_1
    // CHECK-NOT: "tf.CalibrationStatisticsSaver"
    
    // -----
    
    // Check the IfOp is set to stateful.
    - Last Modified: Thu Apr 25 01:09:50 UTC 2024
    - 24.3K bytes
  18. tensorflow/compiler/mlir/quantization/common/lift_as_function_call_test.cc

            func.return %2: tensor<1x3x3x4xf32>
          }
    
          func.func @conv_1_fn(%arg0: tensor<1x3x3x4xf32>) -> tensor<1x3x3x4xf32> {
            %0 = stablehlo.constant dense<2.000000e+00> : tensor<3x3x4x4xf32>
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 26.2K bytes
  19. tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_to_nchw.mlir

    // RUN: tf-opt %s -tf-layout-optimization=force-data-format=NCHW -verify-diagnostics | FileCheck %s --dump-input=always
    
    // CHECK-LABEL: func @transposeConv2D
    func.func @transposeConv2D(%arg0: tensor<1x3x32x32xf32>, %arg1: tensor<1x1x3x8xf32>) -> tensor<1x8x32x32xf32> {
    
      // Convert input: NCHW -> NHWC
      %0 = "tf.Const"() {value = dense<[0, 2, 3, 1]> : tensor<4xi32>} : () -> tensor<4xi32>
    - Last Modified: Thu Mar 24 05:47:26 UTC 2022
    - 1.3K bytes
  20. tensorflow/compiler/mlir/lite/tests/quantize-dynamic-range-float16.mlir

      func.return %17 : tensor<1x2x3xf32>
    
      // CHECK: %[[NONE:.*]] = "tfl.no_value"() <{value}> : () -> none
      // CHECK: %[[DQ_1:.*]] = "tfl.dequantize"({{.*}}) : (tensor<1x1xf16>) -> tensor<1x1xf32>
      // CHECK: %[[DQ_2:.*]] = "tfl.dequantize"({{.*}}) : (tensor<1x1xf16>) -> tensor<1x1xf32>
      // CHECK: %[[DQ_3:.*]] = "tfl.dequantize"({{.*}}) : (tensor<1x1xf16>) -> tensor<1x1xf32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 4.6K bytes