Results 11 - 20 of 44 for 2x5x3xf32 (0.37 sec)
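
For readers scanning the list below: the query string 2x5x3xf32 is the shape-and-element-type portion of an MLIR ranked tensor type, i.e. a rank-3 tensor of shape 2x5x3 holding 32-bit floats. As a minimal, hypothetical illustration (this function does not appear in any of the matched files):

    // Hypothetical example only, written to show the type syntax; not file content.
    func.func @identity_2x5x3(%arg0: tensor<2x5x3xf32>) -> tensor<2x5x3xf32> {
      func.return %arg0 : tensor<2x5x3xf32>
    }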

  1. tensorflow/compiler/mlir/lite/tests/prepare-quantize-signed.mlir

    // CHECK-NEXT: return %[[dq]] : tensor<2x2xf32>
    }
    
    // CHECK-LABEL: prepareStatistics
    func.func @prepareStatistics(%arg0: tensor<8x4x3xf32>) -> tensor<8x4x3xf32> {
      %0 = "quantfork.stats"(%arg0) {
        layerStats = dense<[-1.0, 1.0]> : tensor<2xf32>
      } : (tensor<8x4x3xf32>) -> tensor<8x4x3xf32>
      %1 = "quantfork.stats"(%0) {
        layerStats = dense<[-1.0, 1.0]> : tensor<2xf32>,
        axisStats = dense<[
          [-1.0, 1.0],
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 18.4K bytes
  2. tensorflow/compiler/mlir/tensorflow/tests/lower_tf.mlir

      // CHECK:  %[[MUL:.*]] = "tf.Mul"(%[[X]], %[[Y]]) : (tensor<2x3xf32>, tensor<3xf32>) -> tensor<2x3xf32>
      // CHECK:  %[[RESULT:.*]] = "tf.SelectV2"(%[[IS_ZERO]], %[[ZERO]], %[[MUL]]) : (tensor<3xi1>, tensor<f32>, tensor<2x3xf32>) -> tensor<2x3xf32>
      %0 = "tf.MulNoNan"(%arg0, %arg1) : (tensor<2x3xf32>, tensor<3xf32>) -> tensor<2x3xf32>
    
      // CHECK: return %[[RESULT]]
      func.return %0 : tensor<2x3xf32>
    }
    
    // CHECK-LABEL: @is_inf
    - Last Modified: Fri Jan 05 18:35:42 UTC 2024
    - 92K bytes
  3. tensorflow/compiler/mlir/lite/tests/legalize-tf.mlir

      // CHECK:  return
    }
    
    func.func @pow(%arg0: tensor<2x1x3xf32>, %arg1: tensor<2x1x1xf32>) -> tensor<2x1x3xf32> {
      %0 = "tf.Pow"(%arg0, %arg1) : (tensor<2x1x3xf32>, tensor<2x1x1xf32>) -> tensor<2x1x3xf32>
      func.return %0 : tensor<2x1x3xf32>
    
      // CHECK-LABEL: pow
      // CHECK:  %[[pow:.*]] = tfl.pow(%arg0, %arg1) : (tensor<2x1x3xf32>, tensor<2x1x1xf32>) -> tensor<2x1x3xf32>
      // CHECK:  return
    }
    
    - Last Modified: Wed Jun 05 01:54:33 UTC 2024
    - 153.4K bytes
  4. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize/quantize_same_scale.mlir

        // CHECK: %[[Q1:.*]] = "quantfork.qcast"(%[[ARG0]]) {volatile} : (tensor<1x2xf32>) -> tensor<1x2x!quant.uniform<i8:f32, 5.000000e-03>>
        // CHECK: %[[Q2:.*]] = "quantfork.qcast"(%[[ARG1]]) {volatile} : (tensor<2x3xf32>) -> tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {6.000000e-03,6.000000e-03,6.000000e-03}>>
        // CHECK: %[[CALL:.*]] = call @quantized_dot_general_fn_1(%[[Q1]], %[[Q2]])
    - Last Modified: Tue May 14 17:10:32 UTC 2024
    - 35.4K bytes
  5. tensorflow/compiler/mlir/lite/tests/optimize.mlir

      %1 = "tfl.neg"(%arg0) : (tensor<2x3xf32>) -> tensor<2x3xf32>
      %2 = "tfl.relu"(%1) : (tensor<2x3xf32>) -> tensor<2x3xf32>
      %3 = "tfl.mul"(%alpha, %2) {fused_activation_function = "NONE"} : (tensor<f32>, tensor<2x3xf32>) -> tensor<2x3xf32>
      %4 = "tfl.add"(%0, %3) {fused_activation_function = "NONE"} : (tensor<2x3xf32>, tensor<2x3xf32>) -> tensor<2x3xf32>
      func.return %4 : tensor<2x3xf32>
    
    - Last Modified: Thu May 16 20:31:41 UTC 2024
    - 284.1K bytes
  6. tensorflow/compiler/mlir/tensorflow/tests/shape_inference.mlir

      // CHECK-SAME: ({{%.+}}: tensor<1x2x3xf32>)
      // CHECK-SAME: -> (tensor<1x8x3xf32>, tensor<1x8x3xf32>)
      func.func @while_shape_invariant_different_dims(%arg0: tensor<1x2x3xf32>) -> (tensor<1x8x3xf32>, tensor<1x8x3xf32>) {
        // CHECK: "tf.While"
        // CHECK-SAME: (tensor<1x2x3xf32>)
        // CHECK-SAME: -> tensor<1x8x3xf32>
    - Last Modified: Tue Jan 23 17:24:10 UTC 2024
    - 167.4K bytes
  7. tensorflow/compiler/mlir/lite/stablehlo/tests/tf-tfl-translate-tf-quantize.mlir

    
    module {
    func.func @tfInplaceUpdate(%arg0: tensor<2x1x2xf32>) -> tensor<2x1x2xf32> {
      %1 = arith.constant dense<1> : tensor<1xi32>
      %2 = arith.constant dense<2.0> : tensor<1x1x2xf32>
      %3 = "tf.InplaceUpdate"(%arg0, %1, %2) {device = ""}
        : (tensor<2x1x2xf32>, tensor<1xi32>, tensor<1x1x2xf32>) -> tensor<2x1x2xf32>
      func.return %3 : tensor<2x1x2xf32>
    }
    }
    
    //CHECK: module {
    - Last Modified: Sun Apr 14 18:33:43 UTC 2024
    - 1.1K bytes
  8. tensorflow/compiler/mlir/lite/stablehlo/tests/legalize-inplaceupdate.mlir

    func.func @tfInplaceUpdate(%arg0: tensor<2x1x2xf32>) -> tensor<2x1x2xf32> {
      %1 = arith.constant dense<1> : tensor<1xi32>
      %2 = arith.constant dense<2.0> : tensor<1x1x2xf32>
      %3 = "tf.InplaceUpdate"(%arg0, %1, %2) {device = ""}
        : (tensor<2x1x2xf32>, tensor<1xi32>, tensor<1x1x2xf32>) -> tensor<2x1x2xf32>
      func.return %3 : tensor<2x1x2xf32>
    }
    
    }
    
    // CHECK-LABEL: @tfInplaceUpdate
    - Last Modified: Fri Dec 16 05:09:09 UTC 2022
    - 993 bytes
  9. tensorflow/compiler/mlir/lite/stablehlo/tests/tfl_legalize_hlo.mlir

    func.func @convert_dot_general_dynamic_batch_dim(%arg0: tensor<2x?x2x3xf32>, %arg1: tensor<2x?x4x3xf32>) -> tensor<2x?x2x4xf32> {
    %0 = "mhlo.dot_general"(%arg0, %arg1) {
      dot_dimension_numbers = #mhlo.dot<
        lhs_batching_dimensions = [0, 1],
        rhs_batching_dimensions = [0, 1],
        lhs_contracting_dimensions = [3],
        rhs_contracting_dimensions = [3]
      >} : (tensor<2x?x2x3xf32>, tensor<2x?x4x3xf32>) -> tensor<2x?x2x4xf32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 40.1K bytes
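
As an aside on the mhlo.dot_general excerpt in result 9 above, the result type tensor<2x?x2x4xf32> follows from the dimension numbers shown; an informal derivation (comments only, not part of the file):

    // lhs = tensor<2x?x2x3xf32>, rhs = tensor<2x?x4x3xf32>
    // batching dims [0, 1] are shared by both operands -> leading dims 2 x ?
    // contracting dims [3] and [3] are summed away     -> the size-3 dims drop out
    // the remaining lhs dim (size 2) and rhs dim (size 4) are appended
    // result = tensor<2x?x2x4xf32>
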
  10. tensorflow/compiler/mlir/quantization/stablehlo/tests/bridge/optimize.mlir

        >} : (
          tensor<2x5x6xi8>, tensor<6x8x2xi8>
        ) -> tensor<2x5x8xi32>
      %1 = chlo.broadcast_add %0, %zp_offset : (
          tensor<2x5x8xi32>, tensor<2x5x8xi32>) -> tensor<2x5x8xi32>
      %2 = chlo.broadcast_add %1, %bias : (
          tensor<2x5x8xi32>, tensor<2x5x8xi32>) -> tensor<2x5x8xi32>
      return %2 : tensor<2x5x8xi32>
    - Last Modified: Sat Feb 24 02:26:47 UTC 2024
    - 10.7K bytes