Results 1 - 10 of 94 for 256xf32 (0.15 sec)

  1. tensorflow/compiler/mlir/lite/stablehlo/tests/unfuse_mhlo_batch_norm.mlir

    func.func @batchNormInference_2D_inner_features(
        %x: tensor<4x256xf32>, %scale: tensor<256xf32>, %offset: tensor<256xf32>,
        %mean: tensor<256xf32>, %variance: tensor<256xf32>)
        -> (tensor<4x256xf32>) {
      // CHECK-DAG: %[[EPS_BCAST:.+]] = mhlo.constant dense<1.001000e-05> : tensor<256xf32>
      // CHECK-DAG: %[[VARIANCE_EPS:.+]] = mhlo.add %[[VARIANCE]], %[[EPS_BCAST]] : tensor<256xf32>
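    (the inference arithmetic these CHECK lines exercise is sketched in NumPy after the results list)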
    - Last Modified: Sat Apr 06 15:32:52 UTC 2024
    - 10.4K bytes
  2. tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_to_nhwc.mlir

           {
             data_format = "NCHW",
             epsilon = 1.001000e-05 : f32
           } : (tensor<?x256x56x56xf32>, tensor<256xf32>, tensor<256xf32>, tensor<256xf32>, tensor<256xf32>)
            -> (tensor<?x256x56x56xf32>, tensor<256xf32>, tensor<256xf32>, tensor<256xf32>, tensor<256xf32>, tensor<*xf32>)
    
      // CHECK: %[[BATCH_NORM1:[_a-z0-9]*]], {{.*}} = "tf.FusedBatchNormV3"
      // CHECK-SAME: %[[CONV1]]
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 7.3K bytes
  3. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/unfuse_mhlo_batch_norm.mlir

    func.func @unfuse_batch_norm(
        %x: tensor<4x256xf32>, %scale: tensor<256xf32>, %offset: tensor<256xf32>,
        %mean: tensor<256xf32>, %variance: tensor<256xf32>)
        -> (tensor<4x256xf32>) {
      // CHECK-DAG: %[[EPS_BCAST:.+]] = mhlo.constant dense<1.001000e-05> : tensor<256xf32>
      // CHECK-DAG: %[[VARIANCE_EPS:.+]] = mhlo.add %[[VARIANCE]], %[[EPS_BCAST]] : tensor<256xf32>
    - Last Modified: Sat Apr 06 15:32:52 UTC 2024
    - 2.1K bytes
  4. tensorflow/compiler/mlir/quantization/tensorflow/tests/prepare_lifting.mlir

      %1 = "tf.BiasAdd"(%0, %cst_0) {data_format = "NHWC"} : (tensor<?x?x?x256xf32>, tensor<256xf32>) -> tensor<?x?x?x256xf32>
      %2 = "tf.Mul"(%1, %cst_1) : (tensor<?x?x?x256xf32>, tensor<256xf32>) -> tensor<?x?x?x256xf32>
      func.return %2 : tensor<?x?x?x256xf32>
    }
    // CHECK: func @conv2d_with_large_weight_and_mul
    - Last Modified: Wed Feb 14 03:24:59 UTC 2024
    - 33.3K bytes
  5. tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_layout_assignment_to_nhwc.mlir

      func.return %y : tensor<1x64x28x28xf32>
    }
    
    // CHECK-LABEL: bias_add_nchw
    func.func @bias_add_nchw(%arg0: tensor<1x256x150x150xf32>, %arg1: tensor<256xf32>) -> tensor<1x256x150x150xf32> {
      // CHECK: (%[[ARG0:.*]]: tensor<1x256x150x150xf32>, %[[ARG1:.*]]: tensor<256xf32>)
      // CHECK: %[[CST:.*]] = "tf.Const"() <{value = dense<[0, 2, 3, 1]> : tensor<4xi64>}>
      // CHECK: %[[R0:.*]] = "tf.Transpose"(%[[ARG0]], %[[CST]])
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 4.5K bytes
  6. tensorflow/compiler/mlir/lite/stablehlo/tests/legalize_hlo.mlir

    // CHECK:           %[[VAL_5:.*]] = "tf.Tanh"(%[[VAL_4]]) : (tensor<2xf32>) -> tensor<2xf32>
    // CHECK:           %[[VAL_6:.*]] = "tf.Mul"(%[[VAL_5]], %[[VAL_3]]) : (tensor<2xf32>, tensor<2xf32>) -> tensor<2xf32>
    // CHECK:           %[[VAL_7:.*]] = "tf.AddV2"(%[[VAL_6]], %[[VAL_3]]) : (tensor<2xf32>, tensor<2xf32>) -> tensor<2xf32>
    // CHECK:           return %[[VAL_7]] : tensor<2xf32>
    - Last Modified: Wed May 29 07:26:59 UTC 2024
    - 340.2K bytes
  7. tensorflow/compiler/mlir/lite/tests/optimize.mlir

      %1 = "tfl.sum"(%0, %cst) {keep_dims = false} : (tensor<2x2xf32>, tensor<1xi32>) -> tensor<f32>
      %2 = "tfl.sqrt"(%1) : (tensor<f32>) -> tensor<f32>
      %3 = "tfl.div"(%arg0, %2) {fused_activation_function = "NONE"} : (tensor<2x2xf32>, tensor<f32>) -> tensor<2x2xf32>
      func.return %3: tensor<2x2xf32>
    - Last Modified: Thu May 16 20:31:41 UTC 2024
    - 284.1K bytes
  8. tensorflow/compiler/mlir/lite/tests/prepare-quantize.mlir

      %3 = "tfl.dequantize"(%2) : (tensor<2x3x!quant.uniform<i16:f32, 1.0>>) -> (tensor<2x3xf32>)
      %4 = "tfl.concatenation"(%1, %3) {axis = -1 : i32, fused_activation_function = "NONE"} : (tensor<2x1xf32>, tensor<2x3xf32>) -> tensor<2x4xf32>
      %5 = "tfl.add"(%4, %arg2) {fused_activation_function = "NONE"} : (tensor<2x4xf32>, tensor<2x4xf32>) -> tensor<2x4xf32>
      func.return %5: tensor<2x4xf32>
    
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 67.5K bytes
  9. tensorflow/compiler/mlir/lite/stablehlo/tests/tfl_legalize_hlo.mlir

    // CHECK-NEXT:      %17 = mhlo.dynamic_reshape %arg1, %16 : (tensor<4x?x256xf32>, tensor<3xi32>) -> tensor<4x?x256xf32>
    // CHECK-NEXT:      %18 = "tfl.batch_matmul"(%8, %17) <{adj_x = false, adj_y = false, asymmetric_quantize_inputs = false}> : (tensor<4x4x?xf32>, tensor<4x?x256xf32>) -> tensor<4x4x256xf32>
    // CHECK-NEXT:      %19 = mhlo.reshape %18 : (tensor<4x4x256xf32>) -> tensor<4x4x256xf32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 40.1K bytes
  10. tensorflow/compiler/mlir/tensorflow/tests/lower_tf.mlir

      // CHECK-DAG: %[[EXP:.*]] = "tf.Exp"(%[[SHIFTED]]) : (tensor<2x3xf32>) -> tensor<2x3xf32>
      // CHECK-DAG: %[[SUM:.*]] = "tf.Sum"(%[[EXP]], %[[AXIS]]) <{keep_dims = true}> : (tensor<2x3xf32>, tensor<1xi64>) -> tensor<2x1xf32>
      // CHECK-DAG: %[[RESULT:.*]] = "tf.Div"(%[[EXP]], %[[SUM]]) : (tensor<2x3xf32>, tensor<2x1xf32>) -> tensor<2x3xf32>
      // CHECK: return %[[RESULT]]
    - Last Modified: Fri Jan 05 18:35:42 UTC 2024
    - 92K bytes
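
In several of these hits, tensor<256xf32> is the per-channel operand shape of a batch-norm op: results 1 and 3 test unfusing mhlo batch-norm inference into elementwise arithmetic, and result 2 shows a tf.FusedBatchNormV3 in NCHW layout. For orientation only, below is a minimal NumPy sketch of the inference arithmetic that the EPS_BCAST and VARIANCE_EPS CHECK lines in results 1 and 3 exercise; the function and variable names are illustrative, not taken from the tests.

    import numpy as np

    def batch_norm_inference(x, scale, offset, mean, variance, eps=1.001e-05):
        # Broadcast epsilon to the per-channel shape and add it to the variance,
        # mirroring the EPS_BCAST / VARIANCE_EPS steps checked in results 1 and 3.
        variance_eps = variance + np.full_like(variance, eps)
        # Normalize along the feature axis, then apply per-channel scale and offset.
        return (x - mean) / np.sqrt(variance_eps) * scale + offset

    # Shapes from the test signature: x is 4x256, the others are length-256 vectors.
    x = np.ones((4, 256), dtype=np.float32)
    scale = np.full(256, 2.0, dtype=np.float32)
    offset = np.zeros(256, dtype=np.float32)
    mean = np.zeros(256, dtype=np.float32)
    variance = np.ones(256, dtype=np.float32)
    print(batch_norm_inference(x, scale, offset, mean, variance).shape)  # (4, 256)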