Results 21 - 30 of 98 for 200xf32 (0.22 sec)

  1. tensorflow/compiler/mlir/lite/stablehlo/tests/legalize_hlo.mlir

    // CHECK:           %[[VAL_5:.*]] = "tf.Tanh"(%[[VAL_4]]) : (tensor<2xf32>) -> tensor<2xf32>
    // CHECK:           %[[VAL_6:.*]] = "tf.Mul"(%[[VAL_5]], %[[VAL_3]]) : (tensor<2xf32>, tensor<2xf32>) -> tensor<2xf32>
    // CHECK:           %[[VAL_7:.*]] = "tf.AddV2"(%[[VAL_6]], %[[VAL_3]]) : (tensor<2xf32>, tensor<2xf32>) -> tensor<2xf32>
    // CHECK:           return %[[VAL_7]] : tensor<2xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 29 07:26:59 UTC 2024
    - 340.2K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/lite/tests/legalize-tf.mlir

      %0 = "tf.Squeeze"(%arg0) : (tensor<1x2x2xf32>) -> tensor<2x2xf32>
      func.return %0 : tensor<2x2xf32>
    
    // CHECK-LABEL: squeezeDefault
    // CHECK:  "tfl.squeeze"(%arg0) <{squeeze_dims = []}> : (tensor<1x2x2xf32>) -> tensor<2x2xf32>
    }
    
    func.func @squeezeSingleAxis(%arg0: tensor<2x1x2xf32>) -> tensor<2x2xf32> {
      %0 = "tf.Squeeze"(%arg0) {squeeze_dims = [1]} : (tensor<2x1x2xf32>) -> tensor<2x2xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 05 01:54:33 UTC 2024
    - 153.4K bytes
    - Viewed (0)
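
The snippet in result 2 exercises the TF-to-TFLite legalization of tf.Squeeze into tfl.squeeze. As a hedged aside (a NumPy analogue of the two cases shown above, not part of the test file), the shape effect is:

    import numpy as np

    # Sketch only: NumPy analogue of the tf.Squeeze cases above.
    x = np.zeros((1, 2, 2), dtype=np.float32)
    print(np.squeeze(x).shape)          # (2, 2) -- default: drop all size-1 dims (@squeezeDefault)
    y = np.zeros((2, 1, 2), dtype=np.float32)
    print(np.squeeze(y, axis=1).shape)  # (2, 2) -- squeeze_dims = [1] (@squeezeSingleAxis)
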
  3. tensorflow/compiler/mlir/lite/tests/const-fold.mlir

    func.func @add_dense_dense_float_mixing_1_n() -> tensor<2x2xf32> {
      %cst_0 = arith.constant dense<[[1.5, -2.5]]> : tensor<1x2xf32>
      %cst_1 = arith.constant dense<[[-3.], [4.]]> : tensor<2x1xf32>
    
      %0 = "tfl.add"(%cst_0, %cst_1) {fused_activation_function = "NONE"} : (tensor<1x2xf32>, tensor<2x1xf32>) -> tensor<2x2xf32>
    
      func.return %0 : tensor<2x2xf32>
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 45.8K bytes
    - Viewed (0)
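
The snippet in result 3 tests constant folding of a broadcasting tfl.add between a 1x2 and a 2x1 constant. A minimal NumPy sketch of the broadcast arithmetic the folder has to evaluate at compile time (the expected values below are inferred from the constants shown, not copied from the file):

    import numpy as np

    # Sketch: the broadcast add that constant folding should evaluate.
    cst_0 = np.array([[1.5, -2.5]], dtype=np.float32)    # tensor<1x2xf32>
    cst_1 = np.array([[-3.0], [4.0]], dtype=np.float32)  # tensor<2x1xf32>
    print(cst_0 + cst_1)                                  # expected folded tensor<2x2xf32>:
    # [[-1.5 -5.5]
    #  [ 5.5  1.5]]
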
  4. tensorflow/compiler/mlir/tensorflow/tests/shape_inference.mlir

      func.func @simple_chain_with_broadcast(%arg0: tensor<1xf32>, %arg1: tensor<10xf32>) -> tensor<?xf32> {
        // CHECK: %[[MUL:.*]] = "tf.Mul"{{.*}} (tensor<1xf32>, tensor<10xf32>) -> tensor<10xf32>
        // CHECK: %[[ADD:.*]] = "tf.Add"(%[[MUL]], %[[MUL]]) : (tensor<10xf32>, tensor<10xf32>) -> tensor<10xf32>
        // CHECK: %[[CAST:.*]] = "tf.Cast"(%[[ADD]]) {{.*}} : (tensor<10xf32>) -> tensor<*xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jan 23 17:24:10 UTC 2024
    - 167.4K bytes
    - Viewed (0)
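
Result 4 checks that shape inference refines the tensor<?xf32> return type by propagating NumPy-style broadcast semantics through tf.Mul and tf.Add. A hedged one-line sketch of the broadcast rule the CHECK lines rely on:

    import numpy as np

    # Sketch: broadcasting a size-1 operand against a size-10 operand yields shape (10,),
    # which is why both the Mul and the Add are expected to produce tensor<10xf32>.
    print(np.broadcast_shapes((1,), (10,)))  # (10,)
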
  5. tensorflow/compiler/mlir/lite/tests/prepare-composite-functions-tf.mlir

      %2 = "tf.Add"(%0, %1#2) : (tensor<f32>, tensor<?x10xf32>) -> tensor<?x10xf32>
      func.return
    }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 122.1K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/lite/tests/quantize.mlir

      %1:4 = "tfl.split"(%cst, %0) {num_splits = 4 : i32} : (tensor<i32>, tensor<4xf32>) -> (tensor<2xf32>, tensor<2xf32>, tensor<2xf32>, tensor<2xf32>)
      %2 = "tfl.quantize"(%1#0) {qtype = tensor<2x!quant.uniform<u8:f32, 1.0>>} : (tensor<2xf32>) -> tensor<2x!quant.uniform<u8:f32, 1.0>>
      %3 = "tfl.quantize"(%1#1) {qtype = tensor<2x!quant.uniform<u8:f32, 1.0>>} : (tensor<2xf32>) -> tensor<2x!quant.uniform<u8:f32, 1.0>>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 28 23:10:13 UTC 2024
    - 39.7K bytes
    - Viewed (0)
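
Result 6 quantizes each tfl.split output to !quant.uniform<u8:f32, 1.0>, i.e. unsigned 8-bit storage with scale 1.0 and an implicit zero point of 0. A hedged sketch of the standard affine quantization formula implied by that type (an illustration, not TFLite's actual kernel):

    import numpy as np

    # Sketch: uniform affine quantization to u8, matching scale = 1.0, zero_point = 0.
    def quantize_u8(x, scale=1.0, zero_point=0):
        q = np.round(x / scale) + zero_point
        return np.clip(q, 0, 255).astype(np.uint8)

    print(quantize_u8(np.array([0.4, 3.6], dtype=np.float32)))  # [0 4]
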
  7. tensorflow/compiler/mlir/lite/tests/prepare-tf.mlir

      // CHECK:  %[[MUL2:.*]] = "tf.Mul"(%arg0, %[[MUL1]]) : (tensor<1x1x6x2xf32>, tensor<2xf32>) -> tensor<1x1x6x2xf32>
      // CHECK:  %[[MUL3:.*]] = "tf.Mul"(%[[MEAN]], %[[MUL1]]) : (tensor<2xf32>, tensor<2xf32>) -> tensor<2xf32>
      // CHECK:  %[[SUB:.*]] = "tf.Sub"(%arg2, %[[MUL3]]) : (tensor<2xf32>, tensor<2xf32>) -> tensor<2xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 29 07:26:59 UTC 2024
    - 59.8K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/lite/stablehlo/tests/unfuse_mhlo_batch_norm.mlir

    func.func @batchNormInference_2D_inner_features(
        %x: tensor<4x256xf32>, %scale: tensor<256xf32>, %offset: tensor<256xf32>,
        %mean: tensor<256xf32>, %variance: tensor<256xf32>)
        -> (tensor<4x256xf32>) {
      // CHECK-DAG: %[[EPS_BCAST:.+]] = mhlo.constant dense<1.001000e-05> : tensor<256xf32>
      // CHECK-DAG: %[[VARIANCE_EPS:.+]] = mhlo.add %[[VARIANCE]], %[[EPS_BCAST]] : tensor<256xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Apr 06 15:32:52 UTC 2024
    - 10.4K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/unfuse_mhlo_batch_norm.mlir

    func.func @unfuse_batch_norm(
        %x: tensor<4x256xf32>, %scale: tensor<256xf32>, %offset: tensor<256xf32>,
        %mean: tensor<256xf32>, %variance: tensor<256xf32>)
        -> (tensor<4x256xf32>) {
      // CHECK-DAG: %[[EPS_BCAST:.+]] = mhlo.constant dense<1.001000e-05> : tensor<256xf32>
      // CHECK-DAG: %[[VARIANCE_EPS:.+]] = mhlo.add %[[VARIANCE]], %[[EPS_BCAST]] : tensor<256xf32>
      // CHECK-DAG: %[[STDDEV:.+]] = mhlo.sqrt %[[VARIANCE_EPS]] : tensor<256xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Apr 06 15:32:52 UTC 2024
    - 2.1K bytes
    - Viewed (0)
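
Results 8 and 9 check the same unfusing of mhlo batch-norm inference into elementwise ops: broadcast an epsilon constant, add it to the variance, take the square root, then normalize. A hedged NumPy sketch of that inference formula, with the epsilon and shapes borrowed from the snippets:

    import numpy as np

    # Sketch of unfused batch-norm inference, mirroring the CHECK lines above:
    # variance + eps, sqrt, then (x - mean) / stddev * scale + offset.
    def batch_norm_inference(x, scale, offset, mean, variance, eps=1.001e-5):
        stddev = np.sqrt(variance + eps)
        return (x - mean) / stddev * scale + offset

    x = np.ones((4, 256), dtype=np.float32)                              # tensor<4x256xf32>
    scale, offset, mean, variance = (np.ones(256, dtype=np.float32) for _ in range(4))
    print(batch_norm_inference(x, scale, offset, mean, variance).shape)  # (4, 256)
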
  10. tensorflow/compiler/mlir/tensorflow/tests/device_copy.mlir

      %outputs_1, %outputs_2 = "tf.IdentityN"(%outputs, %outputs_0) {device = "TPU"} : (tensor<2x2xf32>, tensor<2x2xf32>) -> (tensor<2x2xf32>, tensor<2x2xf32>)
      func.return %outputs_0, %outputs_1 : tensor<2x2xf32>, tensor<2x2xf32>
    }
    
    // CHECK-LABEL: func @keep_identity_n_test
    func.func @keep_identity_n_test(%arg0: tensor<2x2xf32>, %arg1: tensor<2x2xf32>) -> (tensor<2x2xf32>, tensor<2x2xf32>) {
        // CHECK: tf.MatMul
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Mar 28 12:06:33 UTC 2022
    - 5.2K bytes
    - Viewed (0)