Results 1 - 10 of 87 for 7xf32 (0.83 sec)

  1. tensorflow/compiler/mlir/lite/tests/const-fold.mlir

      %7 = "tfl.add"(%2, %1) {fused_activation_function = "NONE"} : (tensor<4xf32>, tensor<  f32>) -> tensor<4xf32>
      %8 = "tfl.add"(%2, %3) {fused_activation_function = "NONE"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
      %9 = "tfl.add"(%2, %3) {fused_activation_function = "SIGN_BIT"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 45.8K bytes
    - Viewed (0)
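
    The %7 line above adds a tensor<4xf32> value to a broadcast scalar tensor<f32>; since the file is a
    constant-folding test, the operands are typically constants. A minimal, self-contained sketch of that
    pattern, with illustrative names and values not taken from the file:

      func.func @add_splat_scalar_fold() -> tensor<4xf32> {
        %lhs = arith.constant dense<3.5> : tensor<4xf32>
        %rhs = arith.constant dense<1.5> : tensor<f32>
        // With constant folding, this broadcasted add should fold to a single
        // dense<5.0> : tensor<4xf32> constant.
        %sum = "tfl.add"(%lhs, %rhs) {fused_activation_function = "NONE"} : (tensor<4xf32>, tensor<f32>) -> tensor<4xf32>
        func.return %sum : tensor<4xf32>
      }
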
  2. tensorflow/compiler/mlir/tf2xla/tests/legalize-tf.mlir

      func.return %0#0 : tensor<?x?x?x?xf32>
    }
    
    // -----
    
    // CHECK-LABEL: fusedBatchNormV3_training_dynamic_unsupported1
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon May 06 18:46:23 UTC 2024
    - 335.5K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/tensorflow/tests/canonicalize.mlir

      %3 = "tf.AddN"(%1, %arg0, %1) : (tensor<2xf32>, tensor<2xf32> , tensor<2xf32>) -> tensor<2xf32>
      %4 = "tf.AddN"(%1, %1) : (tensor<2xf32>, tensor<2xf32>) -> tensor<2xf32>
      %5 = "tf.AddN"(%arg0, %1, %0) : (tensor<2xf32>, tensor<2xf32>, tensor<2xf32>) -> tensor<2xf32>
      func.return %2, %3, %4, %5: tensor<2xf32>, tensor<2xf32>, tensor<2xf32>, tensor<2xf32>
    }
    
    // CHECK-LABEL: func @addNWithZerosInt
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 09 22:07:10 UTC 2024
    - 132.1K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/tests/ops.mlir

      %0 = "tfl.unidirectional_sequence_lstm"(%arg0,...
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jun 06 19:09:08 UTC 2024
    - 189.2K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/lite/tests/prepare-quantize-post-training-16bits.mlir

      %10 = "tfl.pseudo_const"() {value = dense<0.000000e+00> : tensor<3xf32>} : () -> tensor<3xf32>
      %11 = "tfl.pseudo_const"() {value = dense<1.000000e+00> : tensor<3xf32>} : () -> tensor<3xf32>
      %recurrent_input = "tfl.pseudo_const"() {value = dense<0.000000e+00> : tensor<1x3xf32>} : () -> tensor<1x3xf32>
      %recurrent_stats = "quantfork.stats"(%recurrent_input) {layerStats = dense<[0.0, 1.0]> : tensor<2xf32>} : (tensor<1x3xf32>) -> tensor<1x3xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 26.1K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/lite/experimental/tac/tests/device-transform-gpu.mlir

      func.return %0 : tensor<*xf32>
    }
    
    // CHECK-NOT: "tfl.reshape"
    // CHECK: "tfl.pack"
    
    // -----
    
    func.func @squaredDifference(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>) -> tensor<4xf32> {
      %0 = "tfl.squared_difference"(%arg0, %arg1) : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
      func.return %0 : tensor<4xf32>
    }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 15.6K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/lite/tests/legalize-tf.mlir

    }
    
    func.func @div(%arg0: tensor<1xf32>, %arg1: tensor<1xf32>) -> tensor<1xf32> {
      %0 = "tf.Div"(%arg0, %arg1) : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
      func.return %0: tensor<1xf32>
    
    // CHECK-LABEL: div
    // CHECK:  tfl.div %arg0, %arg1 {fused_activation_function = "NONE"} : tensor<1xf32>
    // CHECK:  return
    }
    
    func.func @squaredDifferenceRelu(tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32> {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 05 01:54:33 UTC 2024
    - 153.4K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/lite/tests/prepare-tf.mlir

      // Use other output
      %3:6 = "tf.FusedBatchNormV3"( %2#0, %arg1, %arg2, %arg3, %arg4) {T = "tfdtype$DT_FLOAT", U = "tfdtype$DT_FLOAT", data_format = "NHWC", epsilon = 0.001 : f32, is_training = false} : (tensor<8x8x8x8xf32>, tensor<8xf32>, tensor<8xf32>, tensor<8xf32>, tensor<8xf32>) -> (tensor<8x8x8x8xf32>, tensor<8xf32>, tensor<8xf32>, tensor<8xf32>, tensor<8xf32>, tensor<8xf32>)
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 29 07:26:59 UTC 2024
    - 59.8K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/lite/tests/quantize-numeric-verify.mlir

      %1:2 = "tfl.split"(%cst, %0) {num_splits = 2 : i32} : (tensor<i32>, tensor<4xf32>) -> (tensor<2xf32>, tensor<2xf32>)
      %2 = "tfl.quantize"(%1#0) {qtype = tensor<2x!quant.uniform<u8:f32, 1.0>>} : (tensor<2xf32>) -> tensor<2x!quant.uniform<u8:f32, 1.0>>
      %3 = "tfl.quantize"(%1#1) {qtype = tensor<2x!quant.uniform<u8:f32, 1.0>>} : (tensor<2xf32>) -> tensor<2x!quant.uniform<u8:f32, 1.0>>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 15.1K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/lite/quantization/tensorflow/tests/fallback_to_flex_ops_default.mlir

    // CHECK-LABEL: floor_mod
    func.func @floor_mod(%arg0: tensor<5xf32>, %arg1: tensor<5xf32>) -> tensor<5xf32> {
      %0 = "tf.FloorMod"(%arg0, %arg1) : (tensor<5xf32>, tensor<5xf32>) -> tensor<5xf32>
      func.return %0 : tensor<5xf32>
    // CHECK: %[[CUSTOM_0:.*]] = "tfl.custom"(%arg0, %arg1) <{custom_code = "FlexFloorMod", custom_option = #tfl<const_bytes : "{{.*}}">}> : (tensor<5xf32>, tensor<5xf32>) -> tensor<5xf32>
    // CHECK: return %[[CUSTOM_0]] : tensor<5xf32>
    }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 13.4K bytes
    - Viewed (0)
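
The query string 7xf32 is a fragment of MLIR tensor-type syntax: a shape such as tensor<7xf32> denotes a rank-1 tensor of seven f32 elements, and the same suffix also appears in the trailing dimensions of higher-rank shapes (for example tensor<1x7xf32>). A minimal, hypothetical TFLite-dialect function using that type, for illustration only and not taken from any of the files above:

      func.func @scale_7xf32(%arg0: tensor<7xf32>) -> tensor<7xf32> {
        %cst = arith.constant dense<2.0> : tensor<7xf32>
        // Element-wise multiply of two rank-1 tensors, each holding seven f32 values.
        %0 = "tfl.mul"(%arg0, %cst) {fused_activation_function = "NONE"} : (tensor<7xf32>, tensor<7xf32>) -> tensor<7xf32>
        func.return %0 : tensor<7xf32>
      }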