Results 1 - 10 of 28 for 7xf32 (0.12 sec)

  1. tensorflow/compiler/mlir/lite/experimental/tac/tests/e2e/device-transform-nnapi.mlir

    module {
      // CHECK-LABEL: main
      func.func @main(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>) -> tensor<4xf32> {
        %0 = "tfl.squared_difference"(%arg0, %arg1) : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
        func.return %0 : tensor<4xf32>
        // CHECK:  [[VAL_0:%.*]] = tfl.sub %arg0, %arg1 {fused_activation_function = "NONE"} : tensor<4xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 1.2K bytes - Viewed (0)
  2. tensorflow/compiler/mlir/lite/quantization/tests/import_quant_stats.mlir

    
    // CHECK-LABEL: import_stats_skip
    func.func @import_stats_skip(%arg0: tensor<4xf32>, %cst: tensor<i32>) -> (tensor<2xf32>,tensor<2xf32>) {
      %0:2 = "tfl.split"(%cst, %arg0) {num_splits = 2 : i32} : (tensor<i32>, tensor<4xf32>) -> (tensor<2xf32>, tensor<2xf32>)
        loc(fused["skip1", "skip2.cc":10:8, callsite("op" at "skip3.cc":10:8)])
      func.return %0#0, %0#1 : tensor<2xf32>, tensor<2xf32>
    
    // CHECK-NEXT: "tfl.split"
    // CHECK-NEXT: return
    }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 01 06:25:50 UTC 2024 - 2.8K bytes - Viewed (0)
  3. tensorflow/compiler/mlir/lite/quantization/tensorflow/tests/fallback_to_flex_ops_legacy.mlir

    }
    
    // CHECK-LABEL: add
    func.func @add(%arg0: tensor<1xf32>, %arg1: tensor<1xf32>) -> tensor<1xf32> {
      %0 = "tf.AddV2"(%arg0, %arg1) : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
      func.return %0: tensor<1xf32>
    // CHECK: %[[ADD_0:.*]] = "tf.AddV2"(%arg0, %arg1) : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
    // CHECK: return %[[ADD_0]] : tensor<1xf32>
    }
    
    // CHECK-LABEL: softmax
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 5.8K bytes - Viewed (0)
  4. tensorflow/compiler/mlir/lite/tests/quantize-dynamic-range-float16.mlir

            none, none, none,
            tensor<3xf32>, tensor<3xf32>, tensor<3xf32>, tensor<3xf32>,
            none, none,
            tensor<1x3xf32>, tensor<1x3xf32>,
            none, none, none, none) -> tensor<1x2x3xf32>
      %17 = "quantfork.stats"(%16) {layerStats = dense<[-0.1, 0.1]> : tensor<2xf32>} : (tensor<1x2x3xf32>) -> tensor<1x2x3xf32>
      func.return %17 : tensor<1x2x3xf32>
    
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 4.6K bytes - Viewed (0)
  5. tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/math.mlir

      // Confirm that attributes that cannot be stored in the flatbuffer options
      // for a given operator are dropped silently.
      %1 = "tfl.squared_difference"(%arg0, %0) {fused_activation_function = "NONE"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> loc("squared_difference")
      %2 = "tfl.mul"(%arg0, %1) {fused_activation_function = "NONE"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> loc("mul")
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 1.4K bytes - Viewed (0)
  6. tensorflow/compiler/mlir/lite/experimental/tac/tests/e2e/simple-graph.mlir

    module {
    func.func @main(%arg0: tensor<1xf32>, %arg1: tensor<1xf32>, %arg2: tensor<1xf32>, %arg3: tensor<1xf32>) -> tensor<2x1xf32> attributes {tf.entry_function = {inputs = "input0,input1,input2,input3", outputs = "output"}} {
      %0 = "tfl.add"(%arg0, %arg1) {fused_activation_function = "RELU6"} : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
      %1 = "tfl.mul"(%0, %arg2) {fused_activation_function = "RELU6"} : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 1.6K bytes - Viewed (0)
  7. tensorflow/compiler/mlir/tensorflow/tests/tpu-resource-read-for-write.mlir

      %fill = "tf.Fill"(%cst_0, %cst) : (tensor<1xi64>, tensor<f32>) -> tensor<1xf32>
      tf_device.replicate([%0, %fill] as %arg_r0: tensor<1xf32>) {n = 2 : i32} {
        %1 = "tf_device.launch"() <{device = "TPU_REPLICATED_HOST_0"}> ({
          %2 = "tf.Identity"(%arg_r0) : (tensor<1xf32>) -> tensor<1xf32>
          tf_device.return %2 : tensor<1xf32>
        }) : () -> tensor<1xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 16:54:40 UTC 2024 - 5.3K bytes - Viewed (0)
  8. tensorflow/compiler/mlir/lite/experimental/tac/execution_metadata_exporter_test.cc

      %2 = "tfl.add"(%arg0, %arg3) {fused_activation_function = "RELU6", per_device_costs = {CPU = 5.0 : f32, GPU = 1.0 : f32}, tac.device = "GPU"} : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
      %3 = "tfl.pack"(%1, %2) {axis = 0 : i32, per_device_costs = {CPU = 2.0 : f32, GPU = -1.0 : f32}, values_count = 2 : i32, tac.device = "CPU"} : (tensor<1xf32>, tensor<1xf32>) -> tensor<2x1xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 11 06:11:34 UTC 2024 - 6K bytes - Viewed (0)
  9. tensorflow/compiler/mlir/lite/tests/default_quant_params.mlir

    // CHECK-NEXT:   "tf.LayerNorm"(%arg4, %arg5, %arg6, %arg7) {_tfl_quant_trait = "fully_quantizable", device = ""} : (tensor<128x128xf32>, tensor<1xf32>, tensor<1xf32>, tensor<1xi32>) -> tensor<128x128xf32>
    // CHECK-NEXT:   "tfl.yield"
    // CHECK-NEXT: }) {_tfl_quant_trait = "fully_quantizable", device = ""} :
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 8.8K bytes - Viewed (0)
  10. tensorflow/compiler/mlir/lite/stablehlo/odml_converter/tests/shlo_simplify.mlir

      %1 = stablehlo.constant dense<[4.0, 6.0]> : tensor<2xf32>
      %2 = stablehlo.divide %0, %1 : tensor<2xf32>
      return %2 : tensor<2xf32>
    }
    
    // CHECK-LABEL: foldDivLHSSplat
    // CHECK: stablehlo.constant dense<[5.000000e-01, 0.333333343]> : tensor<2xf32>
    
    // -----
    
    func.func @foldDivRHSSplat() -> tensor<2xf32> {
      %0 = stablehlo.constant dense<[4.0, 6.0]> : tensor<2xf32>
      %1 = stablehlo.constant dense<2.0> : tensor<2xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 03:05:20 UTC 2024 - 2.8K bytes - Viewed (0)