Results 51 - 60 of 63 for 2x8xf32 (0.13 sec)

  1. tensorflow/compiler/mlir/lite/tests/post-quantize.mlir

    }
    
    func.func @main2(%arg0: tensor<2x4xf32>, %arg1: tensor<2x4xf32>) -> tensor<2x4xf32> {
      %0 = "tfl.quantize"(%arg0) {qtype = tensor<2x4x!quant.uniform<u8:f32, 0.49803921568627452>>} : (tensor<2x4xf32>) -> tensor<2x4x!quant.uniform<u8:f32, 0.49803921568627452>>
      %1 = "tfl.quantize"(%arg1) {qtype = tensor<2x4x!quant.uniform<u8:f32, 0.49803921568627452>>} : (tensor<2x4xf32>) -> tensor<2x4x!quant.uniform<u8:f32, 0.49803921568627452>>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 19.9K bytes
  2. tensorflow/compiler/mlir/tfrt/tests/tf_to_corert/attributes.mlir

      func.return
    }
    
    // CHECK-LABEL: func @basic
    func.func @basic(
        %arg0: tensor<3x1xf32>,
        %arg1: tensor<!tf_type.resource<tensor<1x3xf32>>>) -> (tensor<3x3xf32>) {
      %1 = "tf.ReadVariableOp"(%arg1) {_output_shapes = ["tfshape$dim { size: 1 } dim { size: 3 }"], device = "/device:CPU:0", dtype = f32} : (tensor<!tf_type.resource<tensor<1x3xf32>>>) -> tensor<1x3xf32>
    
    - Last Modified: Wed May 08 00:18:59 UTC 2024
    - 4.8K bytes
  3. tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/lstm.json

    // CHECK-DAG: %[[input_18:.*]] = "quantfork.stats"({{.*}}) <{layerStats = dense<[-8.000000e-01, 1.600000e+00]> : tensor<2xf32>}> : (tensor<1x4xf32>) -> tensor<1x4xf32>
    // CHECK-DAG: %[[input_19:.*]] = "quantfork.stats"({{.*}}) <{layerStats = dense<[-2.000000e+00, 4.000000e+00]> : tensor<2xf32>}> : (tensor<1x2xf32>) -> tensor<1x2xf32>
    
    - Last Modified: Wed May 01 06:25:50 UTC 2024
    - 9.1K bytes
  4. tensorflow/compiler/mlir/lite/tests/tfl_while_outline.mlir

        %9 = "tfl.fully_connected"(%8, %cst_10, %cst_0) {fused_activation_function = "NONE", keep_num_dims = false, weights_format = "DEFAULT"} : (tensor<4x5xf32>, tensor<8x5xf32>, tensor<8xf32>) -> tensor<4x8xf32>
        %10:4 = "tfl.split"(%cst_5, %9) {num_splits = 4 : i32} : (tensor<i32>, tensor<4x8xf32>) -> (tensor<4x2xf32>, tensor<4x2xf32>, tensor<4x2xf32>, tensor<4x2xf32>)
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 13.5K bytes
  5. tensorflow/compiler/mlir/lite/experimental/tac/tests/raise-target-subgraphs.mlir

      func.return %3 : tensor<2x1xf32>
    }
    }
    
    // CHECK:   func @simpleTest(%[[VAL_0:.*]]: tensor<1xf32>, %[[VAL_1:.*]]: tensor<1xf32>, %[[VAL_2:.*]]: tensor<1xf32>, %[[VAL_3:.*]]: tensor<1xf32>) -> tensor<2x1xf32> {
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 74.9K bytes
  6. tensorflow/compiler/mlir/tensorflow/transforms/tf_passes.td

        ```mlir
          %0 = "tf.Const"() {value = dense<[[42.0]]> : tensor<1x1xf32>} : () -> tensor<1x1xf32>
          %1 = "tf.Const"() {device = "", value = dense<[[42.0]]> : tensor<1x1xf32>} : () -> tensor<1x1xf32>
          %2 = "tf.Const"() {device = "baz", value = dense<[[42.0]]> : tensor<1x1xf32>} : () -> tensor<1x1xf32>
        ```
    
        then running this pass with 'default-device=foobar', we get:
    
        ```mlir
    - Last Modified: Wed Jun 12 21:18:05 UTC 2024
    - 99.6K bytes
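    Note: this tf_passes.td excerpt is truncated before the pass output it promises. As a rough sketch only, assuming the documented pass assigns the given default device to every op whose `device` attribute is missing or empty and leaves explicitly placed ops alone, the continuation would look roughly like:

    ```mlir
      // Sketch (not the file's text): ops with a missing or empty device
      // attribute pick up "foobar"; the op already placed on "baz" is unchanged.
      %0 = "tf.Const"() {device = "foobar", value = dense<[[42.0]]> : tensor<1x1xf32>} : () -> tensor<1x1xf32>
      %1 = "tf.Const"() {device = "foobar", value = dense<[[42.0]]> : tensor<1x1xf32>} : () -> tensor<1x1xf32>
      %2 = "tf.Const"() {device = "baz", value = dense<[[42.0]]> : tensor<1x1xf32>} : () -> tensor<1x1xf32>
    ```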
  7. tensorflow/compiler/mlir/tensorflow/tests/mark_ops_for_outside_compilation.mlir

        %2:2 = "tf.RecvTPUEmbeddingActivations"() {_tpu_embedding_layer = "call1", config = "\0A\0B\0C\0D"} : () -> (tensor<2x2xf32>, tensor<4x4xf32>)
        "tf.SendTPUEmbeddingGradients"(%2#0, %2#1) {_tpu_embedding_layer = "call1", config = "\0A\0B\0C\0D", operandSegmentSizes = array<i32: 2, 0>} : (tensor<2x2xf32>, tensor<4x4xf32>) -> ()
        tf_device.return
    - Last Modified: Wed Apr 24 16:22:32 UTC 2024
    - 29.5K bytes
  8. tensorflow/compiler/mlir/lite/stablehlo/tests/composite-lowering.mlir

      %12 = mhlo.clamp %9, %11, %1 : tensor<2xf32>
      %13 = mhlo.multiply %arg0, %12 : tensor<2xf32>
      %14 = mhlo.divide %13, %1 : tensor<2xf32>
      return %14 : tensor<2xf32>
    }
    
    // CHECK-LABEL:   func.func @hardswish(
    // CHECK-SAME:                         %[[VAL_0:.*]]: tensor<2xf32>) -> tensor<*xf32> {
    // CHECK:           %[[VAL_1:.*]] = "tfl.hard_swish"(%[[VAL_0]]) : (tensor<2xf32>) -> tensor<2xf32>
    - Last Modified: Thu Jun 06 18:45:51 UTC 2024
    - 32.6K bytes
  9. tensorflow/compiler/mlir/lite/tests/ops.mlir

      func.return %24 : tensor<1x4xf32>
    }
    - Last Modified: Thu Jun 06 19:09:08 UTC 2024
    - 189.2K bytes
  10. tensorflow/compiler/mlir/tensorflow/transforms/shape_inference.cc

        } else {
          // Recurse on the subtypes in the variant/resource. Basically if the input
          // were:
          //   tensor<!tf_type.variant<tensor<?x8xf32>>>
          // and:
          //   tensor<!tf_type.variant<tensor<10x8xf32>>>
          // we'll try here to refine tensor<?x8xf32> with tensor<10x8xf32>.
          auto refined_subtype = mlir::cast<TensorType>(
              TypeMeet(lhs_element_type_with_subtype.GetSubtypes().front(),
    - Last Modified: Sat Jun 08 07:28:49 UTC 2024
    - 134.1K bytes
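    Note: the comment in this shape_inference.cc excerpt describes refining the element subtype carried inside a variant/resource type by taking the meet of the two subtypes. A small hypothetical illustration of that refinement (not taken from the file; the tf.Identity op is only a placeholder):

    ```mlir
      // The operand's variant subtype is fully known; the result's is not.
      %t = "tf.Identity"(%arg0)
          : (tensor<!tf_type.variant<tensor<10x8xf32>>>)
          -> tensor<!tf_type.variant<tensor<?x8xf32>>>
      // Meeting tensor<?x8xf32> with tensor<10x8xf32> gives tensor<10x8xf32>,
      // so the result type can be refined to
      // tensor<!tf_type.variant<tensor<10x8xf32>>>.
    ```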