Results 51 - 60 of 63 for 4x1xf32 (0.2 sec)

  1. tensorflow/compiler/mlir/tfrt/tests/tf_to_corert/attributes.mlir

      func.return
    }
    
    // CHECK-LABEL: func @basic
    func.func @basic(
        %arg0: tensor<3x1xf32>,
        %arg1: tensor<!tf_type.resource<tensor<1x3xf32>>>) -> (tensor<3x3xf32>) {
      %1 = "tf.ReadVariableOp"(%arg1) {_output_shapes = ["tfshape$dim { size: 1 } dim { size: 3 }"], device = "/device:CPU:0", dtype = f32} : (tensor<!tf_type.resource<tensor<1x3xf32>>>) -> tensor<1x3xf32>
    
    - Last Modified: Wed May 08 00:18:59 UTC 2024
    - 4.8K bytes
  2. tensorflow/compiler/mlir/quantization/stablehlo/cc/pre_calibration_test.cc

        module attributes {} {
          func.func @main(%arg0: tensor<1x4xf32>) -> tensor<1x3xf32> attributes {} {
            %0 = stablehlo.constant dense<1.0> : tensor<4x3xf32>
            %1 = stablehlo.dot_general %arg0, %0, contracting_dims = [1] x [0], precision = [DEFAULT, DEFAULT] : (tensor<1x4xf32>, tensor<4x3xf32>) -> tensor<1x3xf32>
            return %1 : tensor<1x3xf32>
          }
        }
      )mlir");
      ASSERT_TRUE(module_op);
    
    - Last Modified: Thu Mar 28 21:41:08 UTC 2024
    - 6K bytes
  3. tensorflow/compiler/mlir/tfrt/tests/tf_to_corert/basic.mlir

      // CHECK-NEXT: [[r1:%.*]] = tfrt_fallback_async.executeop {{.*}} "tf.BiasAdd"([[r0]], [[result]])
      %3 = "tf.BiasAdd"(%2, %0) {T = f32, _output_shapes = ["tfshape$dim { size: 3 } dim { size: 3 }"], data_format = "NHWC", device = "/device:CPU:0"} : (tensor<3x3xf32>, tensor<3xf32>) -> tensor<3x3xf32>
    - Last Modified: Wed May 08 00:18:59 UTC 2024
    - 3.9K bytes
  4. tensorflow/compiler/mlir/tensorflow/tests/tpu_sharding_identification.mlir

    }
    func.func @_func(%arg0: tensor<2x4xf32>, %arg1: tensor<4x2xf32>) -> tensor<2x2xf32> {
      %0 = "tf.MatMul"(%arg0, %arg1) {_XlaSharding = "\08\03\1A\02\02\01\22\02\00\01"} : (tensor<2x4xf32>, tensor<4x2xf32>) -> tensor<2x2xf32>
      %1 = "tf.Identity"(%0) : (tensor<2x2xf32>) -> tensor<2x2xf32>
      return %1 : tensor<2x2xf32>
    }
    
    // -----
    // The following op sharding is used in the following test case:
    - Last Modified: Tue Feb 20 19:07:52 UTC 2024
    - 47.5K bytes
  5. tensorflow/compiler/mlir/lite/tests/prepare-tf.mlir

      %4 = "tf.MatMul"(%arg0, %3) {device = "", transpose_a = false, transpose_b = false} : (tensor<2x3xf32>, tensor<3x4xf32>) -> tensor<2x4xf32>
      %5 = "tf.Identity"(%4) {device = ""} : (tensor<2x4xf32>) -> tensor<2x4xf32>
      %6 = "tf.Identity"(%5) {device = ""} : (tensor<2x4xf32>) -> tensor<2x4xf32>
      func.return %6 : tensor<2x4xf32>
    
      // CHECK-LABEL: QuantDequantTranspose
    - Last Modified: Wed May 29 07:26:59 UTC 2024
    - 59.8K bytes
  6. tensorflow/compiler/mlir/tensorflow/transforms/tf_passes.td

        ```mlir
          %0 = "tf.Const"() {value = dense<[[42.0]]> : tensor<1x1xf32>} : () -> tensor<1x1xf32>
          %1 = "tf.Const"() {device = "", value = dense<[[42.0]]> : tensor<1x1xf32>} : () -> tensor<1x1xf32>
          %2 = "tf.Const"() {device = "baz", value = dense<[[42.0]]> : tensor<1x1xf32>} : () -> tensor<1x1xf32>
        ```
    
        then running this pass with 'default-device=foobar', we get:
    
        ```mlir
    - Last Modified: Wed Jun 12 21:18:05 UTC 2024
    - 99.6K bytes
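    The excerpt above ends just before the pass's expected output. Assuming this is the documentation of the simple device assignment pass, which writes the `default-device` option into every op whose `device` attribute is missing or empty, the result would look roughly like the following sketch (a reconstruction, not the verbatim file contents):

    ```mlir
      %0 = "tf.Const"() {device = "foobar", value = dense<[[42.0]]> : tensor<1x1xf32>} : () -> tensor<1x1xf32>
      %1 = "tf.Const"() {device = "foobar", value = dense<[[42.0]]> : tensor<1x1xf32>} : () -> tensor<1x1xf32>
      %2 = "tf.Const"() {device = "baz", value = dense<[[42.0]]> : tensor<1x1xf32>} : () -> tensor<1x1xf32>
    ```

    Ops already pinned to a concrete device, such as the one on "baz", are left unchanged.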
  7. tensorflow/compiler/mlir/lite/experimental/tac/tests/raise-target-subgraphs.mlir

    // CHECK:           %[[VAL_4:.*]]:2 = call @func_0_GPU_FLOAT(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]], %[[VAL_3]]) {tac.device = "GPU", tac.inference_type = "FLOAT", tac.interface_name = "func_0"} : (tensor<1xf32>, tensor<1xf32>, tensor<1xf32>, tensor<1xf32>) -> (tensor<1xf32>, tensor<1xf32>)
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 74.9K bytes
  8. tensorflow/compiler/mlir/lite/tests/const-fold.mlir

      %7 = "tfl.add"(%2, %1) {fused_activation_function = "NONE"} : (tensor<4xf32>, tensor<  f32>) -> tensor<4xf32>
      %8 = "tfl.add"(%2, %3) {fused_activation_function = "NONE"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
      %9 = "tfl.add"(%2, %3) {fused_activation_function = "SIGN_BIT"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
    
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 45.8K bytes
  9. tensorflow/compiler/mlir/lite/stablehlo/tests/uniform-quantized-stablehlo-to-tfl.mlir

    // `tfl.concatenation`.
    
    func.func @concatenate_float(%arg0: tensor<3x2xf32>, %arg1: tensor<1x2xf32>) -> tensor<4x2xf32> {
      %0 = "stablehlo.concatenate"(%arg0, %arg1) {dimension = 0 : i64} : (tensor<3x2xf32>, tensor<1x2xf32>) -> tensor<4x2xf32>
      return %0 : tensor<4x2xf32>
    }
    // CHECK-LABEL: concatenate_float
    // CHECK-NOT: tfl.concatenation
    // CHECK: stablehlo.concatenate
    
    // -----
    - Last Modified: Tue May 14 17:10:32 UTC 2024
    - 106.2K bytes
  10. tensorflow/compiler/mlir/tfrt/tests/mlrt/tf_to_mlrt.mlir

    // Test for XlaLaunch
    
    func.func private @xla_func_0(%arg0: tensor<1x3xf32>, %arg1: tensor<1x3xf32>) -> tensor<1x3xf32> attributes {tf._XlaMustCompile = true, tf._noinline = true, tf._original_func_name = "should_not_be_used"} {
      %1 = "tf.AddV2"(%arg0, %arg1) {__op_key = 0: i32} : (tensor<1x3xf32>, tensor<1x3xf32>) -> tensor<1x3xf32>
      func.return %1 : tensor<1x3xf32>
    }
    
    // CHECK-LABEL: func @xla_func
    - Last Modified: Fri May 31 20:44:15 UTC 2024
    - 24.7K bytes