Results 21 - 30 of 30 for 10x3xf32 (0.2 sec)

  1. tensorflow/compiler/mlir/tfrt/tests/ifrt/lower_to_ifrt_restore_variable.mlir

        %1 = "tf.VarHandleOp"() <{container = "x", shared_name = "y"}> : () -> tensor<!tf_type.resource<tensor<3x1xf32>>>
        "tf.AssignVariableOp"(%1, %0#0) : (tensor<!tf_type.resource<tensor<3x1xf32>>>, tensor<3x1xf32>) -> ()
        %2 = "tf.VarHandleOp"() <{container = "x", shared_name = "z"}> : () -> tensor<!tf_type.resource<tensor<1x3xf32>>>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 31 20:44:15 UTC 2024
    - 8.8K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/tensorflow/tests/graphdef2mlir/mlir_passthrough_op.pbtxt

    # CHECK: mlir_module = "\0Afunc @main(%arg0 : tensor<10xf32>, %arg1 : tensor<10xf32>) -> tensor<10x10xf32> {\0A %add = \22tf.Add\22(%arg0, %arg1) : (tensor<10xf32>, tensor<10xf32>) -> tensor<10xf32>\0A %ret = \22magic.op\22(%add, %add) : (tensor<10xf32>, tensor<10xf32>) -> tensor<10x10xf32>\0A return %ret : tensor<10x10xf32>\0A}\0A"}> {device = ""} : (tensor<10xf32>, tensor<10xf32>) -> tensor<*xf32>
    
    node {
      name: "x"
      op: "Placeholder"
      attr {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 1.9K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/shared_variable_v1.py

    # CHECK:      func {{@[a-zA-Z_0-9]+}}(
    # CHECK-SAME:   [[ARG0:%.*]]: tensor<3x1xf32> {tf_saved_model.index_path = ["x"]},
    # CHECK-SAME:   [[ARG1:%.*]]: tensor<!tf_type.resource<tensor<1x3xf32>>> {tf_saved_model.bound_input = @[[VAR]]})
    # CHECK-SAME:             -> (tensor<3x3xf32> {tf_saved_model.index_path = ["r"]})
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Oct 31 08:49:35 UTC 2023
    - 2.7K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/tfrt/tests/tf_to_corert/attributes.mlir

    // CHECK-LABEL: func @basic
    func.func @basic(
        %arg0: tensor<3x1xf32>,
        %arg1: tensor<!tf_type.resource<tensor<1x3xf32>>>) -> (tensor<3x3xf32>) {
      %1 = "tf.ReadVariableOp"(%arg1) {_output_shapes = ["tfshape$dim { size: 1 } dim { size: 3 }"], device = "/device:CPU:0", dtype = f32} : (tensor<!tf_type.resource<tensor<1x3xf32>>>) -> tensor<1x3xf32>
    
      // CHECK: {{%.*}} = tfrt_fallback_async.executeop {{.*}} device("/device:CPU:0") "tf.MatMul"
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 08 00:18:59 UTC 2024
    - 4.8K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/tfrt/tests/tf_to_corert/device_conversion.mlir

        %arg1: tensor<1x3xf32> {tf_saved_model.index_path = [0]})
          -> (tensor<3x3xf32> {tf_saved_model.index_path = []}) {
      // CHECK: {{%.*}} = corert.get_op_handler %arg0 "/device:GPU:0"
      %2 = "tf.MatMul"(%arg0, %arg1) {T = f32, _output_shapes = ["tfshape$dim { size: 3 } dim { size: 3 }"], device = "/device:GPU:0", transpose_a = false, transpose_b = false} : (tensor<3x1xf32>, tensor<1x3xf32>) -> tensor<3x3xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 08 00:18:59 UTC 2024
    - 645 bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/tfrt/tests/tf_to_corert/basic.mlir

      // CHECK-NEXT: [[out_ch:%.*]] = tfrt.merge.chains [[ch]], [[ch1]] : !tfrt.chain, !tfrt.chain
      // CHECK-NEXT: tfrt.return [[out_ch]], [[r2]] : !tfrt.chain, !tfrt_fallback.tf_tensor
      func.return %6#0 : tensor<3x3xf32>
    }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 08 00:18:59 UTC 2024
    - 3.9K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/quantization/stablehlo/cc/pre_calibration_test.cc

        module attributes {} {
          func.func @main(%arg0: tensor<1x4xf32>) -> tensor<1x3xf32> attributes {} {
            %0 = stablehlo.constant dense<1.0> : tensor<4x3xf32>
            %1 = stablehlo.dot_general %arg0, %0, contracting_dims = [1] x [0], precision = [DEFAULT, DEFAULT] : (tensor<1x4xf32>, tensor<4x3xf32>) -> tensor<1x3xf32>
            return %1 : tensor<1x3xf32>
          }
        }
      )mlir");
      ASSERT_TRUE(module_op);
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Mar 28 21:41:08 UTC 2024
    - 6K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/quantization/tensorflow/tests/propagate_quantize_type.mlir

    // CHECK: return %[[GATHER]] : tensor<1x300x10xf32>
    
    // -----
    
    module {
      func.func @propagate_xlagather(%arg0: tensor<10x2xi32>) -> tensor<1x300x10xf32> {
        %cst = "tf.Const"() {value = dense<[1, 1, 300]> : tensor<3xi64>} : () -> tensor<3xi64>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 6.6K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/quantization/tensorflow/tests/cast_bf16_ops_to_f32.mlir

      %2 = "tf.Cast"(%1) {Truncate = false} : (tensor<1x2xbf16>) -> tensor<1x2xf32>
      %3 = "tf.IdentityN"(%2) {device = ""} : (tensor<1x2xf32>) -> tensor<1x2xf32>
      return %3 : tensor<1x2xf32>
    }
    
    // CHECK: func @cast_bf16_matmul_to_fp32
    // CHECK-DAG: %[[cst:.*]] = "tf.Const"() <{value = dense<{{.*}}> : tensor<10x2xf32>}> : () -> tensor<10x2xf32>
    // CHECK: %[[matmul:.*]] = "tf.MatMul"(%arg0, %[[cst]])
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 8.4K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/quantization/tensorflow/tests/lift_quantizable_spots_as_functions_xla_selective_quantization.mlir

        %cst = "tf.Const"() {value = dense<0.000000e+00> : tensor<10xf32>} : () -> tensor<10xf32>
        %cst_0 = "tf.Const"() {value = dense<[-1, 10]> : tensor<2xi32>} : () -> tensor<2xi32>
        %1 = "tf.MatMul"(%arg0, %arg1) {
          transpose_a = false, transpose_b = false
        } : (tensor<1x10xf32>, tensor<10x10xf32>) -> tensor<1x10xf32>  loc(fused["MatMul:", "test_opt_out"])
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 6.8K bytes
    - Viewed (0)