Results 11 - 20 of 27 for 2x512xf32 (0.19 sec)

  1. tensorflow/compiler/mlir/tfrt/tests/analysis/cost_analysis.mlir

        // 262657 = 1 + 512 + 512 * 512
        // expected-remark@+1 {{Cost: 262657}}
        %2 = "tf.MatMul"(%arg, %1) {device = "/job:localhost/replica:0/task:0/device:CPU:0", transpose_a = false, transpose_b = false} : (tensor<?x512xf32>, tensor<512x512xf32>) -> tensor<?x512xf32>
        // expected-remark@+1 {{Cost: 512}}
        func.return %2 : tensor<?x512xf32>
    }
    
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Aug 14 15:35:49 UTC 2023
    - 12.2K bytes
    - Viewed (0)
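The cost comment in result 1 breaks the expected value down as 1 + 512 + 512 * 512 = 262657 for a ?x512 by 512x512 MatMul. The sketch below only reproduces that arithmetic; the reading of the three terms (a base cost, the RHS column count, and the weight-matrix element count) is an assumption inferred from the comment, and the actual cost model lives in the TFRT cost-analysis pass, which this excerpt does not show.

    # Reproduce the arithmetic behind the expected-remark in result 1.
    base_cost = 1                 # assumed fixed per-op term
    rhs_cols = 512                # columns of the 512x512 weight operand (assumed reading)
    weight_elements = 512 * 512   # elements of the 512x512 weight operand

    matmul_cost = base_cost + rhs_cols + weight_elements
    assert matmul_cost == 262657  # matches {{Cost: 262657}}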
  2. tensorflow/compiler/mlir/quantization/tensorflow/tests/lift_quantizable_spots_as_functions_drq.mlir

    func.func @lift_float_matmul(%arg0: tensor<1x12x12x512xf32>) -> (tensor<*xf32>, tensor<*xf32>) {
      %cst = "tf.Const"() {value = dense<0.000000e+00> : tensor<512x512xf32>} : () -> tensor<512x512xf32>
      %out_1 = "tf.MatMul"(%arg0, %cst) {
        device = "", transpose_a = false, transpose_b = false
      } : (tensor<1x12x12x512xf32>, tensor<512x512xf32>) -> tensor<*xf32>
      %out_2 = "tf.MatMul"(%arg0, %arg0) {
        device = "", transpose_a = false, transpose_b = true
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 11.8K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/defer_activation_transpose.mlir

    func.func @add_with_activation_transpose_rank_two(%arg0: tensor<1x2xf32>) -> tensor<2x1xf32> {
      %0 = stablehlo.constant dense<2.000000e+00> : tensor<2x1xf32>
      %1 = stablehlo.transpose %arg0, dims = [1, 0] : (tensor<1x2xf32>) -> tensor<2x1xf32>
      %2 = stablehlo.add %1, %0 : tensor<2x1xf32>
      return %2 : tensor<2x1xf32>
    }
    // CHECK: %[[TRANSPOSE_0:.+]] = stablehlo.transpose
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 18 20:32:46 UTC 2024
    - 14.6K bytes
    - Viewed (0)
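The defer_activation_transpose test in result 3 checks that the transpose on the activation is re-emitted after the add (the CHECK line captures a trailing stablehlo.transpose). Below is a minimal NumPy sketch of the algebraic identity such a rewrite relies on, with shapes mirroring the test (1x2 activation, 2x1 constant); it illustrates the identity, not the pass's rewrite logic:

    import numpy as np

    x = np.random.rand(1, 2).astype(np.float32)   # activation, 1x2
    c = np.full((2, 1), 2.0, dtype=np.float32)    # constant, 2x1

    before = np.transpose(x) + c                  # add on the transposed activation
    after = np.transpose(x + np.transpose(c))     # add first, transpose deferred
    assert np.allclose(before, after)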
  4. tensorflow/compiler/mlir/quantization/tensorflow/tests/add_dump_tensor_op_stablehlo.mlir

        %2 = "tf.XlaCallModule"(%arg0, %1, %0) <{Sout = [#tf_type.shape<?x2>], module = "", version = 9 : i64}> {_entry_function = @composite_dot_general_with_bias_and_relu6_dynamic_fn_2, _original_entry_function = "composite_dot_general_with_bias_and_relu6_dynamic_fn_2", _tfl_quant_trait = "fully_quantizable"} : (tensor<?x2xf32>, tensor<2x2xf32>, tensor<2xf32>) -> tensor<?x2xf32>
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Mar 22 22:55:22 UTC 2024
    - 18K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_weights.mlir

        %cst = "tf.Const"() {device = "", value = dense<1.000000e+01> : tensor<512x512xf32>} : () -> tensor<512x512xf32>
        %cst_sharded = "tf.XlaSharding"(%cst) {_XlaSharding = "\08\03\1A\03\01\04\02\22\08\00\04\01\05\02\06\03\070\01", device = "", sharding = "\08\03\1A\03\01\04\02\22\08\00\04\01\05\02\06\03\070\01", unspecified_dims = []} : (tensor<512x512xf32>) -> tensor<512x512xf32>
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 42K bytes
    - Viewed (0)
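Result 5 feeds a dense 512x512 f32 constant through tf.XlaSharding in a quantize_weights test. As a rough illustration of what quantizing such a weight constant means, here is a generic per-tensor symmetric int8 sketch in NumPy; the pass's actual scheme (narrow range, per-axis scales, rounding mode) is not visible in this excerpt and may differ:

    import numpy as np

    w = np.full((512, 512), 10.0, dtype=np.float32)   # the constant from the test

    scale = np.abs(w).max() / 127.0                   # per-tensor symmetric scale (assumed)
    w_int8 = np.clip(np.round(w / scale), -127, 127).astype(np.int8)
    w_dequant = w_int8.astype(np.float32) * scale

    print(scale, float(np.abs(w - w_dequant).max()))  # scale and worst-case error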
  6. tensorflow/compiler/mlir/lite/tests/prepare-tf-fake-quant.mlir

      %2 = "tf.Reshape"(%0, %cst_0) : (tensor<1x2xf32>, tensor<2xi64>) -> tensor<2x1xf32>
      func.return %1, %2 : tensor<2x1xf32>, tensor<2x1xf32>
    
    // CHECK:  %cst = arith.constant
    // CHECK:  %[[FQ:.*]] = "tf.FakeQuantWithMinMaxVars"(%arg0, %arg1, %arg2)
    // CHECK:  %[[R1:.*]] = "tf.Reshape"(%[[FQ]], %cst)
    // CHECK-SAME: tensor<2x1xf32>
    // CHECK:  %[[R2:.*]] = "tf.Reshape"(%[[FQ]], %cst)
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 20.4K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/lite/tests/prepare-tf-fake-quant-4bit.mlir

      %2 = "tf.Reshape"(%0, %cst_0) : (tensor<1x2xf32>, tensor<2xi64>) -> tensor<2x1xf32>
      func.return %1, %2 : tensor<2x1xf32>, tensor<2x1xf32>
    
    // CHECK:  %cst = arith.constant
    // CHECK:  %[[FQ:.*]] = "tf.FakeQuantWithMinMaxVars"(%arg0, %arg1, %arg2)
    // CHECK:  %[[R1:.*]] = "tf.Reshape"(%[[FQ]], %cst)
    // CHECK-SAME: tensor<2x1xf32>
    // CHECK:  %[[R2:.*]] = "tf.Reshape"(%[[FQ]], %cst)
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 22K bytes
    - Viewed (0)
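Results 6 and 7 check the same shape propagation at 8-bit and 4-bit widths: one tf.FakeQuantWithMinMaxVars result feeds two tf.Reshape ops, and the CHECK lines confirm both reshapes reuse the single fake-quant value. The NumPy sketch below only illustrates why that sharing is sound for per-tensor fake quantization, namely that an elementwise quantize-dequantize commutes with reshape; the fake_quant helper is a simplified stand-in, not the op's exact nudging logic:

    import numpy as np

    def fake_quant(x, qmin, qmax, num_bits=8):
        # Simplified per-tensor quantize-dequantize (illustrative only).
        scale = (qmax - qmin) / (2 ** num_bits - 1)
        q = np.round((np.clip(x, qmin, qmax) - qmin) / scale)
        return q * scale + qmin

    x = np.random.uniform(-1.0, 1.0, size=(1, 2)).astype(np.float32)

    a = fake_quant(x, -1.0, 1.0).reshape(2, 1)   # quantize, then reshape
    b = fake_quant(x.reshape(2, 1), -1.0, 1.0)   # reshape, then quantize
    assert np.allclose(a, b)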
  8. tensorflow/compiler/mlir/quantization/common/lift_as_function_call_test.cc

          %2 = "tf.XlaCallModule"(%arg0, %1, %0) <{Sout = [#tf_type.shape<?x2>], module = "", version = 9 : i64}> {_entry_function = @composite_dot_general_fn_1, _original_entry_function = "composite_dot_general_fn_1", _tfl_quant_trait = "fully_quantizable", _quantization_method = "weight_only_ptq { }"} : (tensor<?x2xf32>, tensor<2x2xf32>, tensor<2xf32>) -> tensor<?x2xf32>
          return %2 : tensor<?x2xf32>
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 26.2K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/lite/tests/canonicalize.mlir

    func.func @RemoveRedundantUnpackPack(%arg0: tensor<2x5xf32>) -> tensor<2x5xf32> {
      %0:2 = "tfl.unpack"(%arg0) {axis = 0 : i32, num = 2 : i32} : (tensor<2x5xf32>) -> (tensor<5xf32>, tensor<5xf32>)
      %1 = "tfl.pack"(%0#0, %0#1) {axis = 0 : i32, values_count = 2 : i32} : (tensor<5xf32>, tensor<5xf32>) -> (tensor<2x5xf32>)
      func.return %1: tensor<2x5xf32>
      // CHECK-NOT: pack
      // CHECK: return %arg0 : tensor<2x5xf32>
    }
    
    // -----
    
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 20.6K bytes
    - Viewed (0)
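The RemoveRedundantUnpackPack pattern in result 9 folds a tfl.unpack that is immediately repacked along the same axis with the same element count, since the pair is an identity and the pack result can be replaced by the original argument. A NumPy round trip mirroring the 2x5 test case (illustration only):

    import numpy as np

    x = np.arange(10, dtype=np.float32).reshape(2, 5)

    slices = [x[i] for i in range(x.shape[0])]   # unpack: axis = 0, num = 2
    repacked = np.stack(slices, axis=0)          # pack:   axis = 0, values_count = 2

    assert np.array_equal(repacked, x)           # round trip is the identity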
  10. tensorflow/compiler/mlir/lite/experimental/tac/tests/get-alternative-subgraph.mlir

        %2 = func.call @func_2_CPU_FLOAT(%0, %1) {tac.interface_name = "func_2"} : (tensor<1xf32>, tensor<1xf32>) -> tensor<2x1xf32>
        func.return %2 : tensor<2x1xf32>
      }
    
      func.func private @func_2_CPU_FLOAT(%arg0: tensor<1xf32>, %arg1: tensor<1xf32>) -> tensor<2x1xf32> attributes {tac.device = "CPU", tac.inference_type = "FLOAT", tac.interface_name = "func_2"} {
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 20.1K bytes
    - Viewed (0)