Results 31 - 40 of 229 for transposes (0.16 sec)

  1. tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_quantizable_spots_as_functions.td

    def LiftMatMul : Pat<
      (TF_MatMulOp:$res $a, $b, $transpose_a, $transpose_b, $grad_a, $grad_b),
      (LiftAsTFPartitionedCall<"composite_matmul_fn">
        (ArgumentList $a, $b),
        (ResultList $res),
        (NamedAttributeList
          (NamedAttr<"transpose_a"> $transpose_a),
          (NamedAttr<"transpose_b"> $transpose_b))),
      [(IsNotInLiftedFunc $res)], [], (addBenefit 1)>;
    
    def LiftConv3D : Pat<
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sun Dec 10 05:52:02 UTC 2023
    - 15.6K bytes
    - Viewed (0)
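    For context, the `transpose_a` and `transpose_b` attributes captured by this
    pattern are the transpose flags of a TensorFlow matrix multiply. A minimal,
    illustrative Python sketch of what a lifted `composite_matmul_fn` computes
    (the function name comes from the pattern above; the body is an assumption):

        import tensorflow as tf

        # Hypothetical stand-in for the lifted composite_matmul_fn: a plain
        # MatMul whose transpose flags mirror the attributes forwarded above.
        def composite_matmul_fn(a, b, transpose_a=False, transpose_b=False):
            return tf.linalg.matmul(a, b,
                                    transpose_a=transpose_a,
                                    transpose_b=transpose_b)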
  2. tensorflow/compiler/mlir/tensorflow/tests/device_copy.mlir

      // CHECK: tf.MatMul
      %outputs = "tf.MatMul"(%arg0, %arg1) {device = "TPU", transpose_a = false, transpose_b = false} : (tensor<2x2xf32>, tensor<2x2xf32>) -> tensor<2x2xf32>
      %outputs_0 = "tf.MatMul"(%arg0, %arg1) {device = "TPU", transpose_a = false, transpose_b = false} : (tensor<2x2xf32>, tensor<2x2xf32>) -> tensor<2x2xf32>
      // CHECK-NOT: tf.IdentityN
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Mar 28 12:06:33 UTC 2022
    - 5.2K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/tensorflow/tests/unroll-batch-matmul.mlir

      // CHECK: %[[MATMUL_4:.*]] = "tf.MatMul"(%[[LHS_4]], %[[RHS_4]]) <{grad_a = false, grad_b = false, transpose_a = false, transpose_b = false}> : (tensor<4x5xf32>, tensor<5x6xf32>) -> tensor<4x6xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Dec 06 18:42:28 UTC 2023
    - 63.7K bytes
    - Viewed (0)
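    The CHECK line above comes from a test in which a batched matmul is unrolled
    into one 4x5-by-5x6 tf.MatMul per batch slice. A rough NumPy sketch of that
    unrolling (shapes assumed from the test, for illustration only):

        import numpy as np

        # Assumed operand shapes: (N, 4, 5) x (N, 5, 6) -> (N, 4, 6).
        lhs = np.random.rand(3, 4, 5).astype(np.float32)
        rhs = np.random.rand(3, 5, 6).astype(np.float32)

        # Unrolled form: one 2-D matmul per batch slice, then re-stacked.
        unrolled = np.stack([lhs[i] @ rhs[i] for i in range(lhs.shape[0])])
        assert np.allclose(unrolled, lhs @ rhs)  # matches the batched matmul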
  4. tensorflow/compiler/mlir/quantization/tensorflow/passes/replace_cast_hacks_with_tf_xla_ops.td

        (TF_SubOp (TF_CastOp $input, $truncate), $input_zp),
        (TF_CastOp (TF_IdentityOp $weight), $truncate1),
        $transpose_a, $transpose_b, $grad_a, $grad_b),
      (CreateXlaDotV2OpFromTfMatMulOp
        $input, $weight, $input_zp,
        /*weight_zp=*/(CreateScalarIntegerConst<"int32_t", "0">), $matmul,
        $transpose_a, $transpose_b),
      [(IsInt8ElementType $input),
       (IsInt8ElementType $weight),
       (IsConstTensor $input_zp),
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sun Dec 10 05:52:02 UTC 2023
    - 21.1K bytes
    - Viewed (0)
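    This pattern rewrites an int8 MatMul whose input carries a zero point into an
    XlaDotV2-based form with a weight zero point of 0. The arithmetic it preserves
    is roughly the following (a sketch with made-up values, not the pass itself):

        import numpy as np

        # Assumed int8 input with a zero point and int8 weight with zero point 0,
        # mirroring /*weight_zp=*/0 in the pattern above.
        x_q = np.array([[12, -3], [7, 5]], dtype=np.int8)
        w_q = np.array([[2, 1], [-4, 3]], dtype=np.int8)
        x_zp = np.int32(5)

        # Accumulate in int32: (x - x_zp) . w, the product the rewrite computes.
        acc = (x_q.astype(np.int32) - x_zp) @ w_q.astype(np.int32)
        print(acc)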
  5. tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composit_functions_debugging.mlir

        return %0 : tensor<2x2xf32>
      }
      func.func private @composite_matmul_fn_2_0(%arg0: tensor<2x2xf32>, %arg1: tensor<2x2xf32>) -> tensor<2x2xf32> attributes {tf_quant.composite_function} {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Nov 06 01:23:21 UTC 2023
    - 80.5K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/tfrt/tests/remove_device_attribute.mlir

      %0 = corert.get_op_handler %arg0 "cpu"
      // CHECK: %[[RESULT:.*]] = corert.executeop(%[[ARG_0:.*]]) "tf.MatMul"(%[[ARG_1:.*]], %[[ARG_1]]) {T = f32, transpose_a = false, transpose_b = false} : 1
      %1 = corert.executeop(%0) "tf.MatMul"(%arg1, %arg1) {T = f32, device = "cpu", transpose_a = false, transpose_b = false} : 1
      tfrt.return %arg0, %1 : !tfrt.chain, !corert.tensorhandle
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Mar 25 10:58:25 UTC 2022
    - 560 bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_quantizable_spots_as_functions_drq.td

    def LiftMatMul : Pat<
      (TF_MatMulOp:$res $a, $b, $transpose_a, $transpose_b, $grad_a, $grad_b),
      (LiftAsTFPartitionedCall<"composite_matmul_fn">
        (ArgumentList $a, $b),
        (ResultList $res),
        (NamedAttributeList
          (NamedAttr<"transpose_a"> $transpose_a),
          (NamedAttr<"transpose_b"> $transpose_b))),
      [(IsNotInLiftedFunc $res), (IsConstTensor $b)], [], (addBenefit 1)>;
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sun Dec 10 05:52:02 UTC 2023
    - 3.9K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/tensorflow/g3doc/space_to_depth.md

    disable this feature as well.
    
    ### Fuse SpaceToDepth with Automatic Double Transpose
    
    The transpose and reshape ops in SpaceToDepthOp on the TPU host may cause an
    image model to become infeed-bound. To reduce host time, the space-to-depth
    transform can be fused with `automatic double transpose`, avoiding the extra
    overhead on the host.
    
    ### Extend from Conv2D to Conv3D
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Oct 24 02:51:43 UTC 2020
    - 8.3K bytes
    - Viewed (0)
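    The transform being fused here is the standard space-to-depth rearrangement,
    which folds spatial blocks into the channel dimension. A small illustrative
    call (the block size and shape are arbitrary choices, not from the doc):

        import tensorflow as tf

        # NHWC batch: block_size=2 turns (1, 4, 4, 3) into (1, 2, 2, 12)
        # by moving each 2x2 spatial block into the channel dimension.
        images = tf.random.uniform([1, 4, 4, 3])
        packed = tf.nn.space_to_depth(images, block_size=2)
        print(packed.shape)  # (1, 2, 2, 12)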
  9. tensorflow/compiler/mlir/lite/transforms/optimize_batch_matmul.cc

            return failure();
          }
        }
    
        // Input rhs must be a constant with rank 2.
        if (constant.getType().getRank() != 2) return failure();
    
        // Create a tfl.transpose op that performs ZX transpose on `input`.
        auto create_z_x_transpose_op = [&](Value input) -> Value {
          RankedTensorType input_type =
              mlir::cast<RankedTensorType>(input.getType());
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 9.6K bytes
    - Viewed (0)
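    The `create_z_x_transpose_op` lambda above builds a tfl.transpose that, for
    the inputs handled here, swaps the last two dimensions of its operand. An
    equivalent permutation in Python, for illustration only:

        import numpy as np

        # "ZX" transpose: keep any leading (batch) dims, swap the last two axes.
        def z_x_transpose(x):
            perm = list(range(x.ndim - 2)) + [x.ndim - 1, x.ndim - 2]
            return np.transpose(x, perm)

        x = np.zeros((2, 3, 5))
        print(z_x_transpose(x).shape)  # (2, 5, 3)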
  10. tensorflow/compiler/mlir/quantization/tensorflow/tests/propagate_quantize_type.mlir

        %0 = "tf.Identity"(%cst) : (tensor<2x1024xi8>) -> tensor<2x1024xi8>
        %1 = "tf.Cast"(%0) {Truncate = false} : (tensor<2x1024xi8>) -> tensor<2x1024xf32>
        %2 = "tf.MatMul"(%arg0, %1) {attr_map = "0:transpose_a,1:transpose_a", device = "", transpose_a = false, transpose_b = false} : (tensor<1x2x2x2xf32>, tensor<2x1024xf32>) -> tensor<*xf32>
        %3 = "tf.Mul"(%2, %cst_0) : (tensor<*xf32>, tensor<f32>) -> tensor<*xf32>
        return %3 : tensor<*xf32>
      }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 6.6K bytes
    - Viewed (0)
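    The test above exercises a weight-only pattern: int8 weights flow through an
    Identity, are cast to f32, multiplied against the float activation, and then
    rescaled by a constant. A rough NumPy sketch (shapes and scale are assumed,
    not taken from the test):

        import numpy as np

        # Assumed small shapes; the test uses a (2, 1024) int8 weight and a
        # scalar multiplier.
        x = np.random.rand(3, 2).astype(np.float32)
        w_q = np.random.randint(-128, 128, size=(2, 4), dtype=np.int8)
        scale = np.float32(0.05)

        # Cast-to-float dequantization, matmul, then rescale, as in the IR.
        y = (x @ w_q.astype(np.float32)) * scale
        print(y.shape)  # (3, 4)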