Results 11 - 20 of 156 for mat_mul (0.28 sec)

  1. tensorflow/compiler/aot/tests/tfcompile_test.cc

        matmul.arg0(1, 0) = 4;
        matmul.arg0(1, 1) = 5;
        matmul.arg0(1, 2) = 6;
    
        matmul.arg1(0, 0) = 7;
        matmul.arg1(0, 1) = 8;
        matmul.arg1(1, 0) = 9;
        matmul.arg1(1, 1) = 10;
        matmul.arg1(2, 0) = 11;
        matmul.arg1(2, 1) = 12;
    
        EXPECT_TRUE(matmul.Run());
        EXPECT_EQ(matmul.error_msg(), "");
        const float results[4] = {58, 64, 139, 154};
        for (int i = 0; i < 4; ++i) {
    - Last Modified: Wed Sep 06 19:12:29 UTC 2023
    - 26.4K bytes
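    The expected values in this snippet check plain 2x3 by 3x2 matrix arithmetic; the first row of arg0 is cut off above, but {58, 64, 139, 154} is consistent with it being (1, 2, 3). A quick NumPy check of that assumption (not part of the test file):

        import numpy as np

        # Assumption: arg0's first row is (1, 2, 3); only its second row appears in the snippet above.
        arg0 = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)       # 2x3
        arg1 = np.array([[7, 8], [9, 10], [11, 12]], dtype=np.float32)  # 3x2
        print(arg0 @ arg1)  # [[ 58.  64.] [139. 154.]] -- matches the expected results array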
  2. tensorflow/compiler/mlir/tensorflow/tests/device_copy.mlir

    func.func @fold_identity_n_test(%arg0: tensor<2x2xf32>, %arg1: tensor<2x2xf32>) -> (tensor<2x2xf32>, tensor<2x2xf32>) {
      // CHECK: tf.MatMul
      %outputs = "tf.MatMul"(%arg0, %arg1) {device = "TPU", transpose_a = false, transpose_b = false} : (tensor<2x2xf32>, tensor<2x2xf32>) -> tensor<2x2xf32>
      %outputs_0 = "tf.MatMul"(%arg0, %arg1) {device = "TPU", transpose_a = false, transpose_b = false} : (tensor<2x2xf32>, tensor<2x2xf32>) -> tensor<2x2xf32>
    - Last Modified: Mon Mar 28 12:06:33 UTC 2022
    - 5.2K bytes
  3. tensorflow/compiler/mlir/quantization/tensorflow/passes/replace_cast_hacks_with_tf_xla_ops.td

       (IsInt8ElementType $weight),
       (IsConstTensor $weight),
       (IsInt32ElementType $matmul),
       (HasStaticShapeConstraint $weight)],
      [], (addBenefit 10)>;
    
    // Convert Matmul with hybrid inputs (f32 activation/int8 weight) to XlaDotV2
    def ConvertTFMatMulToXLADotV2OpWeightOnly : Pat<
      (TF_MatMulOp:$matmul
        $input,
        (TF_MulOp (TF_CastOp (TF_IdentityOp $weight), $truncate1), $scale),
    - Last Modified: Sun Dec 10 05:52:02 UTC 2023
    - 21.1K bytes
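    The pattern above targets weight-only ("hybrid") quantization: a constant int8 weight is cast and rescaled back to f32 before the MatMul, and the pass rewrites that into an XlaDotV2-based form. A rough NumPy sketch of the computation such a hybrid MatMul expresses (shapes and the scale value are illustrative, not taken from the pass):

        import numpy as np

        activation = np.random.randn(4, 8).astype(np.float32)                  # f32 activation
        weight_q = np.random.randint(-128, 128, size=(8, 16), dtype=np.int8)   # int8 constant weight
        scale = np.float32(0.05)                                               # illustrative per-tensor scale

        # Weight-only scheme: dequantize the weight, then run an ordinary f32 matmul.
        weight_f32 = weight_q.astype(np.float32) * scale
        output = activation @ weight_f32                                       # shape (4, 16)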
  4. tensorflow/compiler/mlir/lite/tests/end2end/back2back_fake_quant.pbtxt

      input: "sequential/quant_dense/MatMul/kquant/FakeQuantWithMinMaxVars/ReadVariableOp"
      input: "sequential/quant_dense/MatMul/kquant/FakeQuantWithMinMaxVars/ReadVariableOp_1"
      attr {
        key: "narrow_range"
        value {
          b: false
        }
      }
      attr {
        key: "num_bits"
        value {
          i: 8
        }
      }
    }
    node {
      name: "sequential/quant_dense/MatMul/kquant/IdentityN"
    - Last Modified: Mon Nov 15 19:42:47 UTC 2021
    - 25.9K bytes
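    The attrs shown here (num_bits: 8, narrow_range: false) are the standard FakeQuantWithMinMaxVars attributes and map directly onto the op's arguments. A minimal eager-mode sketch of the equivalent call, with illustrative input and min/max values:

        import tensorflow as tf

        x = tf.constant([-1.5, -0.2, 0.0, 0.7, 2.3])
        # Same attrs as the node above; the min/max range here is illustrative.
        y = tf.quantization.fake_quant_with_min_max_vars(
            x, min=-1.0, max=1.0, num_bits=8, narrow_range=False)
        print(y.numpy())  # values clipped to [-1, 1] and snapped to the 8-bit quantization grid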
  5. tensorflow/c/c_api_test.cc

                                       "gradients/MatMul", false, true);
        TF_Operation* matmul2 = MatMul(expected_graph_, s_, const0, const3,
                                       "gradients/MatMul_1", true, false);
        expected_grad_outputs[0] = {matmul1, 0};
        expected_grad_outputs[1] = {matmul2, 0};
      }
    
      TF_Tensor* FloatTensor2x2(const float* values) {
    - Last Modified: Mon Apr 15 03:35:10 UTC 2024
    - 96.9K bytes
  6. tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/multi_arguments_results_v1.py

    # CHECK-DAG: %[[MUL1:.*]] = "tf.MatMul"(%[[ARG0]], %[[ARG1]])
    # CHECK-DAG: %[[MUL2:.*]] = "tf.MatMul"(%[[ARG1]], %[[ARG0]])
    # CHECK:  %[[IDENTITY:.*]]:2 = "tf.IdentityN"(%[[MUL1]], %[[MUL2]])
    # CHECK: return %[[IDENTITY]]#1, %[[IDENTITY]]#0
    
    
    def Test():
    
      x = tf.constant(1.0, shape=(5, 3))
      y = tf.constant(1.0, shape=(3, 5))
    
      s = tf.matmul(x, y)
      t = tf.matmul(y, x)
      [t, s] = array_ops.identity_n([t, s])
    
    - Last Modified: Tue Sep 28 21:37:05 UTC 2021
    - 3.5K bytes
  7. tensorflow/compiler/mlir/lite/tests/end2end/unroll_batch_matmul_disabled.pbtxt

    # RUN: tf_tfl_translate -unfold_batchmatmul=false -tf-input-arrays=Placeholder,Placeholder_1 -tf-input-shapes=2,5,3:3,7 -tf-input-data-types=DT_FLOAT,DT_FLOAT -tf-output-arrays=MatMul -output-mlir %s -o - 2>&1 | FileCheck %s
    
    node {
      name: "Placeholder"
      op: "Placeholder"
      attr {
        key: "dtype"
        value {
          type: DT_FLOAT
        }
      }
      attr {
        key: "shape"
        value {
          shape {
            dim {
              size: 2
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 1.5K bytes
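    The RUN line feeds placeholders of shapes 2x5x3 and 3x7 and requests the MatMul output; with -unfold_batchmatmul=false the converter keeps the batched matmul rather than unrolling it into per-slice matmuls (hence "disabled" in the test name). Shape-wise, the rank-2 operand is broadcast across the batch dimension, as a quick NumPy check illustrates:

        import numpy as np

        a = np.ones((2, 5, 3), dtype=np.float32)  # matches -tf-input-shapes=2,5,3
        b = np.ones((3, 7), dtype=np.float32)     # matches 3,7
        print(np.matmul(a, b).shape)              # (2, 5, 7): b is applied to each of the 2 batch slices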
  8. tensorflow/compiler/mlir/quantization/tensorflow/tests/merge_duplicate_resource_ops.mlir

        %outputs_7, %control_8 = tf_executor.island wraps "tf.Const"() {value = dense<"MatMul/b_0"> : tensor<1x!tf_type.string>} : () -> tensor<1x!tf_type.string>
        %outputs_9, %control_10 = tf_executor.island wraps "tf.VarHandleOp"() {container = "", shared_name = "MatMul/b_0"} : () -> tensor<!tf_type.resource<tensor<20x4096xf32>>>
    - Last Modified: Fri May 26 04:26:16 UTC 2023
    - 10.5K bytes
  9. tensorflow/cc/framework/gradients_test.cc

          auto dv = Const(scope, {{1.0, 1.0}, {1.0, 1.0}});
          auto dt = MatMul(scope, dv, u, MatMul::TransposeB(true));
          auto du = MatMul(scope, t, dv, MatMul::TransposeA(true));
    
          auto dz = Const(scope, {{1.0, 1.0}, {1.0, 1.0}});
          auto dx = MatMul(scope, dz, y, MatMul::TransposeB(true));
          auto dy = MatMul(scope, x, dz, MatMul::TransposeA(true));
        } else {
          // Call AddSymbolicGradients.
    - Last Modified: Sat Apr 15 15:13:38 UTC 2023
    - 25K bytes
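    The hand-built gradients above follow the standard matmul rule: for a product v = t * u with upstream gradient dv, dt = dv * u^T and du = t^T * dv, which is exactly what the TransposeB/TransposeA options express. A small sketch checking that rule against TensorFlow autodiff (values are illustrative; the upstream gradient of all ones mirrors the dv constant in the test):

        import numpy as np
        import tensorflow as tf

        t = tf.constant([[1., 2., 3.], [4., 5., 6.]])       # 2x3
        u = tf.constant([[7., 8.], [9., 10.], [11., 12.]])  # 3x2
        with tf.GradientTape() as tape:
            tape.watch([t, u])
            v = tf.matmul(t, u)
        dt, du = tape.gradient(v, [t, u])  # implicit upstream gradient dv of all ones

        dv = np.ones((2, 2), dtype=np.float32)
        np.testing.assert_allclose(dt.numpy(), dv @ u.numpy().T)  # dt = dv . u^T
        np.testing.assert_allclose(du.numpy(), t.numpy().T @ dv)  # du = t^T . dv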
  10. tensorflow/compiler/mlir/tensorflow/transforms/fused_kernel_matcher.cc

        }
        // FusedMatMul kernel does not support grad_a/grad_b attrs
        if ((matmul->hasAttr("grad_a") &&
             mlir::cast<BoolAttr>(matmul->getAttr("grad_a")).getValue()) ||
            (matmul->hasAttr("grad_b") &&
             mlir::cast<BoolAttr>(matmul->getAttr("grad_b")).getValue())) {
          (void)rewriter.notifyMatchFailure(matmul, [&](Diagnostic &diag) {
            diag << "FusedMatMul kernel does not support grad_a/grad_b attrs";
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 14.9K bytes