Results 1 - 10 of 75 for matmult (0.34 sec)

  1. tensorflow/compiler/jit/mark_for_compilation_pass_test.cc

      // done in parallel.
      //
      // This graph is:
      // (Const0, Const0) -> MatMul0
      // (Const1, Const1) -> MatMul1
      // (MatMul0, MatMul1) -> MatMulCombined
      //
      // Device0: [Const0, Const0, MatMul0]
      // Device1: [Const1, Const1, MatMul1, MatMulCombined]
      //
      // Cluster0: [Const0, Const0, MatMul0]
      // Cluster1: [Const1, Const1, MatMul1]
      // Cluster2: [MatMulCombined]
    - Last Modified: Wed Feb 14 10:11:10 UTC 2024
    - 79.6K bytes
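    The comment above fully specifies a small three-MatMul graph. As an illustration only (this is not code from the file), the same graph could be assembled with the TensorFlow C++ ops API roughly as follows; the constant values and the helper name are invented:

      #include "tensorflow/cc/framework/scope.h"
      #include "tensorflow/cc/ops/standard_ops.h"
      #include "tensorflow/core/framework/graph.pb.h"

      // Hypothetical builder for the commented graph: two independent
      // (Const, Const) -> MatMul chains feeding a combined MatMul.
      tensorflow::GraphDef BuildCombinedMatMulGraph() {
        using namespace tensorflow;
        Scope root = Scope::NewRootScope();
        Output const0 = ops::Const(root.WithOpName("Const0"),
                                   {{1.0f, 2.0f}, {3.0f, 4.0f}});
        Output const1 = ops::Const(root.WithOpName("Const1"),
                                   {{5.0f, 6.0f}, {7.0f, 8.0f}});
        Output matmul0 = ops::MatMul(root.WithOpName("MatMul0"), const0, const0);
        Output matmul1 = ops::MatMul(root.WithOpName("MatMul1"), const1, const1);
        // MatMulCombined consumes both per-cluster results.
        ops::MatMul(root.WithOpName("MatMulCombined"), matmul0, matmul1);
        GraphDef graph_def;
        TF_CHECK_OK(root.ToGraphDef(&graph_def));
        return graph_def;
      }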
  2. tensorflow/compiler/mlir/quantization/tensorflow/tests/add_dump_tensor_op.mlir

    // WholeModel-DAG: "tf.DumpTensor"(%[[m1]]) <{enabled = true, file_name = "unquantized_tensor_data.pb", func_name = "matmul2", log_dir_path = "/tmp/dumps/composite_matmul_fn_1", node_name = "MatMul_1"}
    // WholeModel-DAG: return %[[m1]]
    
    // IntPerLayer-LABEL: func @matmul2
    - Last Modified: Fri Mar 22 22:55:22 UTC 2024
    - 37.9K bytes
  3. tensorflow/compiler/mlir/tensorflow/transforms/unroll_batch_matmul.cc

      std::vector<Value> sliced_rhs =
          sliceInput(input_rhs, bcast.y_batch_size(), loc, rewriter);
    
      // Compute (single batch) MatMul for each output batch.
      std::vector<Value> matmuls;
      matmuls.reserve(bcast.output_batch_size());
      for (int batch_idx : llvm::seq<int>(0, bcast.output_batch_size())) {
        int lhs_batch_idx, rhs_batch_idx;
        if (bcast.IsBroadcastingRequired()) {
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 11.6K bytes
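    For intuition, the unrolling above can be mimicked on plain nested vectors: slice out each batch, compute one single-batch matmul per output batch, and collect the results. A standalone C++ sketch (illustrative only; no broadcasting, all names invented):

      #include <vector>

      using Matrix = std::vector<std::vector<float>>;

      // Single-batch matmul: out[i][j] = sum over p of lhs[i][p] * rhs[p][j].
      Matrix SingleMatMul(const Matrix& lhs, const Matrix& rhs) {
        size_t m = lhs.size(), k = rhs.size(), n = rhs[0].size();
        Matrix out(m, std::vector<float>(n, 0.0f));
        for (size_t i = 0; i < m; ++i)
          for (size_t p = 0; p < k; ++p)
            for (size_t j = 0; j < n; ++j)
              out[i][j] += lhs[i][p] * rhs[p][j];
        return out;
      }

      // Unrolled batch matmul: one independent matmul per output batch,
      // mirroring the matmuls.reserve(...) / per-batch loop in the pass.
      std::vector<Matrix> UnrolledBatchMatMul(const std::vector<Matrix>& lhs,
                                              const std::vector<Matrix>& rhs) {
        std::vector<Matrix> matmuls;
        matmuls.reserve(lhs.size());
        for (size_t batch_idx = 0; batch_idx < lhs.size(); ++batch_idx)
          matmuls.push_back(SingleMatMul(lhs[batch_idx], rhs[batch_idx]));
        return matmuls;
      }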
  4. tensorflow/compiler/mlir/tensorflow/transforms/fused_kernel_matcher.cc

        if ((matmul->hasAttr("grad_a") &&
             mlir::cast<BoolAttr>(matmul->getAttr("grad_a")).getValue()) ||
            (matmul->hasAttr("grad_b") &&
             mlir::cast<BoolAttr>(matmul->getAttr("grad_b")).getValue())) {
          (void)rewriter.notifyMatchFailure(matmul, [&](Diagnostic &diag) {
            diag << "FusedMatMul kernel does not support grad_a/grad_b attrs";
          });
          return false;
        }
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 14.9K bytes
  5. tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/matmul.mlir

    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 1.8K bytes
  6. tensorflow/compiler/jit/xla_activity_listener_test.cc

          "/job:localhost/replica:0/task:0/device:CPU:0");
      Output a = ops::Placeholder(root.WithOpName("A"), DT_FLOAT);
      for (int i = 0; i < 5; i++) {
        a = ops::MatMul(root.WithOpName(absl::StrCat("matmul_", i)), a, a);
        a = ops::Add(root.WithOpName(absl::StrCat("add_", i)), a, a);
      }
    
      GraphDef graph_def;
      root.graph()->ToGraphDef(&graph_def);
      return graph_def;
    }
    
    - Last Modified: Thu Feb 22 08:47:20 UTC 2024
    - 5.9K bytes
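    One plausible way to consume the GraphDef this helper returns (assumed usage, not shown in the file): create a session, feed the "A" placeholder a square matrix, and fetch "add_4", the last name the StrCat loop generates:

      #include <memory>
      #include "tensorflow/core/framework/tensor.h"
      #include "tensorflow/core/public/session.h"

      // Hypothetical driver for the generated chain of MatMul/Add nodes.
      void RunChainedGraph(const tensorflow::GraphDef& graph_def) {
        using namespace tensorflow;
        std::unique_ptr<Session> session(NewSession(SessionOptions()));
        TF_CHECK_OK(session->Create(graph_def));

        // "A" must be square so that MatMul(a, a) is well-formed.
        Tensor input(DT_FLOAT, TensorShape({2, 2}));
        input.flat<float>().setConstant(1.0f);

        std::vector<Tensor> outputs;
        TF_CHECK_OK(session->Run({{"A", input}}, {"add_4"}, {}, &outputs));
      }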
  7. tensorflow/c/c_api_test.cc

                                       "gradients/MatMul", false, true);
        TF_Operation* matmul2 = MatMul(expected_graph_, s_, const0, const3,
                                       "gradients/MatMul_1", true, false);
        expected_grad_outputs[0] = {matmul1, 0};
        expected_grad_outputs[1] = {matmul2, 0};
      }
    
      TF_Tensor* FloatTensor2x2(const float* values) {
    - Last Modified: Mon Apr 15 03:35:10 UTC 2024
    - 96.9K bytes
  8. tensorflow/compiler/mlir/lite/tests/end2end/unroll_batch_matmul_disabled.pbtxt

    # RUN: tf_tfl_translate -unfold_batchmatmul=false -tf-input-arrays=Placeholder,Placeholder_1 -tf-input-shapes=2,5,3:3,7 -tf-input-data-types=DT_FLOAT,DT_FLOAT -tf-output-arrays=MatMul -output-mlir %s -o - 2>&1 | FileCheck %s
    
    node {
      name: "Placeholder"
      op: "Placeholder"
      attr {
        key: "dtype"
        value {
          type: DT_FLOAT
        }
      }
      attr {
        key: "shape"
        value {
          shape {
            dim {
              size: 2
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 1.5K bytes
  9. tensorflow/c/eager/c_api_distributed_test.cc

      ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
    
      TFE_Op* matmul = MatMulOp(ctx, h0_task1, h1_task1);
      TFE_OpSetDevice(matmul, remote_device_name, status);
      EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
    
      TFE_TensorHandle* retvals[1];
      int num_retvals = 1;
      TFE_Execute(matmul, &retvals[0], &num_retvals, status);
      EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
    - Last Modified: Thu Feb 15 09:49:45 UTC 2024
    - 23.5K bytes
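    Stripped of the remote-device setup, the TFE_Execute pattern in this snippet reduces to the local sketch below; RunMatMul is an invented wrapper, and the two input handles are assumed valid:

      #include "tensorflow/c/eager/c_api.h"

      // Hypothetical local analogue of the remote matmul above: build the
      // op, add both inputs, execute, and return the single result handle.
      TFE_TensorHandle* RunMatMul(TFE_Context* ctx, TFE_TensorHandle* h0,
                                  TFE_TensorHandle* h1, TF_Status* status) {
        TFE_Op* matmul = TFE_NewOp(ctx, "MatMul", status);
        if (TF_GetCode(status) != TF_OK) return nullptr;
        TFE_OpAddInput(matmul, h0, status);
        TFE_OpAddInput(matmul, h1, status);

        TFE_TensorHandle* retvals[1];
        int num_retvals = 1;
        TFE_Execute(matmul, &retvals[0], &num_retvals, status);
        TFE_DeleteOp(matmul);
        return TF_GetCode(status) == TF_OK ? retvals[0] : nullptr;
      }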
  10. tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_lifting.cc

      }
      return ConstantFoldOpIfPossible(value.getDefiningOp()).front();
    }
    
    // Matches convolution op with "NHWC" data format or matmul op with false adj_y.
    // The list of supported ops in this function is:
    // - Conv2DOp
    // - Conv3DOp
    // - DepthwiseConv2dNativeOp
    // - MatMulOp
    // - BatchMatMulV2Op
    LogicalResult MatchSupportedAffineOp(Operation* op, Value& binding_output,
    - Last Modified: Fri May 17 17:58:54 UTC 2024
    - 13.3K bytes
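    Judging from the comment alone, the op filter might look roughly like the hypothetical helper below; the real MatchSupportedAffineOp also binds the output value and checks the adj_y / data-format conditions, which this sketch omits:

      #include "mlir/IR/Operation.h"
      #include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"

      // Hypothetical filter for the five supported op kinds listed above.
      bool IsCandidateAffineOp(mlir::Operation* op) {
        return llvm::isa<mlir::TF::Conv2DOp, mlir::TF::Conv3DOp,
                         mlir::TF::DepthwiseConv2dNativeOp, mlir::TF::MatMulOp,
                         mlir::TF::BatchMatMulV2Op>(op);
      }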