- Sort: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 55 for matmul_0 (0.2 sec)
-
tensorflow/cc/framework/gradients_test.cc
auto dv = Const(scope, {{1.0, 1.0}, {1.0, 1.0}}); auto dt = MatMul(scope, dv, u, MatMul::TransposeB(true)); auto du = MatMul(scope, t, dv, MatMul::TransposeA(true)); auto dz = Const(scope, {{1.0, 1.0}, {1.0, 1.0}}); auto dx = MatMul(scope, dz, y, MatMul::TransposeB(true)); auto dy = MatMul(scope, x, dz, MatMul::TransposeA(true)); } else { // Call AddSymbolicGradients.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Apr 15 15:13:38 UTC 2023 - 25K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/add_dump_tensor_op.mlir
// WholeModel-DAG: "tf.DumpTensor"(%[[m1]]) <{enabled = true, file_name = "unquantized_tensor_data.pb", func_name = "matmul2", log_dir_path = "/tmp/dumps/composite_matmul_fn_1", node_name = "MatMul_1"} // WholeModel-DAG: return %[[m1]] // IntPerLayer-LABEL: func @matmul2
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 22 22:55:22 UTC 2024 - 37.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_weights.mlir
// CHECK: %[[MATMUL_3:.*]] = "tf.MatMul"(%arg0, %[[ORIGINAL_IDENTITY]]) <{transpose_a = false, transpose_b = false}> {attr_map = "0:transpose_a,1:transpose_a", device = ""} : (tensor<1x2x2x2xf32>, tensor<2x1024xf32>) -> tensor<*xf32> // CHECK: return %[[MATMUL_1]], %[[MATMUL_2]], %[[MATMUL_3]] : tensor<*xf32>, tensor<*xf32>, tensor<*xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 42K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/add_dump_tensor_op_stablehlo.mlir
// WholeModel-DAG: "tf.DumpTensor"(%[[matmul0_q]]) <{enabled = true, file_name = "unquantized_tensor_data.pb", func_name = "composite_dot_general_with_bias_and_relu6_dynamic_fn_2", log_dir_path = "/tmp/dumps/composite_dot_general_with_bias_and_relu6_dynamic_fn_2", node_name = "_empty_node"}> : (tensor<?x2xf32>) -> ()
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 22 22:55:22 UTC 2024 - 18K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/unroll_batch_matmul.cc
std::vector<Value> sliced_rhs = sliceInput(input_rhs, bcast.y_batch_size(), loc, rewriter); // Compute (single batch) MatMul for each output batch. std::vector<Value> matmuls; matmuls.reserve(bcast.output_batch_size()); for (int batch_idx : llvm::seq<int>(0, bcast.output_batch_size())) { int lhs_batch_idx, rhs_batch_idx; if (bcast.IsBroadcastingRequired()) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 11.6K bytes - Viewed (0) -
tensorflow/c/eager/custom_device_test.cc
num_retvals = 1; TFE_Execute(matmul.get(), &retval, &num_retvals, status.get()); ASSERT_NE(TF_OK, TF_GetCode(status.get())); ASSERT_TRUE(absl::StrContains(TF_Message(status.get()), custom0)); ASSERT_TRUE(absl::StrContains(TF_Message(status.get()), custom1)); // Custom device: mix of custom/physical places the op on the custom device.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Aug 27 23:39:24 UTC 2020 - 18.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/fused_kernel_matcher.cc
// Performs a fusion of the following pattern(s), if possible: // MatMulOp + BiasAdd + <Activation> -> _FusedMatMulOp class FuseMatMulBiasAdd : public FuseContractionWithBiasAdd<MatMulOp, _FusedMatMulOp> { using FuseContractionWithBiasAdd<MatMulOp, _FusedMatMulOp>::FuseContractionWithBiasAdd; bool AreFuseCompatible(MatMulOp matmul, BiasAddOp bias_add,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 14.9K bytes - Viewed (0) -
tensorflow/c/eager/c_api_unified_experimental_test.cc
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get()); // Build an abstract operation. auto* matmul_op = TF_NewAbstractOp(graph_ctx); TF_AbstractOpSetOpType(matmul_op, "MatMul", status.get()); ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get()); TF_AbstractOpSetOpName(matmul_op, "my_matmul", status.get()); ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 19 21:44:52 UTC 2023 - 39.1K bytes - Viewed (0) -
tensorflow/c/c_api_experimental_test.cc
TFE_Context* tfe_context_; }; TEST_F(ShapeInferenceTest, InfersShapesFromInputShapes) { TFE_Op* matmul_op; matmul_op = TFE_NewOp(tfe_context_, "MatMul", status_); CHECK_EQ(TF_OK, TF_GetCode(status_)) << TF_Message(status_); // Infer shape when everything is known. CheckOutputShapes(matmul_op, /*input_shapes*/ {make_shape({3, 2}), make_shape({2, 4})}, /*input_tensors*/ {},
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jan 17 22:27:52 UTC 2023 - 13.1K bytes - Viewed (0) -
tensorflow/compiler/aot/tests/tfcompile_test.cc
matmul.arg0(1, 0) = 4; matmul.arg0(1, 1) = 5; matmul.arg0(1, 2) = 6; matmul.arg1(0, 0) = 7; matmul.arg1(0, 1) = 8; matmul.arg1(1, 0) = 9; matmul.arg1(1, 1) = 10; matmul.arg1(2, 0) = 11; matmul.arg1(2, 1) = 12; EXPECT_TRUE(matmul.Run()); EXPECT_EQ(matmul.error_msg(), ""); const float results[4] = {58, 64, 139, 154}; for (int i = 0; i < 4; ++i) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Sep 06 19:12:29 UTC 2023 - 26.4K bytes - Viewed (0)