Results 1 - 10 of 32 for matmul_0 (0.14 sec)
tensorflow/compiler/mlir/quantization/tensorflow/tests/add_dump_tensor_op.mlir
// WholeModel-DAG: "tf.DumpTensor"(%[[m1]]) <{enabled = true, file_name = "unquantized_tensor_data.pb", func_name = "matmul2", log_dir_path = "/tmp/dumps/composite_matmul_fn_1", node_name = "MatMul_1"}
// WholeModel-DAG: return %[[m1]]
// IntPerLayer-LABEL: func @matmul2
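The checks above verify that the quantizer inserts `tf.DumpTensor` ops that write the unquantized output of each matmul composite under its `log_dir_path`, so whole-model and per-layer runs can be compared later. A rough conceptual sketch of that dump step in plain Python (hypothetical helper; the real op serializes to a `.pb` file, and `dump_tensor` here is not a TensorFlow API):

```python
import os
import numpy as np

def dump_tensor(tensor, log_dir_path, node_name, enabled=True):
    # Persist the unquantized tensor value under the composite's dump
    # directory so it can later be diffed against the quantized run.
    # (Illustrative only: tf.DumpTensor writes a .pb, not an .npy file.)
    if not enabled:
        return
    os.makedirs(log_dir_path, exist_ok=True)
    np.save(os.path.join(log_dir_path, node_name + ".npy"), np.asarray(tensor))

# Example: dump the output of the first matmul composite before quantization.
activation = np.random.rand(2, 2).astype(np.float32)
dump_tensor(activation, "/tmp/dumps/composite_matmul_fn_1", "MatMul_1")
```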
tensorflow/compiler/mlir/quantization/tensorflow/tests/add_dump_tensor_op_stablehlo.mlir
// WholeModel-DAG: "tf.DumpTensor"(%[[matmul0_q]]) <{enabled = true, file_name = "unquantized_tensor_data.pb", func_name = "composite_dot_general_with_bias_and_relu6_dynamic_fn_2", log_dir_path = "/tmp/dumps/composite_dot_general_with_bias_and_relu6_dynamic_fn_2", node_name = "_empty_node"}> : (tensor<?x2xf32>) -> ()
tensorflow/compiler/mlir/tensorflow/transforms/unroll_batch_matmul.cc
std::vector<Value> sliced_rhs =
    sliceInput(input_rhs, bcast.y_batch_size(), loc, rewriter);

// Compute (single batch) MatMul for each output batch.
std::vector<Value> matmuls;
matmuls.reserve(bcast.output_batch_size());
for (int batch_idx : llvm::seq<int>(0, bcast.output_batch_size())) {
  int lhs_batch_idx, rhs_batch_idx;
  if (bcast.IsBroadcastingRequired()) {
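The pass unrolls a batched MatMul into one single-batch MatMul per output batch, broadcasting whichever side has a batch dimension of 1. A NumPy sketch of the same unrolling (illustrative only, not the MLIR rewrite itself):

```python
import numpy as np

def unrolled_batch_matmul(lhs, rhs):
    """Batched matmul computed by slicing out each batch and running one
    2-D matmul per output batch, broadcasting a batch size of 1."""
    lhs_batch, rhs_batch = lhs.shape[0], rhs.shape[0]
    out_batch = max(lhs_batch, rhs_batch)
    matmuls = []
    for batch_idx in range(out_batch):
        # A batch dimension of 1 is reused for every output batch.
        lhs_idx = batch_idx if lhs_batch > 1 else 0
        rhs_idx = batch_idx if rhs_batch > 1 else 0
        matmuls.append(lhs[lhs_idx] @ rhs[rhs_idx])
    return np.stack(matmuls)

lhs = np.random.rand(4, 2, 3)
rhs = np.random.rand(1, 3, 5)   # batch dim of 1 broadcasts against 4
assert np.allclose(unrolled_batch_matmul(lhs, rhs), lhs @ rhs)
```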
tensorflow/compiler/mlir/tensorflow/transforms/fused_kernel_matcher.cc
// Performs a fusion of the following pattern(s), if possible:
//   MatMulOp + BiasAdd + <Activation> -> _FusedMatMulOp
class FuseMatMulBiasAdd
    : public FuseContractionWithBiasAdd<MatMulOp, _FusedMatMulOp> {
  using FuseContractionWithBiasAdd<MatMulOp, _FusedMatMulOp>::FuseContractionWithBiasAdd;

  bool AreFuseCompatible(MatMulOp matmul, BiasAddOp bias_add,
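The matcher folds a MatMul followed by BiasAdd and an optional activation into a single `_FusedMatMulOp`; the rewrite is only legal because the fused kernel computes exactly what the separate ops did. A NumPy check of that equivalence (Relu chosen as the activation for illustration):

```python
import numpy as np

def unfused(x, w, b):
    y = x @ w                   # MatMulOp
    y = y + b                   # BiasAddOp (bias broadcast over rows)
    return np.maximum(y, 0.0)   # <Activation>, here Relu

def fused_matmul(x, w, b):
    # Stand-in for a fused matmul+bias+relu kernel: one pass, same result.
    return np.maximum(x @ w + b, 0.0)

x = np.random.randn(8, 16)
w = np.random.randn(16, 4)
b = np.random.randn(4)
assert np.allclose(unfused(x, w, b), fused_matmul(x, w, b))
```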
tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_lifting.cc
  }
  return ConstantFoldOpIfPossible(value.getDefiningOp()).front();
}

// Matches convolution op with "NHWC" data format or matmul op with false adj_y.
// The list of supported ops in this function is:
// - Conv2DOp
// - Conv3DOp
// - DepthwiseConv2dNativeOp
// - MatMulOp
// - BatchMatMulV2Op
LogicalResult MatchSupportedAffineOp(Operation* op, Value& binding_output,
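`MatchSupportedAffineOp` only fires for the listed ops, and additionally requires NHWC layout for convolutions and a non-adjointed RHS for matmuls. A hypothetical Python sketch of that predicate over a toy op record (the real code inspects MLIR operations, not dicts):

```python
SUPPORTED_AFFINE_OPS = {
    "Conv2D", "Conv3D", "DepthwiseConv2dNative", "MatMul", "BatchMatMulV2",
}

def is_supported_affine_op(op):
    name = op["name"]
    if name not in SUPPORTED_AFFINE_OPS:
        return False
    if name.startswith("Conv") or name == "DepthwiseConv2dNative":
        # Convolutions are only matched in "NHWC" data format.
        return op.get("data_format") == "NHWC"
    # MatMul / BatchMatMulV2 are only matched when the RHS is not adjointed.
    return not op.get("adj_y", False)

assert is_supported_affine_op({"name": "Conv2D", "data_format": "NHWC"})
assert not is_supported_affine_op({"name": "MatMul", "adj_y": True})
```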
tensorflow/c/eager/c_api_distributed_test.cc
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);

TFE_Op* matmul = MatMulOp(ctx, h0_task1, h1_task1);
TFE_OpSetDevice(matmul, remote_device_name, status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);

TFE_TensorHandle* retvals[1];
int num_retvals = 1;
TFE_Execute(matmul, &retvals[0], &num_retvals, status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
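The test drives the eager C API directly: build a MatMul op, pin it to a remote device with `TFE_OpSetDevice`, and execute it. The rough Python-level equivalent, using the public eager API with a local device and arbitrary values (no cluster is assumed here):

```python
import tensorflow as tf

h0 = tf.constant([[1.0, 2.0], [3.0, 4.0]])
h1 = tf.constant([[5.0, 6.0], [7.0, 8.0]])

# Device placement corresponds to TFE_OpSetDevice in the C API test; here we
# pin to the local CPU instead of a remote task.
with tf.device("/device:CPU:0"):
    retval = tf.matmul(h0, h1)

print(retval.numpy())
```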
tensorflow/c/experimental/gradients/math_grad.cc
std::string name_grad_B = "MatMul_Grad_B";
if (!t_a && !t_b) {
  TF_RETURN_IF_ERROR(MatMul(ctx, upstream_grad, B.get(), &matmul_A_output,
                            /*transpose_a = */ false,
                            /*transpose_b = */ true, name_grad_A.c_str()));
  TF_RETURN_IF_ERROR(MatMul(ctx, A.get(), upstream_grad, &matmul_B_output,
                            /*transpose_a = */ true,
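For `C = A @ B` with neither input transposed, the backward pass is `dA = dC @ B^T` and `dB = A^T @ dC`, which is what the two `MatMul` calls above compute via `transpose_b` / `transpose_a`. A NumPy spot check (the finite-difference step is essentially exact here because the loss is linear in `A`):

```python
import numpy as np

A = np.random.randn(3, 4)
B = np.random.randn(4, 5)
upstream_grad = np.random.randn(3, 5)   # dL/dC for C = A @ B

# Neither input transposed (!t_a && !t_b):
grad_A = upstream_grad @ B.T   # MatMul(upstream_grad, B, transpose_b=True)
grad_B = A.T @ upstream_grad   # MatMul(A, upstream_grad, transpose_a=True)

# Finite-difference check of one entry of grad_A.
eps = 1e-6
A_pert = A.copy()
A_pert[0, 0] += eps
numeric = (np.sum((A_pert @ B) * upstream_grad)
           - np.sum((A @ B) * upstream_grad)) / eps
assert np.isclose(numeric, grad_A[0, 0], atol=1e-4)
```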
tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test_base.py
out = math_ops.matmul(input_tensor, self.filters, name='sample/matmul')
if bias_fn is not None:
  out = bias_fn(out, self.bias)
if activation_fn is not None:
  out = activation_fn(out)
return {'output': out}

model = MatmulModel(weight_shape)
saved_model_save.save(
    model, saved_model_path,
    signatures=model.matmul.get_concrete_function(
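The test base wraps a single matmul (plus optional bias and activation) in a model and saves it as a SavedModel for the quantizer to consume. A minimal stand-alone sketch with the public `tf.saved_model` API (shapes, paths, and the relu6 activation are illustrative choices, not taken from the test):

```python
import tensorflow as tf

class MatmulModel(tf.Module):
    def __init__(self, weight_shape):
        super().__init__()
        self.filters = tf.Variable(tf.random.uniform(weight_shape), name="filters")
        self.bias = tf.Variable(tf.zeros(weight_shape[-1]), name="bias")

    @tf.function
    def matmul(self, input_tensor):
        out = tf.matmul(input_tensor, self.filters, name="sample/matmul")
        out = tf.nn.bias_add(out, self.bias)
        return {"output": tf.nn.relu6(out)}

model = MatmulModel(weight_shape=(3, 2))
tf.saved_model.save(
    model, "/tmp/matmul_model",
    signatures=model.matmul.get_concrete_function(
        tf.TensorSpec(shape=(None, 3), dtype=tf.float32)))
```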
tensorflow/compiler/mlir/lite/transforms/prepare_patterns.td
        (TF_SubOp $beta, (TF_MulOp $m, $mul)))>;

class TFi32<int v> : ConstantAttr<I32ElementsAttr, !cast<string>(v)>;

// Matmul without transpose on b to matmul with explicit transpose op and
// transposed b.
def ConvertMatmulWithoutTransposeToWithTranspose :
    Pat<(TF_MatMulOp $a, $b, ConstBoolAttrFalse:$at, ConstBoolAttrFalse, $grad_a, $grad_b),
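The pattern turns `MatMul(a, b)` with `transpose_b = false` into an explicit transpose of `b` followed by `MatMul` with `transpose_b = true`. The identity it relies on, checked in NumPy:

```python
import numpy as np

a = np.random.randn(4, 3)
b = np.random.randn(3, 5)

# Original form: MatMul with transpose_b = false.
original = a @ b

# Rewritten form: insert an explicit transpose of b, then MatMul with
# transpose_b = true, i.e. multiply by the transpose of the transposed b.
b_transposed = b.T                 # explicit TransposeOp on b
rewritten = a @ b_transposed.T     # MatMul(..., transpose_b=True)

assert np.allclose(original, rewritten)
```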
tensorflow/compiler/mlir/quantization/tensorflow/passes/passes.h
std::unique_ptr<OperationPass<mlir::func::FuncOp>> CreateOptimizePass();

// Creates an instance of the ReplaceCastHacksWithTFXLAOpsPass, which will
// replace mixed-type convolution and matmul cast hacks by XLA Conv2DOp and
// MatmulOp.
std::unique_ptr<OperationPass<func::FuncOp>>
CreateReplaceCastHacksWithTFXLAOpsPass();

// Creates a pass that moves & merges initializer function's ops into the @main
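A "cast hack" here means a quantized matmul or convolution emulated by casting the integer operands to float; the pass swaps such patterns for native XLA integer ops. A NumPy illustration of the two paths agreeing on small int8 values (chosen so neither path overflows):

```python
import numpy as np

a = np.random.randint(-8, 8, size=(4, 16), dtype=np.int8)
b = np.random.randint(-8, 8, size=(16, 3), dtype=np.int8)

# "Cast hack": cast the int8 operands to float, matmul in float, cast back.
cast_hack = (a.astype(np.float32) @ b.astype(np.float32)).astype(np.int32)

# Native integer matmul with int32 accumulation, as an XLA op would do.
native = a.astype(np.int32) @ b.astype(np.int32)

assert np.array_equal(cast_hack, native)
```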