Results 41 - 50 of 58 for mat_mul (0.19 sec)
tensorflow/compiler/mlir/lite/transforms/legalize_tf.cc
DECL_CONVERT_OP(Assert);
DECL_CONVERT_OP(ConcatV2);
DECL_CONVERT_OP(BatchMatMul);
DECL_CONVERT_OP(BatchMatMulV2);
DECL_CONVERT_OP(BatchMatMulV3);
DECL_CONVERT_OP(MatMul);
DECL_CONVERT_OP(MatrixDiagV2);
DECL_CONVERT_OP(MatrixDiagV3);
DECL_CONVERT_OP(Pack);
DECL_CONVERT_OP(Split);
DECL_CONVERT_OP(SplitV);
DECL_CONVERT_OP(Unpack);
DECL_CONVERT_OP(Conv3D);
Last Modified: Mon May 20 20:06:54 UTC 2024 - 45.2K bytes
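These DECL_CONVERT_OP declarations register the TF-to-TFLite rewrite for each op, including MatMul. As a rough sketch of the user-facing path that exercises that legalization (the model and shapes here are illustrative assumptions, not taken from the file above):

    import tensorflow as tf

    @tf.function(input_signature=[tf.TensorSpec([2, 4], tf.float32)])
    def model(x):
        w = tf.constant(1.0, shape=[4, 3])
        # This tf.MatMul is what the MatMul converter legalizes to TFLite.
        return tf.matmul(x, w)

    converter = tf.lite.TFLiteConverter.from_concrete_functions(
        [model.get_concrete_function()])
    tflite_bytes = converter.convert()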
tensorflow/compiler/mlir/tfrt/tests/tf_to_corert/control_flow.mlir
%x = "tf.TensorArrayReadV3"(%handle_0, %index, %flow_0) {device = "/job:localhost/replica:0/task:0/device:CPU:0"} : (tensor<2x!tf_type.resource<tensor<?x100xf32>>>, tensor<i32>, tensor<f32>) -> tensor<?x100xf32> %y = "tf.MatMul"(%x, %cst) {device = "/job:localhost/replica:0/task:0/device:CPU:0"} : (tensor<?x100xf32>, tensor<100x512xf32>) -> (tensor<?x512xf32>)
Last Modified: Tue May 14 00:40:32 UTC 2024 - 17.5K bytes
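A rough Python-level equivalent of the two ops in this test (batch size 8 is an assumption; the element and weight shapes mirror the tensor types above):

    import tensorflow as tf

    @tf.function
    def read_and_matmul(index):
        # 2-element TensorArray of ?x100 tensors, mirroring the
        # tf.TensorArrayReadV3 operand types in the MLIR above.
        ta = tf.TensorArray(tf.float32, size=2, element_shape=[None, 100])
        ta = ta.write(0, tf.ones([8, 100]))
        ta = ta.write(1, tf.zeros([8, 100]))
        x = ta.read(index)          # tf.TensorArrayReadV3
        w = tf.ones([100, 512])     # %cst : tensor<100x512xf32>
        return tf.matmul(x, w)      # tf.MatMul -> tensor<?x512xf32>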
tensorflow/compiler/mlir/quantization/common/lift_as_function_call.cc
// identifier is the order of that attribute in `attributes`. This map
// is then used to set attributes in the quantized functions in the
// QuantizeCompositeFunctionsPass.
// For example, for tf.MatMul with `attributes` = {{"transpose_a", false},
// {"transpose_b", false}}, the generated attr_map is
// "0:transpose_a,1:transpose_b", where 0 and 1 are the respective attribute
// identifiers.
Last Modified: Fri May 17 17:58:54 UTC 2024 - 21.8K bytes
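A minimal Python sketch of the encoding the comment describes; build_attr_map is a hypothetical helper, but its output format matches the documented example:

    def build_attr_map(attributes):
        # Each entry becomes "<position>:<attribute name>"; the position is
        # the attribute's index in the ordered `attributes` list.
        return ",".join(f"{i}:{name}" for i, (name, _) in enumerate(attributes))

    attributes = [("transpose_a", False), ("transpose_b", False)]
    assert build_attr_map(attributes) == "0:transpose_a,1:transpose_b"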
tensorflow/compiler/jit/flags.cc
"(LRN, LRNGrad)." " BN: TF FusedBatchNorm* operations." " FUSIBLE: All TF operations that XLA can fuse (All the above). " "You can also put any TF operation name, e.g. 'FUSIBLE,MatMul'."), Flag("tf_xla_cluster_exclude_ops", &mark_for_compilation_flags->tf_xla_cluster_exclude_ops, "(experimental) " "Exclude the operations from auto-clustering. "
Last Modified: Wed Apr 17 18:52:57 UTC 2024 - 24.5K bytes
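Hedged usage sketch: these flags are read from the TF_XLA_FLAGS environment variable, so they must be set before TensorFlow initializes. 'FUSIBLE,MatMul' follows the example in the help text above; excluding Conv2D is an illustrative assumption:

    import os

    # Set before importing TensorFlow so the flags take effect.
    os.environ["TF_XLA_FLAGS"] = (
        "--tf_xla_ops_to_cluster=FUSIBLE,MatMul "
        "--tf_xla_cluster_exclude_ops=Conv2D"
    )
    import tensorflow as tf  # noqa: E402  (import after setting the flags)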
tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo.cc
auto matmul = rewriter.create<TF::BatchMatMulV3Op>(
    loc, RankedTensorType::get(matmul_shape, result_type.getElementType()),
    lhs_flattend, rhs_flattend);
if (result_type.hasStaticShape()) {
  auto reshaped =
      rewriter.create<mhlo::ReshapeOp>(loc, result_type, matmul.getResult());
  return reshaped.getResult();
}
Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 154.9K bytes
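An illustrative NumPy analogue of this lowering: both operands' batch dimensions are flattened into one, a single batched matmul runs, and the result is reshaped back to the static result shape (all shapes here are assumptions):

    import numpy as np

    lhs = np.random.rand(2, 3, 4, 5)   # batch dims (2, 3)
    rhs = np.random.rand(2, 3, 5, 6)

    lhs_flat = lhs.reshape(6, 4, 5)    # "lhs_flattend" in the C++ above
    rhs_flat = rhs.reshape(6, 5, 6)    # "rhs_flattend"
    matmul = np.matmul(lhs_flat, rhs_flat)   # BatchMatMulV3 analogue
    result = matmul.reshape(2, 3, 4, 6)      # mhlo.reshape back to result_type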
tensorflow/compiler/mlir/tf2xla/transforms/legalize_tf_patterns.td
foreach src = [TF_PreventGradientOp, TF_CheckNumericsOp] in
  def : Pat<(src $op, $msg), (replaceWithValue $op)>;

//===----------------------------------------------------------------------===//
// MatMul op patterns.
//===----------------------------------------------------------------------===//

def GetPrecisionConfig: NativeCodeCall<
  "GetPrecisionConfig(&$_builder)">;
Last Modified: Mon May 06 18:46:23 UTC 2024 - 34.8K bytes
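Both source ops are forward-pass identities, which is why the pattern can replace each with its operand ($op). A small sketch of that behavior at the Python level:

    import tensorflow as tf

    x = tf.constant([1.0, 2.0])
    # PreventGradient only affects gradient computation; CheckNumerics only
    # validates values. Both pass their input through unchanged.
    y = tf.raw_ops.PreventGradient(input=x, message="no grad here")
    z = tf.raw_ops.CheckNumerics(tensor=y, message="found NaN/Inf")
    assert bool(tf.reduce_all(tf.equal(x, z)))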
tensorflow/compiler/mlir/lite/stablehlo/transforms/optimize.cc
// %1 = mhlo.reshape %param : (1xCxZ) -> CxZ
// mhlo.dot_general %input, %1 {batch_dims = []}
// To:
// mhlo.dot_general %input, %param {batch_dims = [0]}
//
// This usage will mostly come from tf-unroll-batch-matmul, so it's fine to only
// handle the case where batching dim is the leftmost dim.
LogicalResult ConvertReshapeDotRhsToBatchedDot(mhlo::DotGeneralOp dot,
                                               PatternRewriter &rewriter) {
Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 26.9K bytes
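A NumPy sketch of why this rewrite is sound when the batching dim is the leftmost dim (C=4, Z=5 and the input shape are assumptions):

    import numpy as np

    inp = np.random.rand(1, 3, 4)      # leftmost dim is the batch dim
    param = np.random.rand(1, 4, 5)    # 1xCxZ

    # Before: reshape the rhs to CxZ, then a non-batched contraction.
    before = np.matmul(inp, param.reshape(4, 5))
    # After: keep the size-1 leftmost dim and treat it as a batch dim.
    after = np.matmul(inp, param)
    assert np.allclose(before, after)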
tensorflow/compiler/mlir/tensorflow/transforms/passes.h
// Guarantee that all FuncOp's have a single use.
std::unique_ptr<OperationPass<ModuleOp>> CreateGuaranteeAllFuncsOneUsePass();

// Optional pass which will unroll BatchMatMul and use only MatMul
std::unique_ptr<OperationPass<func::FuncOp>> CreateUnrollBatchMatMulPassPass();

// Optional pass which will map TF BatchMatMul to TF Einsum
std::unique_ptr<OperationPass<func::FuncOp>> CreateBatchMatMulToEinsumPass();
Last Modified: Wed Jun 12 21:18:05 UTC 2024 - 31.8K bytes
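What these two optional passes express, sketched at the Python level: a BatchMatMul equals a stack of per-slice MatMuls, and also a single Einsum (shapes are assumptions):

    import tensorflow as tf

    a = tf.random.normal([4, 2, 3])
    b = tf.random.normal([4, 3, 5])

    batched = tf.matmul(a, b)                                       # BatchMatMul
    unrolled = tf.stack([tf.matmul(a[i], b[i]) for i in range(4)])  # unrolled MatMuls
    einsum = tf.einsum("bij,bjk->bik", a, b)                        # Einsum form

    assert bool(tf.reduce_all(tf.abs(batched - unrolled) < 1e-5))
    assert bool(tf.reduce_all(tf.abs(batched - einsum) < 1e-5))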
tensorflow/compiler/jit/xla_launch_util.cc
//
// 2. Old fashion Tensor with raw device memory pointer. This case occurs
//    when the producer is a non-XLA TF GPU kernel or function (e.g.
//    tf.matmul).
//
// 3. AsyncValueTensor, containing a PjRtBuffer. This is the legacy mode
//    and certain device type (e.g. TPU) still uses this path.
AsyncValueTensor* av_tensor = AsyncValueTensor::FromTensor(tensor);
Last Modified: Thu May 16 00:36:08 UTC 2024 - 40.4K bytes
tensorflow/compiler/mlir/lite/tests/optimize.mlir
func.func @FuseMulWithFullyConnectedWithBias(%arg: tensor<2x512xf32>) -> tensor<2x1024xf32> {
  %cst_mul = arith.constant dense<2.0> : tensor<512xf32>
  %cst_weights = arith.constant dense<3.0> : tensor<1024x512xf32>
  %cst_bias = arith.constant dense<5.0> : tensor<1024xf32>
  %0 = "tfl.mul"(%arg, %cst_mul) {fused_activation_function = "NONE"} : (tensor<2x512xf32>, tensor<512xf32>) -> tensor<2x512xf32>
Last Modified: Thu May 16 20:31:41 UTC 2024 - 284.1K bytes
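A NumPy check of why this fusion is valid: the elementwise scale c distributes over the columns of the fully connected weights, so the tfl.mul can be folded into them (constants match the test above):

    import numpy as np

    x = np.random.rand(2, 512).astype(np.float32)
    c = np.full(512, 2.0, np.float32)          # %cst_mul
    W = np.full((1024, 512), 3.0, np.float32)  # %cst_weights
    b = np.full(1024, 5.0, np.float32)         # %cst_bias

    before = (x * c) @ W.T + b   # tfl.mul feeding fully_connected
    after = x @ (W * c).T + b    # mul folded into the weight columns
    assert np.allclose(before, after)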