- Sort by: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 40 for hasOneUse (0.9 sec)
-
tensorflow/compiler/mlir/lite/transforms/optimize_patterns.td
(FloatValueEquals<"0.797884583"> $Cst_sqrt_2dPi), (FloatValueEquals<"0.044715"> $Coeff), (HasOneUse $mul_out), (HasOneUse $add_out), (HasOneUse $tanh_out), (HasOneUse $mul_out1), (HasOneUse $add_out1), (HasOneUse $mul_out2), (HasOneUse $pow_out), ]>; // Alternate pattern for GeluApproximate (see different order for mul), replaces
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 16 20:31:41 UTC 2024 - 66.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/dilated_conv.h
squeeze_op = llvm::cast<TF::SqueezeOp>(consumer_op); if (!expand_op.getResult().hasOneUse()) { return rewriter.notifyMatchFailure( expand_op, "result for current op has more than 1 use"); } if (!squeeze_op.getResult().hasOneUse()) { return rewriter.notifyMatchFailure( squeeze_op, "result for current op has more than 1 use"); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 20K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/optimize.cc
first.getStrides().getSplatValue<IntegerAttr>().getInt() != 1 || first.getStrides() != second.getStrides()) return rewriter.notifyMatchFailure(concat, "slice ops must have stride=1"); if (!first->hasOneUse() || !second->hasOneUse()) return rewriter.notifyMatchFailure(concat, "slice ops are used elsewhere"); SmallVector<int64_t> new_start; SmallVector<int64_t> new_limit; SmallVector<int64_t> new_slice_shape;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 26.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/push_transpose_through_ewise.cc
return failure(); } // Compute inverse of input transpose. llvm::SmallVector<int32_t> inverse_perm_arr = InvertPermutation(perm1_arr); if (!(tpose_arg1->hasOneUse() || tpose_arg2->hasOneUse())) { return failure(); } auto current_out_type = llvm::dyn_cast<RankedTensorType>(op->getResult(0).getType()); auto new_out_type = RankedTensorType::get(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 12.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/optimize_layout.cc
RankedTensorType pad_type = pad_op.getType().cast<RankedTensorType>(); auto transpose_op = pad_input.getDefiningOp<stablehlo::TransposeOp>(); if (!transpose_op || !transpose_op->hasOneUse()) return failure(); Value transpose_input = transpose_op.getOperand(); ArrayRef<int64_t> transpose_perm = transpose_op.getPermutation(); SmallVector<int64_t> new_padding_low =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 21:59:06 UTC 2024 - 8.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/fused_kernel_matcher.cc
} // If the contraction is used in multiple places, fusing it will only create // more contraction nodes, which is slower. if (!contraction.getResult().hasOneUse()) return rewriter.notifyMatchFailure(contraction, "result is used by multiple ops"); BiasAddOp bias_add = GetBiasAdd(contraction.getResult()); if (!bias_add) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 14.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/post_quantize.cc
quantize_output.replaceAllUsesWith(new_arg); quantize_op.erase(); arg.dropAllUses(); bb.eraseArgument(0); }; // This is looking for a pattern: arg -> tfl.quantize if (arg.hasOneUse() && llvm::isa<QuantizeOp>(*arg.user_begin())) { auto quantize_op = llvm::cast<QuantizeOp>(*arg.user_begin()); remove_quantize_op(quantize_op); continue; }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 17.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_patterns.td
(TF_TransposeOp:$transpose_out1 $input, (Arith_ConstantOp:$permutation1 $p1)), (Arith_ConstantOp:$permutation2 $p2)), (TF_TransposeOp $input, (Arith_ConstantOp (RemapPermutation $permutation1, $permutation2))), [(HasOneUse $transpose_out1)]>; // Pattern to fuse trivial reshape op into transpose op def FoldTrivialReshapeIntoTranspose : Pat< (TF_ReshapeOp:$output
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 00:40:15 UTC 2024 - 10.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/optimize.cc
// - gather_nd->transpose->gather_nd->transpose. if (!gather_nd_first->hasOneUse()) return failure(); auto transpose_first = dyn_cast_or_null<TFL::TransposeOp>(*(gather_nd_first->user_begin())); if (!transpose_first || !transpose_first->hasOneUse()) return failure(); auto gather_nd_second = dyn_cast_or_null<TFL::GatherNdOp>(*(transpose_first->user_begin()));
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 00:40:15 UTC 2024 - 102.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_quantize.cc
// in the function while list. if (target_func != func_name && !llvm::is_contained(quantize_allowlist_, func_name)) { return false; } auto has_quantize_op = [&](const Value arg) { return (arg.hasOneUse() && llvm::isa<quantfork::QuantizeCastOp>(*arg.user_begin())); }; bool need_to_set_input_nodes_quantization_params = false; for (const BlockArgument arg : func.getArguments()) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 17.6K bytes - Viewed (0)