- Sort: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 28 for hasOneUse (0.78 sec)
-
tensorflow/compiler/mlir/lite/transforms/dilated_conv.h
squeeze_op = llvm::cast<TF::SqueezeOp>(consumer_op); if (!expand_op.getResult().hasOneUse()) { return rewriter.notifyMatchFailure( expand_op, "result for current op has more than 1 use"); } if (!squeeze_op.getResult().hasOneUse()) { return rewriter.notifyMatchFailure( squeeze_op, "result for current op has more than 1 use"); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 20K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/optimize.cc
first.getStrides().getSplatValue<IntegerAttr>().getInt() != 1 || first.getStrides() != second.getStrides()) return rewriter.notifyMatchFailure(concat, "slice ops must have stride=1"); if (!first->hasOneUse() || !second->hasOneUse()) return rewriter.notifyMatchFailure(concat, "slice ops are used elsewhere"); SmallVector<int64_t> new_start; SmallVector<int64_t> new_limit; SmallVector<int64_t> new_slice_shape;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 26.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/push_transpose_through_ewise.cc
return failure(); } // Compute inverse of input transpose. llvm::SmallVector<int32_t> inverse_perm_arr = InvertPermutation(perm1_arr); if (!(tpose_arg1->hasOneUse() || tpose_arg2->hasOneUse())) { return failure(); } auto current_out_type = llvm::dyn_cast<RankedTensorType>(op->getResult(0).getType()); auto new_out_type = RankedTensorType::get(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 12.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/fused_kernel_matcher.cc
} // If the contraction is used in multiple places, fusing it will only create // more contraction nodes, which is slower. if (!contraction.getResult().hasOneUse()) return rewriter.notifyMatchFailure(contraction, "result is used by multiple ops"); BiasAddOp bias_add = GetBiasAdd(contraction.getResult()); if (!bias_add) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 14.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/post_quantize.cc
quantize_output.replaceAllUsesWith(new_arg); quantize_op.erase(); arg.dropAllUses(); bb.eraseArgument(0); }; // This is looking for a pattern: arg -> tfl.quantize if (arg.hasOneUse() && llvm::isa<QuantizeOp>(*arg.user_begin())) { auto quantize_op = llvm::cast<QuantizeOp>(*arg.user_begin()); remove_quantize_op(quantize_op); continue; }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 17.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_patterns.td
(TF_TransposeOp:$transpose_out1 $input, (Arith_ConstantOp:$permutation1 $p1)), (Arith_ConstantOp:$permutation2 $p2)), (TF_TransposeOp $input, (Arith_ConstantOp (RemapPermutation $permutation1, $permutation2))), [(HasOneUse $transpose_out1)]>; // Pattern to fuse trivial reshape op into transpose op def FoldTrivialReshapeIntoTranspose : Pat< (TF_ReshapeOp:$output
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 00:40:15 UTC 2024 - 10.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_quantize.cc
// in the function while list. if (target_func != func_name && !llvm::is_contained(quantize_allowlist_, func_name)) { return false; } auto has_quantize_op = [&](const Value arg) { return (arg.hasOneUse() && llvm::isa<quantfork::QuantizeCastOp>(*arg.user_begin())); }; bool need_to_set_input_nodes_quantization_params = false; for (const BlockArgument arg : func.getArguments()) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 17.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_quantize_helper.h
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 18:01:23 UTC 2024 - 28K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver.cc
void QuantizationDriver::RequantizeArg(const BlockArgument arg, RequantizeStates& states) { Value value = arg; builder_.setInsertionPointToStart(arg.getOwner()); if (value.hasOneUse()) { Operation* user = value.use_begin().getUser(); if (auto q = dyn_cast<quantfork::QuantizeCastOp>(user)) { value = q.getResult(); builder_.setInsertionPoint(arg.getOwner(), ++Block::iterator(user));
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 38.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/quantization_patterns.cc
// If activation exists, omit clipping op. // Since out_scale and out_zp are computed based on clipped range, // explicit activation clipping op is not required. if (isa<AddOp>(next_op) && gemm_style_op->hasOneUse()) { // bias fusion CreateAndReturnQuantizedBiasPattern( next_op, rewriter, entry_func_op, func_result_type, accumulation_quantized_element_type, gemm_style_op);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 06:04:36 UTC 2024 - 41.7K bytes - Viewed (0)