- Sort by: Score
- Results per page: 10
- Languages: All
Results 61 - 70 of 150 for RewritePatternSet (0.22 sec)
-
tensorflow/compiler/mlir/lite/transforms/reduce_type_precision.cc
public: MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(ReduceTypePrecisionPass) void runOnOperation() override; }; void ReduceTypePrecisionPass::runOnOperation() { RewritePatternSet patterns(&getContext()); patterns.add<CheckRangeAndConvertI8ToI4, SanitizeGatherOpOutputToI4>( &getContext()); (void)applyPatternsAndFoldGreedily(getOperation(), std::move(patterns)); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/remove_unused_while_results.cc
TryPruneResultDefiningOp(while_op, result); } }); // Now eliminate passthrough operands/results with existing canonicalization // pattern. MLIRContext* context = &getContext(); RewritePatternSet patterns(context); TF::WhileRegionOp::getCanonicalizationPatterns(patterns, context); if (failed(applyPatternsAndFoldGreedily(func, std::move(patterns)))) { signalPassFailure(); } }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Nov 16 01:49:07 UTC 2022 - 5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/batchmatmul_to_einsum.cc
struct BatchMatMulToEinsumPass : public impl::BatchMatMulToEinsumPassBase<BatchMatMulToEinsumPass> { void runOnOperation() override; }; void BatchMatMulToEinsumPass::runOnOperation() { RewritePatternSet patterns(&getContext()); auto func = getOperation(); patterns.add<ConvertTFBatchMatMulToEinsumOp<TF::BatchMatMulOp>, ConvertTFBatchMatMulToEinsumOp<TF::BatchMatMulV2Op>>( &getContext());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 3.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/remove_var_init_by_const.cc
// ops will be erased during the optimization. rewriter.eraseOp(assign_op); } }; void RemoveVariableInitializationByConstPass::runOnOperation() { MLIRContext& ctx = getContext(); RewritePatternSet patterns(&ctx); patterns.add<RemoveVariableAssignmentByConst>(&ctx); ModuleOp module_op = getOperation(); func::FuncOp init_func_op = GetInitializerFunction(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Feb 03 12:04:03 UTC 2023 - 4.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/cast_bf16_ops_to_f32.cc
} } }; #include "tensorflow/compiler/mlir/quantization/tensorflow/passes/cast_bf16_ops_to_f32.inc" void CastBf16OpsToF32Pass::runOnOperation() { MLIRContext* ctx = &getContext(); RewritePatternSet patterns(ctx); auto module_op = getOperation(); patterns.add<CastBf16OpsToF32>(ctx); populateWithGenerated(patterns); if (failed(applyPatternsAndFoldGreedily(module_op, std::move(patterns)))) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sun Dec 10 05:52:02 UTC 2023 - 4.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/ir/ConvertConst.cc
rewriter.replaceOpWithNewOp<StorageCastOp>(qbarrier, qbarrier.getType(), newConstOp); return success(); } void ConvertConstPass::runOnOperation() { RewritePatternSet patterns(&getContext()); auto func = getOperation(); auto *context = &getContext(); patterns.add<QuantizedConstRewrite>(context); (void)applyPatternsAndFoldGreedily(func, std::move(patterns)); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 4.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_stablehlo_custom_call_to_composite.cc
target.addDynamicallyLegalOp<mlir::stablehlo::CustomCallOp>( [&](mlir::stablehlo::CustomCallOp op) { return op.getCallTargetName() != "stablehlo.composite"; }); RewritePatternSet patterns(context); patterns.add<ReplaceCustomCallWithComposite>(context); if (failed(applyPartialConversion(getOperation(), target, std::move(patterns)))) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 4.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/merge_fusion_with_dequantize.cc
return success(); } }; void MergeFusionWithDequantizePass::runOnOperation() { ModuleOp module_op = getOperation(); MLIRContext* ctx = module_op.getContext(); RewritePatternSet patterns(ctx); patterns.add<MergeFusionWithUniformDequantizePattern>(ctx); if (failed(applyPatternsAndFoldGreedily(module_op, std::move(patterns)))) { signalPassFailure(); } } } // namespace
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/gpu_fusion.cc
if (add_op) { rewriter.replaceOp(add_op, op->getResult(0)); } return success(); } }; void GpuOpFusionPass::runOnOperation() { func::FuncOp func = getOperation(); RewritePatternSet patterns(&getContext()); patterns.add<ReluToFusedBatchNorm>(&getContext()); (void)applyPatternsAndFoldGreedily(func, std::move(patterns)); } } // namespace
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Nov 03 12:35:38 UTC 2022 - 5.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/convert_custom_aggregation_op_to_quant_stats.cc
} }; static PassRegistration<ConvertCustomAggregationOpToQuantStatsPass> pass; void ConvertCustomAggregationOpToQuantStatsPass::runOnOperation() { MLIRContext *ctx = &getContext(); RewritePatternSet patterns(ctx); func::FuncOp func = getOperation(); patterns.add<ConvertCustomAggregationOpToQuantStats>(ctx); if (failed(applyPatternsAndFoldGreedily(func, std::move(patterns)))) { func.emitError()
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 4.8K bytes - Viewed (0)