Results 91 - 100 of 150 for RewritePatternSet (0.38 sec)

  1. tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_quantizable_spots_as_functions_drq.cc

    #include "tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_quantizable_spots_as_functions_drq.inc"
    
    void LiftQuantizableSpotsAsFunctionsDRQPass::runOnOperation() {
      MLIRContext* ctx = &getContext();
      RewritePatternSet patterns(ctx);
      ModuleOp module = getOperation();
    
      populateWithGenerated(patterns);
      patterns.add<CheckQuantizableOps>(ctx, quantization_method_, target_opset_,
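The excerpt above stops just before the pattern set is handed to a driver. The recurring shape across these results is: construct a RewritePatternSet, register TableGen-generated and hand-written patterns, then run the greedy rewrite driver and fail the pass if it does not converge. A minimal, self-contained sketch of that cycle (ExamplePass and ExamplePattern are illustrative names, not taken from the file above):

    // Minimal sketch of the RewritePatternSet cycle that recurs in these passes.
    // ExamplePass and ExamplePattern are illustrative, not TensorFlow code.
    #include "mlir/IR/PatternMatch.h"
    #include "mlir/Pass/Pass.h"
    #include "mlir/Transforms/GreedyPatternRewriteDriver.h"

    namespace {
    using namespace mlir;

    // A no-op pattern that matches any operation; real passes target concrete ops.
    struct ExamplePattern : public RewritePattern {
      explicit ExamplePattern(MLIRContext *ctx)
          : RewritePattern(MatchAnyOpTypeTag(), /*benefit=*/1, ctx) {}
      LogicalResult matchAndRewrite(Operation *op,
                                    PatternRewriter &rewriter) const override {
        return failure();  // nothing to rewrite in this sketch
      }
    };

    struct ExamplePass : public PassWrapper<ExamplePass, OperationPass<>> {
      MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(ExamplePass)

      void runOnOperation() override {
        MLIRContext *ctx = &getContext();
        RewritePatternSet patterns(ctx);
        patterns.add<ExamplePattern>(ctx);
        // The driver takes ownership of the set; abort the pipeline on failure.
        if (failed(applyPatternsAndFoldGreedily(getOperation(),
                                                std::move(patterns))))
          signalPassFailure();
      }
    };

    }  // namespace
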
  2. tensorflow/compiler/mlir/quantization/stablehlo/passes/nchw_convolution_to_nhwc.cc

      }
    };
    
    }  // namespace
    
    void NchwConvolutionToNhwcPass::runOnOperation() {
      func::FuncOp func_op = getOperation();
      MLIRContext& ctx = getContext();
    
      RewritePatternSet patterns(&ctx);
      patterns.add<RewriteNchwConvolutionToNhwc>(&ctx);
    
      if (failed(applyPatternsAndFoldGreedily(func_op, std::move(patterns)))) {
        func_op.emitError() << "Failed to run NchwConvolutionToNhwcPass.";
  3. tensorflow/compiler/mlir/quantization/tensorflow/passes/add_quantization_unit_loc.cc

        op->setLoc(unit_loc);
    
        return success();
      }
    };
    
    void AddQuantizationUnitLocPass::runOnOperation() {
      MLIRContext* ctx = &getContext();
      RewritePatternSet patterns(ctx);
      func::FuncOp func = getOperation();
    
      patterns.add<AddQuantizationUnitLoc>(ctx);
      if (failed(applyPatternsAndFoldGreedily(func, std::move(patterns)))) {
  4. tensorflow/compiler/mlir/lite/transforms/legalize_hashtables.cc

      void runOnOperation() override {
        auto module = getOperation();
    
        if (!checkWhetherGraphHasValidStaticLookupTables(module)) {
          return;
        }
    
        RewritePatternSet patterns(&getContext());
        patterns
            .add<LegalizeHashTableOpPattern, LegalizeHashTableFindOpPattern,
                 LegalizeHashTableImportOpPattern, LegalizeHashTableSizeOpPattern>(
                &getContext());
  5. tensorflow/compiler/mlir/quantization/stablehlo/passes/lift_quantizable_spots_as_functions.cc

                              std::move(*quantization_method_txtpb)));
        }
      }
      return success();
    }
    
    void LiftQuantizableSpotsAsFunctionsPass::runOnOperation() {
      MLIRContext* ctx = &getContext();
      RewritePatternSet patterns(ctx);
      ModuleOp module_op = getOperation();
    
      simple_patterns::populateWithGenerated(patterns);
      fusion_patterns::populateWithGenerated(patterns);
  6. tensorflow/compiler/mlir/lite/transforms/prepare_quantize.cc

      // LSTM's restrict_scale requirement should be handled before converting stats
      // to Q-DQ ops. The pattern is applied for non-PTQ case to make op ordering
      // consistent. Otherwise some FileCheck tests would fail.
      RewritePatternSet patterns_1(&getContext());
      if (quant_specs_.post_training_quantization) {
        patterns_1.add<PrepareLstmOutputScale<LSTMOp>>(ctx);
        patterns_1.add<PrepareLstmOutputScale<UnidirectionalSequenceLSTMOp>>(ctx);
      }
  7. tensorflow/compiler/mlir/tensorflow/transforms/optimize.cc

    // Canonicalize operations in functions.
    struct TensorFlowOptimizePass
        : public impl::TensorFlowOptimizePassBase<TensorFlowOptimizePass> {
      LogicalResult initialize(MLIRContext *context) override {
        RewritePatternSet pattern_list(context);
        populateWithGenerated(pattern_list);
        pattern_list.add<SimplifyBroadcastReshape>(context);
        patterns = std::move(pattern_list);
        return success();
      }
    
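This result differs from the others in that the patterns are built once in initialize() and stored in a pass member, so pattern construction is not repeated every time the pass runs. Below is a sketch of how that member is typically declared and consumed, assuming the cached FrozenRewritePatternSet is applied by the greedy driver in runOnOperation() (the part the excerpt does not show). CachedPatternsPass is an illustrative name; ExamplePattern is the no-op pattern from the sketch after result 1, and the includes are the same as there.

    // Illustrative initialize()/runOnOperation() split: the pattern set is
    // compiled (frozen) once and reused on every run of the pass.
    struct CachedPatternsPass
        : public PassWrapper<CachedPatternsPass, OperationPass<>> {
      MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(CachedPatternsPass)

      LogicalResult initialize(MLIRContext *context) override {
        RewritePatternSet pattern_list(context);
        pattern_list.add<ExamplePattern>(context);
        // Moving into the FrozenRewritePatternSet member freezes the set once.
        patterns = std::move(pattern_list);
        return success();
      }

      void runOnOperation() override {
        // The frozen member can be handed to the driver repeatedly as-is.
        if (failed(applyPatternsAndFoldGreedily(getOperation(), patterns)))
          signalPassFailure();
      }

      FrozenRewritePatternSet patterns;
    };
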
  8. tensorflow/compiler/mlir/quantization/stablehlo/passes/quantize_weight.cc

    // options and allow modular control of quantization specs.
    void QuantizeWeightPass::runOnOperation() {
      func::FuncOp func = getOperation();
      MLIRContext* ctx = func.getContext();
      RewritePatternSet patterns(ctx);
    
      patterns.add<QuantizeWeight>(ctx, quantization_component_spec_);
    
      FrozenRewritePatternSet frozen_patterns(std::move(patterns));
    
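Here the set is frozen explicitly inside runOnOperation() instead of being handed straight to the driver. One common reason to do that is to apply the same compiled set to several roots without rebuilding it each time; whether that is what quantize_weight.cc does is not visible in this excerpt. A hypothetical helper illustrating that use follows (applyToAllFuncs is not part of the file above; it assumes `using namespace mlir;` plus "mlir/IR/BuiltinOps.h" and "mlir/Dialect/Func/IR/FuncOps.h" in addition to the earlier includes):

    // Hypothetical helper: apply one frozen pattern set to every function in a
    // module, reusing the compiled set instead of rebuilding it per function.
    LogicalResult applyToAllFuncs(ModuleOp module,
                                  const FrozenRewritePatternSet &frozen) {
      for (func::FuncOp func : module.getOps<func::FuncOp>()) {
        if (failed(applyPatternsAndFoldGreedily(func, frozen)))
          return failure();
      }
      return success();
    }
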
  9. tensorflow/compiler/mlir/quantization/stablehlo/passes/convert_func_to_bfloat16.cc

     private:
      void runOnOperation() override;
    };
    
    void ConvertFuncToBfloat16Pass::runOnOperation() {
      func::FuncOp func_op = getOperation();
      MLIRContext* context = func_op.getContext();
      RewritePatternSet patterns(context);
    
      BFloat16TypeConverter converter;
      patterns.add<BFloat16TypePattern, BitcastConvertOpPattern>(converter,
                                                                 context);
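Unlike the previous results, the patterns here are parameterized by a type converter. Converter-aware pattern sets are usually driven either by the same greedy driver or by the dialect conversion framework with a ConversionTarget; the excerpt ends before showing which driver this pass uses, so the sketch below is one plausible shape, not that file's implementation. It assumes `using namespace mlir;` and #include "mlir/Transforms/DialectConversion.h"; the names are illustrative.

    // Sketch: drive converter-parameterized patterns through the dialect
    // conversion framework. Ops whose operand/result types already satisfy the
    // converter are legal; everything else must be rewritten by a pattern.
    LogicalResult convertWithTypeConverter(Operation *root, MLIRContext *ctx,
                                           TypeConverter &converter,
                                           RewritePatternSet &&patterns) {
      ConversionTarget target(*ctx);
      target.markUnknownOpDynamicallyLegal(
          [&](Operation *op) { return converter.isLegal(op); });
      return applyPartialConversion(root, target, std::move(patterns));
    }
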
  10. tensorflow/compiler/mlir/lite/stablehlo/transforms/unfuse_batch_norm_pass.cc

        // TODO(b/299514833): Remove TensorFlowDialect usage.
        registry.insert<shape::ShapeDialect, mlir::TF::TensorFlowDialect>();
      }
    
      void runOnOperation() override {
        RewritePatternSet patterns(&getContext());
        patterns.add<UnfuseBatchNormTrainingPattern>(&getContext());
        patterns.add<UnfuseBatchNormInferencePattern>(&getContext());
        if (failed(applyPatternsAndFoldGreedily(getOperation(),