Results 11 - 20 of 25 for FakeQuant (0.22 sec)

  1. tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.py

      saved_model_proto = saved_model_loader.parse_saved_model(saved_model_path)
      for meta_graph in saved_model_proto.meta_graphs:
        if any(
            node.op.startswith('FakeQuant') for node in meta_graph.graph_def.node
        ):
          return True
        for function in meta_graph.graph_def.library.function:
          if any(node.op.startswith('FakeQuant') for node in function.node_def):
            return True
      return False
    - Last Modified: Fri May 17 03:36:50 UTC 2024
    - 34.2K bytes
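    As an aside, the same detection can be sketched against the public SavedModel proto alone; parse_saved_model above is a TensorFlow-internal helper, so the has_fake_quant wrapper and the path handling below are illustrative assumptions, not part of this file.

      # Hypothetical standalone sketch of the FakeQuant scan above.
      import os

      from tensorflow.core.protobuf import saved_model_pb2

      def has_fake_quant(saved_model_path: str) -> bool:
        """True if any node in the SavedModel is a FakeQuant* op."""
        saved_model_proto = saved_model_pb2.SavedModel()
        with open(os.path.join(saved_model_path, 'saved_model.pb'), 'rb') as f:
          saved_model_proto.ParseFromString(f.read())
        for meta_graph in saved_model_proto.meta_graphs:
          # Top-level graph nodes.
          if any(node.op.startswith('FakeQuant')
                 for node in meta_graph.graph_def.node):
            return True
          # Nodes inside library functions (e.g. tf.function bodies).
          for function in meta_graph.graph_def.library.function:
            if any(node.op.startswith('FakeQuant')
                   for node in function.node_def):
              return True
        return False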
  2. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_config.h

      // valid when the quantization parameters need to be created by scanning the
      // constant content (post-training quantization or QAT without weight
      // FakeQuant).
      bool disable_per_channel = false;
    
      // Whether to disable per-channel weight quantization and enable legacy per
      // tensor quantization. The legacy quantization for Dense layers is
    - Last Modified: Wed Mar 13 10:16:19 UTC 2024
    - 10.8K bytes
  3. tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_lifting.cc

      SmallVector<T> result(max_size);
      for (size_t i : llvm::seq<size_t>(0, max_size)) {
        result[i] = get_value_at(a, i) * get_value_at(b, i);
      }
      return result;
    }
    
    // Multiplies the value followed by a FakeQuant op and adjusts the quantization
    // params. This function only supports symmetrically quantized values.
    Value MultiplyFakeQuantValue(OpBuilder& builder, Location loc, Value value,
                                 Value multiplier) {
    - Last Modified: Fri May 17 17:58:54 UTC 2024
    - 13.3K bytes
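    The symmetry requirement exists because, for a symmetric range, multiplying the input by m is equivalent to scaling the quantization range by |m|, so the multiplier can be absorbed into the quantization parameters. A small numeric check of that identity, with an illustrative multiplier and range:

      import tensorflow as tf

      x = tf.constant([-0.8, -0.1, 0.3, 0.7])
      m = 2.5  # illustrative positive multiplier being folded through

      # Fake-quantize with the original symmetric range, then multiply...
      a = m * tf.quantization.fake_quant_with_min_max_args(x, min=-1.0, max=1.0)
      # ...equals multiplying first and fake-quantizing with the range
      # scaled by the same factor.
      b = tf.quantization.fake_quant_with_min_max_args(m * x, min=-2.5, max=2.5)
      print(a.numpy(), b.numpy())  # match up to float rounding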
  4. tensorflow/compiler/mlir/lite/quantization/tensorflow/fallback_to_flex_ops.cc

    // can be parsed by the pass option.
    constexpr char kDefaultMode[] = "DEFAULT";
    constexpr char kLegacyIntegerMode[] = "LEGACY_INTEGER";
    
    // Checks if the operation is a TF FakeQuant op.
    bool IsTfFakeQuantOp(Operation *op) {
      return llvm::isa<
          // clang-format off
          TF::FakeQuantWithMinMaxArgsOp,
          TF::FakeQuantWithMinMaxVarsOp,
          TF::FakeQuantWithMinMaxVarsPerChannelOp
          // clang-format on
          >(op);
    }
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 12.2K bytes
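    The three op types matched above correspond to public TF Python APIs; a brief sketch with assumed ranges shows how they differ:

      import tensorflow as tf

      x = tf.random.uniform([4, 3], minval=-1.0, maxval=1.0)

      # Range fixed as op attributes at graph-construction time.
      y1 = tf.quantization.fake_quant_with_min_max_args(x, min=-1.0, max=1.0)

      # Range passed as tensors, so min/max can be learned during QAT.
      y2 = tf.quantization.fake_quant_with_min_max_vars(
          x, min=tf.constant(-1.0), max=tf.constant(1.0))

      # Per-channel: one (min, max) pair per slice of the last dimension.
      y3 = tf.quantization.fake_quant_with_min_max_vars_per_channel(
          x,
          min=tf.constant([-1.0, -0.5, -2.0]),
          max=tf.constant([1.0, 0.5, 2.0]))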
  5. tensorflow/compiler/mlir/quantization/tensorflow/passes/passes.h

    // TODO(b/204265523): Remove this pass once the MLIR-to-SavedModel export
    // path is available.
    std::unique_ptr<OperationPass<ModuleOp>> CreateInsertMainFunctionPass();
    
    // Converts FakeQuant ops to quant.qcast and quant.dcast (QDQ) pairs.
    std::unique_ptr<OperationPass<func::FuncOp>> CreateConvertFakeQuantToQdqPass();
    
    // Lifts the quantizable spots as composite functions.
    std::unique_ptr<OperationPass<ModuleOp>>
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 12.3K bytes
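    The rewrite is value-preserving because a FakeQuant op is numerically just a quantize/dequantize round trip. A minimal numpy sketch, with assumed int8 parameters:

      import numpy as np

      def qdq(x, scale, zero_point=0, qmin=-128, qmax=127):
        """Explicit quantize -> dequantize pair; numerically a fake-quant."""
        q = np.clip(np.round(x / scale) + zero_point, qmin, qmax)  # quantize
        return (q - zero_point) * scale                            # dequantize

      x = np.array([-1.0, -0.3, 0.0, 0.42, 0.9])
      print(qdq(x, scale=2.0 / 255.0))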
  6. tensorflow/compiler/mlir/lite/tf_tfl_passes.cc

    // it.
    void AddPreVariableFreezingTFToTFLConversionPasses(
        const mlir::TFL::PassConfig& pass_config,
        mlir::OpPassManager* pass_manager) {
      // This pass wraps all the tf.FakeQuant ops in a custom op so they are not
      // folded before being converted to tfl.quantize and tfl.dequantize ops.
      auto wrapped_ops = mlir::TFL::AllTfFakeQuantOps();
      pass_manager->addNestedPass<mlir::func::FuncOp>(
    - Last Modified: Thu Jun 06 18:45:51 UTC 2024
    - 25.5K bytes
  7. tensorflow/compiler/mlir/lite/transforms/optimize_patterns.td

         (Arith_ConstantOp ConstantAttr<RankedF32ElementsAttr<[]>, "0.166666666f">),
         TFL_AF_None),
        $x,
        TFL_AF_None),
      (TFL_HardSwishOp $x)>;
    
    // Matches HardSwish with an extra FakeQuant. These FakeQuant ops result
    // from incorrect placement during quantization-aware training.
    def MatchHardSwishQuantized : Pat<
      (TFL_MulOp (TFL_DequantizeOp (TFL_QuantizeOp
        (TFL_MulOp
    - Last Modified: Thu May 16 20:31:41 UTC 2024
    - 66.4K bytes
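    The 0.166666666f constant in the pattern is 1/6 from the hard-swish definition x * relu6(x + 3) / 6; the quantized variant additionally tolerates the quantize/dequantize pair that QAT inserted between the two multiplies. A numpy sketch of the computation being matched:

      import numpy as np

      def hard_swish(x):
        # The computation the pattern matches: x * relu6(x + 3) * (1/6).
        return x * np.clip(x + 3.0, 0.0, 6.0) * 0.166666666

      print(hard_swish(np.linspace(-4.0, 4.0, 9)))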
  8. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver.cc

              !dyn_cast<quantfork::QuantizeCastOp>(user)) {
        // Needs to scan the content of weights to get the quantization
        // parameters if none were provided by FakeQuant ops. In this case,
        // the weight will not be duplicated.
            weights_.insert(cst);
            if (spec->coeff_op_quant_dim.find(operand_num) !=
                spec->coeff_op_quant_dim.end()) {
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 38.1K bytes
  9. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.cc

      llvm::DenseSet<Operation*> redundant_stats_ops;
    
      // Step 0: remove any quantfork::StatisticsOp used by a quant.qcast op,
      // in case it overrides the information from training FakeQuant ops.
      func.walk([&](quantfork::QuantizeCastOp q) {
        auto input_op = q.getArg().getDefiningOp();
        if (auto stats = dyn_cast_or_null<quantfork::StatisticsOp>(input_op)) {
          q.setOperand(stats.getArg());
    - Last Modified: Wed May 08 02:10:16 UTC 2024
    - 43.2K bytes
  10. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h

      // outputs.
      llvm::DenseMap<SignedInteger, QuantParamsForResults> restricted_output_params;
    
      // Coefficient operand index and whether supporting per-channel quantization.
      // For QAT, this information is carried by the FakeQuant*/Quantize/Dequantize
      // ops, but for post-training quantization, the quantization parameters need to be
      // inferred from the tensor content and op property. A "-1" value indicates
    - Last Modified: Wed Apr 24 20:30:06 UTC 2024
    - 41.7K bytes
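    When no FakeQuant ops recorded the ranges, the parameters are recovered by scanning the weights themselves. A sketch of that inference, with a hypothetical symmetric_scales helper contrasting per-tensor and per-channel scales:

      import numpy as np

      def symmetric_scales(weights, quant_dim, qmax=127):
        """Infers symmetric int8 scales from tensor content alone."""
        # One scale for the whole tensor...
        per_tensor = np.max(np.abs(weights)) / qmax
        # ...or one per slice along the quantization dimension, reducing
        # over every other axis.
        axes = tuple(i for i in range(weights.ndim) if i != quant_dim)
        per_channel = np.max(np.abs(weights), axis=axes) / qmax
        return per_tensor, per_channel

      w = np.random.randn(3, 3, 8, 16).astype(np.float32)  # HWIO conv kernel
      pt, pc = symmetric_scales(w, quant_dim=3)
      print(pt, pc.shape)  # scalar vs. one scale per output channel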