Results 1 - 10 of 25 for FakeQuant (0.19 sec)

  1. tensorflow/compiler/mlir/quantization/common/ir/FakeQuantSupport.h

    limitations under the License.
    ==============================================================================*/
    //
    // This file defines support utilities for interoperating with FakeQuant* based
    // QAT (Quantization-Aware Training) computations, as implemented by TFLite. Note
    // that FakeQuant* operators mix multiple concerns specific to how TFLite
    // originally implemented quantization. As such, utilities here enforce
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Mar 21 11:52:27 UTC 2024
    - 3.7K bytes
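
    The header above maps FakeQuant attributes (min, max, num_bits, narrow_range) onto
    MLIR quantized types. Below is a minimal standalone sketch of the underlying
    affine-quantization arithmetic, assuming the conventional scale/zero-point
    convention rather than the library's exact nudging logic.

      #include <cmath>
      #include <cstdint>
      #include <cstdio>

      // Sketch only: derive a scale and zero point from a FakeQuant-style real
      // range [min, max], a bit width, and the narrow_range/is_signed flags.
      // The real FakeQuantSupport code additionally nudges the zero point so
      // that 0.0 stays exactly representable.
      struct QuantParams {
        double scale;
        int64_t zero_point;
      };

      QuantParams ComputeQuantParams(double min, double max, int num_bits,
                                     bool narrow_range, bool is_signed) {
        const int64_t qmin =
            (is_signed ? -(int64_t{1} << (num_bits - 1)) : 0) + (narrow_range ? 1 : 0);
        const int64_t qmax = is_signed ? (int64_t{1} << (num_bits - 1)) - 1
                                       : (int64_t{1} << num_bits) - 1;
        const double scale = (max - min) / static_cast<double>(qmax - qmin);
        const int64_t zero_point =
            static_cast<int64_t>(std::llround(qmin - min / scale));
        return {scale, zero_point};
      }

      int main() {
        // 8-bit unsigned FakeQuant over [-1.0, 1.0]: scale ~0.00784, zero point 128.
        QuantParams p = ComputeQuantParams(-1.0, 1.0, /*num_bits=*/8,
                                           /*narrow_range=*/false, /*is_signed=*/false);
        std::printf("scale=%f zero_point=%lld\n", p.scale,
                    static_cast<long long>(p.zero_point));
      }
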
  2. tensorflow/compiler/mlir/lite/utils/fake_quant_utils.h

    //              tfl.dequantize
    //                   |
    //
    //
    // Warns if the (most likely unwanted, currently not quite correctly handled)
    // case of back-to-back tf.FakeQuant occurs
    //
    //             tf.FakeQuant*
    //                   |
    //             tf.FakeQuant*
    //
    template <typename TFFakeQuantOp, bool PerAxis, class FetchMinMax>
    class InsertTFLQuantOpsAfterTFFakeQuantOp {
     public:
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 6.6K bytes
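
    The helper template above is instantiated once per tf.FakeQuant* variant; the next
    result shows the per-tensor instantiation in the corresponding .cc file. A sketch of
    such an instantiation, assuming the TensorFlow source tree and the mlir::TFL
    namespace (the alias name is illustrative):

      #include "tensorflow/compiler/mlir/lite/utils/fake_quant_utils.h"

      namespace mlir {
      namespace TFL {

      // Per-tensor case: tf.FakeQuantWithMinMaxArgs carries min/max as attributes,
      // so FetchMinMaxAttrs reads them straight off the op (mirrors result 3).
      using PreparePerTensorFakeQuant = InsertTFLQuantOpsAfterTFFakeQuantOp<
          TF::FakeQuantWithMinMaxArgsOp, /*PerAxis=*/false,
          FetchMinMaxAttrs<TF::FakeQuantWithMinMaxArgsOp>>;

      }  // namespace TFL
      }  // namespace mlir
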
  3. tensorflow/compiler/mlir/lite/utils/fake_quant_utils.cc

            TF::FakeQuantWithMinMaxArgsOp, /*PerAxis=*/false,
            FetchMinMaxAttrs<TF::FakeQuantWithMinMaxArgsOp>>;
    
    // Removes the wrapper of the tf.FakeQuant* ops and creates the tfl.quantize
    // and tfl.dequantize pairs before the tf.FakeQuant* ops are folded.
    LogicalResult ConvertFakeQuantOps(func::FuncOp func, MLIRContext* ctx,
                                      bool use_fake_quant_num_bits) {
      OpBuilder builder(func);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Jun 03 00:14:05 UTC 2023
    - 4.3K bytes
  4. tensorflow/compiler/mlir/quantization/tensorflow/utils/fake_quant_utils.cc

    // Removes the wrapper of the tf.FakeQuant* ops and creates the quant.qcast
    // and quant.dcast pairs before the tf.FakeQuant* ops are folded.
    LogicalResult ConvertFakeQuantOps(func::FuncOp func, MLIRContext* ctx,
                                      bool use_fake_quant_num_bits) {
      OpBuilder builder(func);
    
      // Insert the quant.qcast/quant.dcast ops in place of the tf.FakeQuant* ops to
      // preserve the quantization parameters.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Apr 19 00:13:50 UTC 2022
    - 2.7K bytes
  5. tensorflow/compiler/mlir/quantization/tensorflow/utils/fake_quant_utils.h

    //                   |                          |
    //
    // Warns if the (most likely unwanted, currently not quite correctly handled)
    // case of back-to-back tf.FakeQuant occurs
    //
    //             tf.FakeQuant*
    //                   |
    //             tf.FakeQuant*
    //
    template <typename TFFakeQuantOp, bool PerAxis, class FetchMinMax>
    class ConvertFakeQuantOpToQuantOps {
     public:
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 6.3K bytes
  6. tensorflow/compiler/mlir/quantization/tensorflow/passes/convert_fake_quant_to_qdq.cc

      if (failed(
              ConvertFakeQuantOps(func, ctx, /*use_fake_quant_num_bits=*/false))) {
        func.emitError() << "quant-convert-fake-quant-to-qdq pass failed.";
        signalPassFailure();
      }
    
      // For removing dead FakeQuant* ops
      RewritePatternSet patterns(ctx);
      if (failed(applyPatternsAndFoldGreedily(func, std::move(patterns)))) {
        signalPassFailure();
      }
    }
    
    }  // namespace
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 15 00:56:15 UTC 2023
    - 2.8K bytes
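
    The pass body above belongs to the quant-convert-fake-quant-to-qdq pass named in its
    error message. Below is a minimal sketch of scheduling it on a module with an MLIR
    PassManager; the factory name CreateConvertFakeQuantToQdqPass, its namespace, and
    the header path are assumptions, not confirmed by this result.

      #include "mlir/Dialect/Func/IR/FuncOps.h"
      #include "mlir/IR/BuiltinOps.h"
      #include "mlir/Pass/PassManager.h"
      #include "mlir/Support/LogicalResult.h"
      // Assumed header for the pass factory; adjust to the actual location.
      #include "tensorflow/compiler/mlir/quantization/tensorflow/passes/passes.h"

      // Runs the FakeQuant -> quantize/dequantize ("QDQ") conversion on every
      // function in the module. Factory name and namespace are assumptions.
      mlir::LogicalResult RunConvertFakeQuantToQdq(mlir::ModuleOp module) {
        mlir::PassManager pm(module.getContext());
        pm.addNestedPass<mlir::func::FuncOp>(
            mlir::quant::CreateConvertFakeQuantToQdqPass());
        return pm.run(module);
      }
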
  7. tensorflow/compiler/mlir/lite/transforms/prepare_tf.cc

              diag << "Skipping reordering between FakeQuant and "
                   << (*target_ops.begin())->getName()
                   << ", since there are other ops using the FakeQuant result.";
            });
          }
        }
      }
      return ::mlir::success();
    }
    
    // Reorder the FakeQuant operation for specific ops (ReorderOp).
    // The transformation pattern looks like the following:
    //
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 28 21:49:50 UTC 2024
    - 64.6K bytes
  8. tensorflow/compiler/mlir/lite/quantization/ir/Passes.h

    namespace mlir {
    namespace func {
    class FuncOp;
    }  // namespace func
    
    namespace quantfork {
    
    /// Creates a pass that converts quantization simulation operations (i.e.
    /// FakeQuant and those like it) to casts into/out of supported QuantizedTypes.
    std::unique_ptr<OperationPass<func::FuncOp>> createConvertSimulatedQuantPass();
    
    /// Creates a pass that converts constants followed by a qbarrier to a
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Jul 29 18:55:28 UTC 2022
    - 2.3K bytes
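
    createConvertSimulatedQuantPass, declared above, is the quantfork counterpart for
    lowering FakeQuant-style simulation ops. It can be wired into a pipeline the same
    way as the QDQ pass sketched under result 6; a minimal sketch, with the include path
    taken from this result and the surrounding wiring assumed:

      #include "mlir/Dialect/Func/IR/FuncOps.h"
      #include "mlir/IR/BuiltinOps.h"
      #include "mlir/Pass/PassManager.h"
      #include "mlir/Support/LogicalResult.h"
      #include "tensorflow/compiler/mlir/lite/quantization/ir/Passes.h"

      // Converts quantization simulation ops (FakeQuant and the like) into casts
      // into/out of supported QuantizedTypes, one function at a time.
      mlir::LogicalResult LowerSimulatedQuant(mlir::ModuleOp module) {
        mlir::PassManager pm(module.getContext());
        pm.addNestedPass<mlir::func::FuncOp>(
            mlir::quantfork::createConvertSimulatedQuantPass());
        return pm.run(module);
      }
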
  9. tensorflow/compiler/mlir/quantization/common/ir/FakeQuantSupport.cc

      Type storageType;
      int64_t qmin;
      int64_t qmax;
      if (getDefaultStorageParams(numBits, narrowRange, isSigned, ctx, storageType,
                                  qmin, qmax)) {
        return (emitError(loc, "unsupported FakeQuant number of bits: ") << numBits,
                nullptr);
      }
    
      // Special case where min/max is close enough. The tensor contents are all
      // 0.0s, so the scale is set to 1.0 and the tensor can be quantized to zero
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Mar 21 11:52:27 UTC 2024
    - 7.7K bytes
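
    The snippet above picks storage parameters (storageType, qmin, qmax) from
    numBits/narrowRange/isSigned and then handles the degenerate case where min and max
    nearly coincide. A sketch of that special case, assuming an illustrative tolerance
    and zero-point choice rather than the library's exact policy:

      #include <cmath>
      #include <cstdint>
      #include <limits>

      struct QuantParams {
        double scale;
        int64_t zero_point;
      };

      // When the FakeQuant range collapses (e.g. an all-zero tensor with
      // min == max == 0.0) there is nothing to scale: a scale of 1.0 keeps the
      // type well formed and real 0.0 maps exactly onto the chosen zero point.
      QuantParams ComputeScaleAndZeroPoint(double min, double max,
                                           int64_t qmin, int64_t qmax) {
        if (std::fabs(max - min) < std::numeric_limits<double>::epsilon()) {
          return {/*scale=*/1.0, /*zero_point=*/qmin};  // illustrative choice
        }
        const double scale = (max - min) / static_cast<double>(qmax - qmin);
        return {scale, static_cast<int64_t>(std::llround(qmin - min / scale))};
      }
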
  10. tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_lifting.td

    // Checks if a value can be symmetrically quantized.
    def CanBeSymmetricallyQuantized : Constraint<CPred<"CanBeSymmetricallyQuantized($0)">>;
    
    // Multiplies the value followed by a FakeQuant op and adjusts its params.
    def MultiplyFakeQuantValue : NativeCodeCall<
      "MultiplyFakeQuantValue($_builder, $_loc, $0...)">;
    
    // Convert AddV2Op following an AffineOp to BiasAddOp.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 14 03:24:59 UTC 2024
    - 8.4K bytes
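
    MultiplyFakeQuantValue above is a NativeCodeCall into C++; the idea is that a
    constant multiply adjacent to a FakeQuant can be folded by rescaling the FakeQuant's
    range. The sketch below shows that arithmetic only, for a scalar multiplier; it is
    not the actual helper, which operates on MLIR values and attributes.

      #include <algorithm>
      #include <utility>

      // fake_quant(x, min, max) * m behaves like fake_quant(x * m, min * m, max * m)
      // for the same bit width, with min/max swapped when m < 0, because scaling the
      // range scales every quantization level by the same factor.
      std::pair<double, double> ScaleFakeQuantRange(double min, double max,
                                                    double multiplier) {
        double new_min = min * multiplier;
        double new_max = max * multiplier;
        if (multiplier < 0.0) std::swap(new_min, new_max);
        return {new_min, new_max};
      }
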