Results 1 - 5 of 5 for AffineQuantizedOpInterface (0.26 sec)

  1. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization.td

        InterfaceMethod<
          [{Returns the fixed output range.}],
          "UniformQuantizedType", "GetFixedOutputRange",
          (ins "bool":$sign, "int":$bit_width)
        >,
      ];
    }
    
    def AffineQuantizedOpInterface : OpInterface<
      "AffineQuantizedOpInterface"> {
      let description = [{
        Interface for affine quantized ops (conv2d, fully_connected, etc.)
      }];
    
      let methods = [
        InterfaceMethod<
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 05 07:39:40 UTC 2024
    - 8.3K bytes
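
    The TableGen excerpt above is cut off before the method list of
    AffineQuantizedOpInterface, but results 3 and 5 below call
    GetAffineOperandIndex() and RequiredNarrowRangeAffineOperand() on the
    generated C++ interface. A minimal sketch of querying it from a pass,
    assuming only those two methods (the header that declares the generated
    interface is not shown in these excerpts and would also be needed):

      // Sketch only: query the ODS-generated interface the way the passes in
      // results 3 and 5 do. The interface's own header is omitted because its
      // path is not confirmed by these excerpts.
      #include "llvm/Support/Casting.h"
      #include "mlir/IR/Operation.h"

      bool IsNarrowRangeAffineOperand(mlir::Operation* op, int operand_index) {
        // dyn_cast yields a null interface handle if `op` does not implement it.
        auto affine_op = llvm::dyn_cast<mlir::AffineQuantizedOpInterface>(op);
        if (!affine_op) return false;
        // Is this operand the affine coefficient (e.g. a conv filter), and must
        // it be quantized with a narrow range?
        return affine_op.GetAffineOperandIndex() == operand_index &&
               affine_op.RequiredNarrowRangeAffineOperand();
      }
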
  2. tensorflow/compiler/mlir/lite/ir/tfl_ops.td

    class TFL_ConvOp<string mnemonic, string opSummary, int index,
                     list<Trait> additional_traits = []> :
        TFL_Op<mnemonic,[Pure,
                         AccumulatorUniformScale<2, 0, 1>,
                         AffineQuantizedOpInterface,
                         AffineOpCoefficient<index, 1>,
                         QuantizableResult,
                         TFL_SparseOp] # additional_traits> {
      let summary = opSummary # " operator";
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jun 06 19:09:08 UTC 2024
    - 186K bytes
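
    TFL_ConvOp pairs the interface with AccumulatorUniformScale<2, 0, 1>.
    Reading the template arguments as "operand 2 (the bias) derives its scale
    from operands 0 and 1" is an assumption based on the trait name rather
    than these excerpts, but it matches the standard affine-quantization rule
    that the accumulator scale is the product of the input and filter scales.
    A self-contained sketch of that rule, independent of MLIR:

      // Standard affine-quantization accumulator rule, per-tensor case:
      //   s_acc = s_input * s_filter, zero point 0.
      // Plain C++ illustration of the math only; not TensorFlow code.
      #include <cstdint>

      struct QuantParams {
        double scale;
        int64_t zero_point;
      };

      QuantParams AccumulatorParams(const QuantParams& input,
                                    const QuantParams& filter) {
        // Bias values are added to sums of input*filter products, so the bias
        // scale must equal the product of the two operand scales for integer
        // arithmetic to stay consistent.
        return {input.scale * filter.scale, /*zero_point=*/0};
      }
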
  3. tensorflow/compiler/mlir/lite/transforms/prepare_quantize_dynamic_range.cc

        int bit_width = quant_specs_.GetQuantizationTypeWidth();
    
        Operation* quantize_op = quant_op.first;
        int quantize_operand_num = quant_op.second;
    
        auto affine_user = dyn_cast<AffineQuantizedOpInterface>(quantize_op);
    
        bool op_with_per_axis_support = false;
    
        if (!llvm::dyn_cast_or_null<CustomOp>(quantize_op)) {
          bool op_with_narrow_range =
              affine_user &&
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 20.8K bytes
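
    The prepare_quantize_dynamic_range.cc excerpt breaks off mid-condition, so
    the actual expression is not shown here. The decision it is setting up can
    still be sketched: an operand gets narrow-range treatment only when its
    user implements the interface, requires a narrow range, and the operand
    being quantized is that user's affine operand. The helper below is an
    illustration built from the method names visible on this page, not the
    file's real code:

      // Sketch of the narrow-range decision; not the contents of the file.
      #include "llvm/Support/Casting.h"
      #include "mlir/IR/Operation.h"

      bool OpWithNarrowRange(mlir::Operation* quantize_op,
                             int quantize_operand_num) {
        auto affine_user =
            llvm::dyn_cast_or_null<mlir::AffineQuantizedOpInterface>(quantize_op);
        return affine_user && affine_user.RequiredNarrowRangeAffineOperand() &&
               affine_user.GetAffineOperandIndex() == quantize_operand_num;
      }
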
  4. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver.cc

      // FC and Conv* ops. Restriction for the weight can be relaxed if there are
      // needs for adjusting scale of variable weights.
      auto affine_op = dyn_cast<AffineQuantizedOpInterface>(op);
      auto bias_op = op->getOperand(bias_index).getDefiningOp<arith::ConstantOp>();
      if (!affine_op || !bias_op || input_indices.size() != 2) return false;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 38.1K bytes
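
    The quantization_driver.cc excerpt shows only the preconditions: the op
    must implement the interface, the bias must come from an arith::ConstantOp,
    and exactly two input indices must be involved. Once an adjusted bias scale
    is chosen, requantizing a constant bias is plain arithmetic; the standalone
    sketch below (not taken from the file) shows that step with
    s_bias = s_input * s_weight:

      // Requantize a float bias with the accumulator scale s_bias = s_in * s_w.
      // Standalone illustration; real pipelines also clamp to the int32 range
      // and handle per-channel scales.
      #include <cmath>
      #include <cstdint>
      #include <vector>

      std::vector<int32_t> QuantizeBias(const std::vector<float>& bias,
                                        double input_scale,
                                        double weight_scale) {
        const double bias_scale = input_scale * weight_scale;
        std::vector<int32_t> quantized;
        quantized.reserve(bias.size());
        for (float b : bias) {
          // Round to the nearest integer multiple of the bias scale.
          quantized.push_back(static_cast<int32_t>(std::lround(b / bias_scale)));
        }
        return quantized;
      }
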
  5. tensorflow/compiler/mlir/lite/flatbuffer_import.cc

        }
        for (auto& use : value.getUses()) {
          Operation* user = use.getOwner();
          if (user->hasTrait<mlir::OpTrait::IsTerminator>()) continue;
    
          auto affine_user = llvm::dyn_cast<mlir::AffineQuantizedOpInterface>(user);
          if (affine_user &&
              affine_user.GetAffineOperandIndex() == use.getOperandNumber() &&
              affine_user.RequiredNarrowRangeAffineOperand())
            continue;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 21 18:21:50 UTC 2024
    - 66.8K bytes
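
    The flatbuffer_import.cc excerpt walks every use of a value and skips users
    that legitimately consume it as a narrow-range affine operand. Packaged as
    a helper, the same walk might look like the sketch below, which reuses only
    the calls visible in the excerpt and is not the actual import logic:

      // Sketch: does any non-terminator user consume `value` as its
      // narrow-range affine operand? Mirrors the use-walk in the excerpt.
      #include "llvm/Support/Casting.h"
      #include "mlir/IR/OpDefinition.h"
      #include "mlir/IR/Operation.h"
      #include "mlir/IR/Value.h"

      bool HasNarrowRangeAffineUse(mlir::Value value) {
        for (mlir::OpOperand& use : value.getUses()) {
          mlir::Operation* user = use.getOwner();
          if (user->hasTrait<mlir::OpTrait::IsTerminator>()) continue;
          auto affine_user =
              llvm::dyn_cast<mlir::AffineQuantizedOpInterface>(user);
          if (affine_user &&
              affine_user.GetAffineOperandIndex() == use.getOperandNumber() &&
              affine_user.RequiredNarrowRangeAffineOperand())
            return true;
        }
        return false;
      }
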