Results 31 - 40 of 49 for zero_point (0.19 sec)

  1. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h

        } else if (auto aqtype = qtype.template dyn_cast<
                                 quant::UniformQuantizedPerAxisType>()) {
          auto zero_points = aqtype.getZeroPoints();
          llvm::SmallVector<int64_t, 4> new_zero_points(zero_points.begin(),
                                                        zero_points.end());
          for (int i = 0, e = new_zero_points.size(); i < e; ++i) {
            new_zero_points[i] -= offset;
          }
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Apr 24 20:30:06 UTC 2024
    - 41.7K bytes
    - Viewed (0)
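
    The snippet above subtracts a fixed offset from every per-axis zero point (for example when converting between signed and unsigned storage). A minimal standalone sketch of the same arithmetic, with made-up values and none of the MLIR types:

        #include <cstdint>
        #include <iostream>
        #include <vector>

        int main() {
          // Per-axis zero points as a quantized type would store them.
          std::vector<int64_t> zero_points = {128, 130, 125};
          // Moving storage from uint8 to int8 shifts every zero point by -128.
          const int64_t offset = 128;
          for (int64_t& zp : zero_points) zp -= offset;
          for (int64_t zp : zero_points) std::cout << zp << ' ';  // prints 0 2 -3
          std::cout << '\n';
        }
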
  2. tensorflow/compiler/mlir/lite/ir/tfl_ops.td

        auto result_type = getOutput().getType();
        // central_value = min_value / 2 + (max_value - 1) / 2 + 1
        // zero_point = central_value
        // scale = 1. / (central_value - min_value)
        return quant::GetFixedOutputRange(is_signed, bit_width, result_type,
            /*scale=*/1.0 / (1<<(bit_width-1)), /*zero_point=*/0);
      }
      }];
    }
    
    def TFL_LeakyReluOp: TFL_Op<"leaky_relu", [
        SameOperandsAndResultShape,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jun 06 19:09:08 UTC 2024
    - 186K bytes
    - Viewed (0)
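
    Taking the comments above literally for signed 8-bit storage, the fixed output range works out as below. This is only a standalone check of the arithmetic, not code from the repository:

        #include <cstdio>

        int main() {
          // Worked example of the comment's formulas for bit_width = 8, signed.
          const int bit_width = 8;
          const int min_value = -(1 << (bit_width - 1));                      // -128
          const int max_value = (1 << (bit_width - 1)) - 1;                   //  127
          const int central_value = min_value / 2 + (max_value - 1) / 2 + 1;  //    0
          const double scale = 1.0 / (central_value - min_value);             // 1/128
          std::printf("zero_point=%d scale=%g\n", central_value, scale);
        }

    With these values, 1.0 / (central_value - min_value) equals 1.0 / (1 << (bit_width - 1)), which is exactly the constant passed to GetFixedOutputRange above.
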
  3. tensorflow/compiler/mlir/lite/stablehlo/transforms/compose_uniform_quantized_type_pass.cc

    // uniform_dequantize functions. Returns `failure()` if it doesn't match.
    LogicalResult MatchZeroPointsOperand(Value zero_points) {
      if (!zero_points) {
        LLVM_DEBUG(llvm::dbgs() << "Zero point value is empty.\n");
        return failure();
      }
    
      auto zero_points_type =
          mlir::dyn_cast_or_null<TensorType>(zero_points.getType());
      if (!zero_points_type) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 64.6K bytes
    - Viewed (0)
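
    The matcher above rejects a missing zero-points operand and then checks its type. The real code works on MLIR Values and TensorTypes; the stand-in below only illustrates the shape of that check, with a hypothetical MatchZeroPoints helper and an assumed per-channel count:

        #include <cstddef>
        #include <cstdint>
        #include <iostream>
        #include <optional>
        #include <vector>

        // Hypothetical stand-in: reject a missing operand, then validate what it
        // carries. The real matcher uses mlir::dyn_cast_or_null<TensorType> and
        // returns failure() on a mismatch.
        bool MatchZeroPoints(const std::optional<std::vector<int64_t>>& zero_points,
                             std::size_t expected_channels) {
          if (!zero_points) {
            std::cerr << "Zero point value is empty.\n";
            return false;
          }
          return zero_points->size() == expected_channels;
        }

        int main() {
          std::cout << MatchZeroPoints(std::vector<int64_t>{0, 0, 0}, 3) << '\n';  // 1
          std::cout << MatchZeroPoints(std::nullopt, 3) << '\n';                   // 0
        }
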
  4. tensorflow/compiler/mlir/lite/flatbuffer_import.cc

    // If the input `tensor` has scale/zero_point, `res` should have quantized
    // type, thus no stats op is required and nullptr is returned.
    // If the min max information is invalid, nullptr is returned.
    mlir::Operation* ConvertMinMaxToStatsOp(const TensorT& tensor, OpBuilder b,
                                            Value res) {
      // If the `tensor` has scale/zero_point, it must have been quantized, then the
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 21 18:21:50 UTC 2024
    - 66.8K bytes
    - Viewed (0)
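
    The comment above spells out when a stats op is needed. The sketch below only illustrates that decision, using a hypothetical TensorInfo struct in place of the flatbuffer TensorT and the MLIR builder the real function uses:

        #include <cstdint>
        #include <iostream>
        #include <vector>

        // Hypothetical stand-in for the tensor's quantization metadata.
        struct TensorInfo {
          std::vector<float> scale;         // non-empty => already quantized
          std::vector<int64_t> zero_point;  // non-empty => already quantized
          std::vector<float> min, max;      // calibration range, may be absent
        };

        bool NeedsStatsOp(const TensorInfo& tensor) {
          // Already quantized: the result carries scale/zero_point, so no stats
          // op is required (the real function returns nullptr here).
          if (!tensor.scale.empty() || !tensor.zero_point.empty()) return false;
          // Missing or invalid min/max: nothing to attach either.
          if (tensor.min.empty() || tensor.max.empty()) return false;
          return true;
        }

        int main() {
          std::cout << NeedsStatsOp({{0.5f}, {0}, {}, {}}) << '\n';    // 0: quantized
          std::cout << NeedsStatsOp({{}, {}, {-1.f}, {1.f}}) << '\n';  // 1: needs stats
        }
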
  5. tensorflow/compiler/mlir/quantization/stablehlo/passes/quantization_patterns.cc

        const double result_scale = input_scale * filter_scale;
    
        accumulation_quantized_element_type = CreateI32F32UniformQuantizedType(
            gemm_style_op->getLoc(), *rewriter.getContext(), result_scale,
            /*zero_point=*/0);
    
        new_gemm_style_op_result_type = gemm_style_op_result_type.cloneWith(
            gemm_style_shape, accumulation_quantized_element_type);
      }
    
      gemm_style_op_result.setType(new_gemm_style_op_result_type);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 03 06:04:36 UTC 2024
    - 41.7K bytes
    - Viewed (0)
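
    The pass above gives the int32 accumulator the scale input_scale * filter_scale and zero point 0. A small standalone check of why that product is the right accumulation scale, with made-up quantization parameters:

        #include <cstdint>
        #include <iostream>

        int main() {
          // Real values are recovered as (q - zero_point) * scale, so a sum of
          // products of two quantized operands is expressed in units of
          // input_scale * filter_scale, with zero point 0.
          const double input_scale = 0.02, filter_scale = 0.005;
          const int32_t input_zp = 3, filter_zp = 0;
          const int32_t xq[2] = {10, -7}, wq[2] = {50, 20};

          int32_t acc = 0;
          for (int i = 0; i < 2; ++i) acc += (xq[i] - input_zp) * (wq[i] - filter_zp);

          const double result_scale = input_scale * filter_scale;  // as in the pass
          std::cout << "acc=" << acc << " real=" << acc * result_scale << '\n';
          // Prints acc=150 real=0.015, which matches the float dot product
          // 0.14 * 0.25 + (-0.2) * 0.1.
        }
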
  6. tensorflow/compiler/mlir/lite/schema/schema_generated.h

      { auto _e = zero_point();
        if (_e) {
          _o->zero_point.resize(_e->size());
          for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) {
            _o->zero_point[_i] = _e->Get(_i);
          }
        } else {
          _o->zero_point.resize(0);
        }
      }
      { auto _e = details_type(); _o->details.type = _e; }
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 21 18:21:50 UTC 2024
    - 1M bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/tensorflow/ir/tf_generated_ops.td

      let summary = "Perform dequantization on the quantized Tensor `input`.";
    
      let description = [{
    Given quantized `input` which was quantized using `scales` and `zero_points`, performs dequantization using the formula:
    dequantized_data = (quantized_data - zero_point) * scale.
      }];
    
      let arguments = (ins
        Arg<TensorOf<[TF_Qint32, TF_Qint8]>, [{Must be a Tensor of Tin.}]>:$input,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jun 11 23:24:08 UTC 2024
    - 793K bytes
    - Viewed (0)
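
    A minimal numeric check of the dequantization formula quoted above, with made-up scale and zero point:

        #include <cstdint>
        #include <iostream>
        #include <vector>

        int main() {
          // dequantized_data = (quantized_data - zero_point) * scale
          const float scale = 0.5f;
          const int32_t zero_point = 10;
          const std::vector<int8_t> quantized = {10, 12, 6};
          for (int8_t q : quantized)
            std::cout << (q - zero_point) * scale << ' ';  // prints 0 1 -2
          std::cout << '\n';
        }
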
  8. tensorflow/compiler/mlir/lite/flatbuffer_export.cc

        std::vector<float> scales = {static_cast<float>(qtype.getScale())};
        std::vector<int64_t> zero_points = {qtype.getZeroPoint()};
        q_params = tflite::CreateQuantizationParameters(
            builder_, /*min=*/0, /*max=*/0, builder_.CreateVector<float>(scales),
            builder_.CreateVector<int64_t>(zero_points));
      } else if (auto qtype = mlir::dyn_cast<mlir::quant::CalibratedQuantizedType>(
                     element_type)) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 21:41:49 UTC 2024
    - 164.5K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_traits.h

    // template argument list.
    template <unsigned BitWidth, int ZeroPoint, int ScaleMantissa, int ScaleExp,
              int64_t StorageTypeMin, int64_t StorageTypeMax, bool Sign>
    class FixedResultUniformScale {
     public:
      template <typename ConcreteType>
      class Impl
          : public QuantizationSpecTraitBase<
                ConcreteType, FixedResultUniformScale<
                                  BitWidth, ZeroPoint, ScaleMantissa, ScaleExp,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 05 07:39:40 UTC 2024
    - 5.8K bytes
    - Viewed (0)
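
    The trait above passes the fixed result scale as an integer mantissa/exponent pair, presumably because a floating-point value cannot be a non-type template parameter before C++20. The sketch below shows only that encoding trick; base 10 for the exponent is an assumption here, and the storage-range parameters are left out:

        #include <cmath>
        #include <cstdio>

        // Hypothetical cut-down version of the trait's parameter list, kept just
        // to show how an integer mantissa and exponent encode a fractional scale.
        template <unsigned BitWidth, int ZeroPoint, int ScaleMantissa, int ScaleExp>
        struct FixedScaleParams {
          static double Scale() { return ScaleMantissa * std::pow(10.0, ScaleExp); }
          static int zero_point() { return ZeroPoint; }
        };

        int main() {
          // E.g. an 8-bit result fixed to scale 7.8125e-3 (1/128), zero point 0.
          using Params = FixedScaleParams<8, 0, 78125, -7>;
          std::printf("scale=%g zero_point=%d\n", Params::Scale(), Params::zero_point());
        }
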
  10. tensorflow/compiler/mlir/quantization/common/ir/FakeQuantSupport.cc

      SmallVector<double, 4> scales;
      SmallVector<int64_t, 4> zeroPoints;
      scales.reserve(axisSize);
      zeroPoints.reserve(axisSize);
      for (size_t axis = 0; axis != axisSize; ++axis) {
        double rmin = rmins[axis];
        double rmax = rmaxs[axis];
        if (std::fabs(rmax - rmin) < std::numeric_limits<double>::epsilon()) {
          scales.push_back(1.0);
          zeroPoints.push_back(qmin);
          continue;
        }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Mar 21 11:52:27 UTC 2024
    - 7.7K bytes
    - Viewed (0)
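
    The loop above falls back to scale 1.0 and zero point qmin when an axis has a (near-)degenerate range. The standalone sketch below adds the usual affine formula for the non-degenerate case; the exact rounding and nudging done in FakeQuantSupport.cc may differ:

        #include <algorithm>
        #include <cmath>
        #include <cstddef>
        #include <cstdint>
        #include <iostream>
        #include <limits>
        #include <vector>

        int main() {
          // Per-axis calibration ranges and the signed 8-bit storage range.
          const std::vector<double> rmins = {-1.0, 0.0};
          const std::vector<double> rmaxs = {1.0, 0.0};  // second axis is degenerate
          const int64_t qmin = -128, qmax = 127;

          std::vector<double> scales;
          std::vector<int64_t> zeroPoints;
          for (std::size_t axis = 0; axis != rmins.size(); ++axis) {
            const double rmin = rmins[axis], rmax = rmaxs[axis];
            if (std::fabs(rmax - rmin) < std::numeric_limits<double>::epsilon()) {
              // Degenerate range: same fallback as in the snippet above.
              scales.push_back(1.0);
              zeroPoints.push_back(qmin);
              continue;
            }
            // Usual affine parameters for a non-degenerate range.
            const double scale = (rmax - rmin) / static_cast<double>(qmax - qmin);
            const int64_t zp = std::clamp<int64_t>(
                static_cast<int64_t>(std::llround(qmin - rmin / scale)), qmin, qmax);
            scales.push_back(scale);
            zeroPoints.push_back(zp);
          }
          for (std::size_t i = 0; i < scales.size(); ++i)
            std::cout << "axis " << i << ": scale=" << scales[i]
                      << " zero_point=" << zeroPoints[i] << '\n';
        }
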