Results 41 - 50 of 69 for quantized_type (0.87 sec)

  1. tensorflow/compiler/mlir/lite/transforms/quantize_variables.cc

          auto dq_op = dyn_cast_or_null<DequantizeOp>(value_op);
          if (dq_op) {
            Type output_type = dq_op.getInput().getType();
            auto qtype = quant::QuantizedType::getQuantizedElementType(output_type);
            if (qtype == quant::QuantizedType::getQuantizedElementType(ref_qtype)) {
              // Same quantization parameters, remove it.
              builder.setInsertionPoint(assign_variable_op);
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 8.5K bytes
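    A minimal sketch of the check in this excerpt, using only the public
    `quant::QuantizedType` helpers. `IsRedundantDequantize` is a hypothetical
    name, not part of the pass, and the include path follows the LLVM revision
    this tree pins:

        #include "mlir/Dialect/Quant/QuantTypes.h"  // from @llvm-project
        #include "mlir/IR/Value.h"  // from @llvm-project

        // A DequantizeOp is a no-op when its input already resolves to the
        // same quantized element type as the reference type.
        bool IsRedundantDequantize(mlir::Value dq_input, mlir::Type ref_qtype) {
          auto qtype = mlir::quant::QuantizedType::getQuantizedElementType(
              dq_input.getType());
          return qtype &&
                 qtype == mlir::quant::QuantizedType::getQuantizedElementType(
                              ref_qtype);
        }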
  2. tensorflow/compiler/mlir/lite/experimental/tac/transforms/device_transform.cc

          auto input_type = input.get().getType();
          if (IsQI8Type(input_type) || IsQUI8Type(input_type) ||
              IsQI32Type(input_type)) {
            auto dequantized_input_type =
                mlir::quant::QuantizedType::castToExpressedType(input_type);
            builder->setInsertionPoint(op);
            auto dequantize_op = builder->create<TFL::DequantizeOp>(
                op->getLoc(), dequantized_input_type, input.get());
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 8.4K bytes
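    For context, `castToExpressedType` is what derives the float result type of
    the inserted `TFL::DequantizeOp`. A minimal sketch (hypothetical helper
    name):

        #include "mlir/Dialect/Quant/QuantTypes.h"  // from @llvm-project

        // Maps e.g. tensor<4x!quant.uniform<i8:f32, ...>> to tensor<4xf32>,
        // the expressed type the dequantize op produces.
        mlir::Type DequantizedResultType(mlir::Type quantized_input_type) {
          return mlir::quant::QuantizedType::castToExpressedType(
              quantized_input_type);
        }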
  3. tensorflow/compiler/mlir/lite/transforms/modify_io_nodes.cc

        if (arg.hasOneUse() && llvm::isa<QuantizeOp>(*arg.user_begin())) {
          auto quantize_op = llvm::cast<QuantizeOp>(*arg.user_begin());
          auto quantize_output = quantize_op.getOutput();
          auto current_type = quant::QuantizedType::getQuantizedElementType(
                                  quantize_output.getType())
                                  .getStorageType();
          if (current_type == input_type) {  // int8 == int8
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 8.9K bytes
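    The comparison in this excerpt works on storage types. A minimal sketch of
    that extraction (hypothetical helper, same caveats as above):

        #include "mlir/Dialect/Quant/QuantTypes.h"  // from @llvm-project

        // For tensor<...x!quant.uniform<i8:f32, ...>> the storage type is i8,
        // which the pass compares against the requested I/O type.
        mlir::Type StorageTypeOf(mlir::Type tensor_type) {
          auto qtype =
              mlir::quant::QuantizedType::getQuantizedElementType(tensor_type);
          return qtype ? qtype.getStorageType() : mlir::Type();
        }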
  4. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/insert_weight_param.mlir

    // CHECK: return %[[CALL]] : tensor<1x3xf32>
    
    // -----
    
    // Test that q/dq pair with per-channel quantization parameter is inserted
    // between constant and XlaCallModule op with `weight_only_ptq` method of
    // `quantized_type` without specified quantization dimension and function name
    // containing conv.
    
    module attributes {tf_saved_model.semantics} {
    - Last Modified: Thu May 09 05:56:10 UTC 2024
    - 22K bytes
  5. tensorflow/compiler/mlir/lite/utils/variables_utils.cc

        if (complex_element_type.isF32() || complex_element_type.isF64())
          return true;
      }
      // Check quantized types.
      if (auto quant_type = element_type.dyn_cast<mlir::quant::QuantizedType>()) {
        // TFLite supports QI16, QI32, QI8, and QUI8
        if ((quant_type.getStorageTypeIntegralWidth() == 16 &&
             quant_type.isSigned()) ||
            quant_type.getStorageTypeIntegralWidth() == 8 ||
    - Last Modified: Mon Jun 21 19:32:03 UTC 2021
    - 2.6K bytes
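    The condition is truncated in the excerpt. A plausible completion, assuming
    the comment's list (QI16 and QI32 signed, QI8 and QUI8 at width 8) is
    exhaustive; the helper name is hypothetical:

        #include "mlir/Dialect/Quant/QuantTypes.h"  // from @llvm-project

        // Assumed completion of the truncated predicate: 16- and 32-bit
        // storage must be signed (QI16, QI32); 8-bit storage may be signed
        // or unsigned (QI8, QUI8).
        bool IsTFLiteSupportedQuantWidth(mlir::quant::QuantizedType quant_type) {
          const unsigned width = quant_type.getStorageTypeIntegralWidth();
          return (width == 16 && quant_type.isSigned()) || width == 8 ||
                 (width == 32 && quant_type.isSigned());
        }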
  6. tensorflow/compiler/mlir/lite/transforms/default_quant_params.cc

    }
    
    quant::QuantParams DefaultQuantParamsPass::GetQuantParamsForBias(
        Operation *op, int bias, const std::vector<int> &non_biases,
        quant::AccumulatorScaleFunc func) {
      std::vector<quant::QuantizedType> non_bias_types;
      non_bias_types.reserve(non_biases.size());
      for (int non_bias : non_biases) {
        Operation *non_bias_define = op->getOperand(non_bias).getDefiningOp();
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 9.4K bytes
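    `GetQuantParamsForBias` applies an accumulator-scale rule to the non-bias
    operand types it collects here. The conventional rule (an assumption for
    illustration, not quoted from this pass) makes the int32 bias scale the
    product of the input and weight scales:

        // Conventional accumulator-scale rule (assumed): the bias is
        // quantized to int32 with scale = input_scale * weight_scale and
        // zero point 0, so int32 accumulation of products stays exact.
        double BiasScale(double input_scale, double weight_scale) {
          return input_scale * weight_scale;
        }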
  7. tensorflow/compiler/mlir/lite/quantization/ir/Passes.h

    namespace func {
    class FuncOp;
    }  // namespace func
    
    namespace quantfork {
    
    /// Creates a pass that converts quantization simulation operations (i.e.
    /// FakeQuant and those like it) to casts into/out of supported QuantizedTypes.
    std::unique_ptr<OperationPass<func::FuncOp>> createConvertSimulatedQuantPass();
    
    /// Creates a pass that converts constants followed by a qbarrier to a
    /// constant whose value is quantized. This is typically one of the last
    - Last Modified: Fri Jul 29 18:55:28 UTC 2022
    - 2.3K bytes
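    A minimal sketch of wiring this pass into a pipeline, assuming the
    quantfork library is linked in; the pass-manager plumbing is standard MLIR,
    not part of this header:

        #include "mlir/Dialect/Func/IR/FuncOps.h"  // from @llvm-project
        #include "mlir/Pass/PassManager.h"  // from @llvm-project
        #include "tensorflow/compiler/mlir/lite/quantization/ir/Passes.h"

        // Run the FakeQuant -> QuantizedType-cast conversion on every
        // function in the module.
        void AddSimulatedQuantLowering(mlir::PassManager &pm) {
          pm.addNestedPass<mlir::func::FuncOp>(
              mlir::quantfork::createConvertSimulatedQuantPass());
        }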
  8. tensorflow/compiler/mlir/quantization/common/ir/UniformSupport.h

    #include "mlir/IR/Types.h"  // from @llvm-project
    #include "mlir/Support/LLVM.h"  // from @llvm-project
    
    namespace mlir::quantfork {
    
    // Performs type conversion from an arbitrary input type to a type
    // that is expressed by a QuantizedType.
    //
    // This handles cases where the inputType is a supported primitive type
    // (i.e. f32, bf16, etc) or a vector/tensor type based on a supported
    // elemental type.
    //
    - Last Modified: Wed May 08 02:10:16 UTC 2024
    - 9.8K bytes
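    A minimal usage sketch, assuming the fork keeps upstream MLIR's
    `forInputType` factory; the `expressed_type` member and the `convert`
    signature are visible in result 10 below:

        #include "mlir/Dialect/Quant/QuantTypes.h"  // from @llvm-project
        #include "tensorflow/compiler/mlir/quantization/common/ir/UniformSupport.h"

        // Derive the quantized container type matching an expressed input,
        // e.g. tensor<2x2xf32> plus !quant.uniform<i8:f32, ...> yields
        // tensor<2x2x!quant.uniform<i8:f32, ...>>.
        mlir::Type ToQuantizedType(mlir::Type input_type,
                                   mlir::quant::QuantizedType elemental_type) {
          auto converter =
              mlir::quantfork::ExpressedToQuantizedConverter::forInputType(
                  input_type);
          if (!converter.expressed_type) return {};  // unsupported input type
          return converter.convert(elemental_type);
        }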
  9. tensorflow/compiler/mlir/lite/stablehlo/transforms/unfold_splat_constant_pass.cc

        if (splat_elements_attr.getNumElements() == 1) {
          return;
        }
        auto element_type = splat_elements_attr.getType().getElementType();
        if (mlir::isa<ComplexType>(element_type) ||
            mlir::isa<quant::QuantizedType>(element_type)) {
          return;
        }
        op_builder->setInsertionPoint(const_op);
        Value scalar = op_builder->create<mhlo::ConstantOp>(
            const_op->getLoc(),
            DenseElementsAttr::get(
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 3.8K bytes
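    The two early returns in this excerpt are the whole eligibility rule; a
    minimal restatement as a standalone predicate (hypothetical helper):

        #include <cstdint>

        #include "mlir/Dialect/Quant/QuantTypes.h"  // from @llvm-project
        #include "mlir/IR/BuiltinTypes.h"  // from @llvm-project

        // Multi-element splats unfold into a scalar constant plus a
        // broadcast, unless the element type is complex or quantized,
        // which the pass leaves folded.
        bool CanUnfoldSplat(int64_t num_elements, mlir::Type element_type) {
          return num_elements > 1 &&
                 !mlir::isa<mlir::ComplexType>(element_type) &&
                 !mlir::isa<mlir::quant::QuantizedType>(element_type);
        }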
  10. tensorflow/compiler/mlir/quantization/common/ir/UniformSupport.cc

        return ExpressedToQuantizedConverter{input_type, input_type};
      // Unsupported.
      return ExpressedToQuantizedConverter{input_type, nullptr};
    }
    
    Type ExpressedToQuantizedConverter::convert(
        quant::QuantizedType elemental_type) const {
      assert(expressed_type && "convert() on unsupported conversion");
      if (auto tensor_type = dyn_cast<RankedTensorType>(input_type))
        return RankedTensorType::get(tensor_type.getShape(), elemental_type);
    - Last Modified: Wed May 08 02:10:16 UTC 2024
    - 4.3K bytes