Results 71 - 80 of 203 for dequantize (0.34 sec)

  1. tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_lifting.cc

            per_axis_type.getStorageTypeMin(), per_axis_type.getStorageTypeMax());
      }
    
      auto quantize = builder.create<quantfork::QuantizeCastOp>(
          q_op.getLoc(), new_value_type.clone(new_qtype), new_value);
      auto dequantize = builder.create<quantfork::DequantizeCastOp>(
          dq_op.getLoc(), new_value_type, quantize.getResult());
      return dequantize.getResult();
    }
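    For context, a minimal sketch of the IR this code emits (the op spellings and the i8 quantized type are assumptions for illustration, not taken from this file): a quantize cast followed by a dequantize cast that round-trips the new value through the new quantized type.

      // Hypothetical result: new_value is quantized to new_qtype, then
      // immediately dequantized back to the expressed (float) type.
      %q  = "quantfork.qcast"(%new_value) : (tensor<2x2xf32>) -> tensor<2x2x!quant.uniform<i8:f32, 2.000000e-02>>
      %dq = "quantfork.dcast"(%q) : (tensor<2x2x!quant.uniform<i8:f32, 2.000000e-02>>) -> tensor<2x2xf32>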
    
  2. tensorflow/compiler/mlir/lite/stablehlo/transforms/passes.td

        * A tensor is dequantized using a `func::FuncOp` whose name contains
          "uniform_dequantize". The first argument is the tensor to be
          dequantized, the second argument is the zero point constant (element
          type: int) and the third argument is the inverse scale constant
          (element type: float).
        * Inputs to the target quantized op are quantized and the outputs are
          dequantized.
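    A minimal sketch of the call pattern described above (the function and value names are hypothetical; shapes and element types are illustrative):

      // The callee's name contains "uniform_dequantize"; its arguments
      // follow the convention: tensor, zero point (int), inverse scale (float).
      %fp = func.call @uniform_dequantize_0(%q, %zero_point, %inverse_scale)
          : (tensor<2x2xi8>, tensor<1x1xi8>, tensor<1x1xf32>) -> tensor<2x2xf32>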
  3. tensorflow/compiler/mlir/lite/transforms/passes.td

      ];
    }
    def DecomposeHybridQuantizationPass : Pass<"tfl-decompose-hybrid-quantization", "mlir::func::FuncOp"> {
      let summary = "Decomposes hybrid quantization to explicit quantize / dequantize";
      let description = [{
          Decomposes (with explicit quantize/dequantize ops) selected math
          operations which exist in the model with hybrid quantization
          (some arguments/results left in floating point).
      }];
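    A minimal before/after sketch of the decomposition (shapes, scale, and attribute values are illustrative, not taken from this pass):

      // Before: a hybrid op consumes a quantized filter directly while the
      // input and bias stay in float.
      %0 = "tfl.conv_2d"(%input, %filter_q, %bias) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "NONE", padding = "SAME", stride_h = 1 : i32, stride_w = 1 : i32} : (tensor<1x8x8x4xf32>, tensor<2x3x3x4x!quant.uniform<i8:f32, 2.000000e-02>>, tensor<2xf32>) -> tensor<1x8x8x2xf32>
      // After: the filter is dequantized explicitly and the op runs in float.
      %dq = "tfl.dequantize"(%filter_q) : (tensor<2x3x3x4x!quant.uniform<i8:f32, 2.000000e-02>>) -> tensor<2x3x3x4xf32>
      %1 = "tfl.conv_2d"(%input, %dq, %bias) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "NONE", padding = "SAME", stride_h = 1 : i32, stride_w = 1 : i32} : (tensor<1x8x8x4xf32>, tensor<2x3x3x4xf32>, tensor<2xf32>) -> tensor<1x8x8x2xf32>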
  4. tensorflow/compiler/mlir/lite/experimental/tac/transforms/device_transform.cc

        op->erase();
      });
    }
    
    // Folds quantized i32 values (normally biases) into their float equivalents.
    struct FoldQuantizedI32ToFloat : public OpRewritePattern<TFL::DequantizeOp> {
      using OpRewritePattern<TFL::DequantizeOp>::OpRewritePattern;
    
      LogicalResult matchAndRewrite(TFL::DequantizeOp dequant_op,
                                    PatternRewriter& rewriter) const override {
        // We only fold the i32 -> float pattern.
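    A minimal sketch of the fold (the ops are TFL ops as in this file, but the scale and values are illustrative):

      // Before: an i32-quantized bias is dequantized at runtime.
      %bias = "tfl.pseudo_qconst"() {qtype = tensor<2x!quant.uniform<i32:f32, 2.000000e-03>>, value = dense<[1000, -2000]> : tensor<2xi32>} : () -> tensor<2x!quant.uniform<i32:f32, 2.000000e-03>>
      %fp = "tfl.dequantize"(%bias) : (tensor<2x!quant.uniform<i32:f32, 2.000000e-03>>) -> tensor<2xf32>
      // After folding: value * scale is materialized as a float constant.
      %fp_folded = "tfl.pseudo_const"() {value = dense<[2.0, -4.0]> : tensor<2xf32>} : () -> tensor<2xf32>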
  5. tensorflow/compiler/mlir/quantization/common/uniform_quantized_types.h

    // Returns true if the type is a uniform quantized per-axis type
    // whose storage type is 32-bit integer and expressed type is f32.
    bool IsI32F32UniformQuantizedPerAxisType(Type type);
    
    // Determines whether the storage type of a quantized type is supported by
    // `tfl.quantize` or `tfl.dequantize` ops. ui8, i8 and i16 are supported.
    bool IsSupportedByTfliteQuantizeOrDequantizeOps(IntegerType storage_type);
    
    // Returns true if a type is a quantized tensor type.
    bool IsQuantizedTensorType(Type type);
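    A minimal usage sketch of the storage-type rule (shape and scale are illustrative): i8 storage is accepted by `tfl.dequantize`, as are ui8 and i16, while i32 is rejected.

      func.func @dequantize_i8(%arg0: tensor<4x!quant.uniform<i8:f32, 1.000000e-01>>) -> tensor<4xf32> {
        // i8 storage type: supported by tfl.dequantize.
        %0 = "tfl.dequantize"(%arg0) : (tensor<4x!quant.uniform<i8:f32, 1.000000e-01>>) -> tensor<4xf32>
        func.return %0 : tensor<4xf32>
      }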
    
  6. tensorflow/compiler/mlir/lite/stablehlo/tests/uniform-quantized-stablehlo-to-tfl.mlir

    }
    // CHECK-LABEL: uniform_dequantize_op_ui16_storage_input
    // CHECK: stablehlo.uniform_dequantize
    // CHECK-NOT: tfl.dequantize
    
    // -----
    
    // Tests that the pattern doesn't match when the input quantized tensor's
    // storage type is i32. i32 storage type is not compatible with
    // `tfl.dequantize`.
    
    func.func @uniform_dequantize_op_i32_storage_input(%arg: tensor<2x2x!quant.uniform<i32:f32, 1.000000e+0:8>>) -> tensor<2x2xf32> {
  7. tensorflow/compiler/mlir/lite/experimental/tac/tests/target-annotation.mlir

      %4 = "tfl.reshape"(%3, %1) : (tensor<1x4x384x32x!quant.uniform<i8:f32, 0.19:1>>, tensor<3xi32>) -> tensor<4x384x32x!quant.uniform<i8:f32, 0.19:1>>
      // CHECK-NOT: tac.device tac.inference_type
      %5 = "tfl.dequantize"(%4) : (tensor<4x384x32x!quant.uniform<i8:f32, 0.19:1>>) -> tensor<4x384x32xf32>
      func.return %5 : tensor<4x384x32xf32>
    
    }
    
  8. tensorflow/compiler/mlir/lite/common/tfl_pass_config.h

  // have side effects, e.g. reduced flatbuffer size. Only certain type
      // conversions are supported.
      bool reduce_type_precision = false;
      // Whether to consider this model a quantized model with quantize/dequantize
      // ops and to convert kernels to quantized kernels wherever appropriate.
      quant::QDQConversionMode qdq_conversion_mode =
          quant::QDQConversionMode::kQDQNone;
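    For context, a minimal sketch of the quantize/dequantize (QDQ) form this flag refers to (scale and zero point are illustrative): the Q/DQ pair carries the quantization parameters, and later passes rewrite the surrounding kernels into quantized versions.

      %q = "tfl.quantize"(%arg0) {qtype = tensor<4x!quant.uniform<i8:f32, 5.000000e-01:-1>>} : (tensor<4xf32>) -> tensor<4x!quant.uniform<i8:f32, 5.000000e-01:-1>>
      %dq = "tfl.dequantize"(%q) : (tensor<4x!quant.uniform<i8:f32, 5.000000e-01:-1>>) -> tensor<4xf32>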
    
  9. tensorflow/compiler/mlir/lite/transforms/modify_io_nodes.cc

            returned_type = quant::ConvertSignedQuantizedToUnsigned(
                dequantize_input.getType(), dequantize_op.getLoc());
        // Replace the dequantize op with a quantize op.
            TypeAttr type_attr = TypeAttr::get(returned_type);
            auto quantize_op = builder.create<QuantizeOp>(
                dequantize_op.getLoc(), returned_type, dequantize_input, type_attr);
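    A minimal before/after sketch of the rewrite (types, scale, and zero points are illustrative; the signed-to-unsigned conversion shifts the zero point by 128):

      // Before: the function dequantizes to float at its boundary.
      %fp = "tfl.dequantize"(%v) : (tensor<4x!quant.uniform<i8:f32, 5.000000e-01:-1>>) -> tensor<4xf32>
      func.return %fp : tensor<4xf32>
      // After: the dequantize is replaced by a quantize to the unsigned
      // equivalent type, so the function returns uint8 data directly.
      %q = "tfl.quantize"(%v) {qtype = tensor<4x!quant.uniform<u8:f32, 5.000000e-01:127>>} : (tensor<4x!quant.uniform<i8:f32, 5.000000e-01:-1>>) -> tensor<4x!quant.uniform<u8:f32, 5.000000e-01:127>>
      func.return %q : tensor<4x!quant.uniform<u8:f32, 5.000000e-01:127>>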
  10. tensorflow/compiler/mlir/lite/transforms/dense_to_sparse.cc

          auto* inst = value.getDefiningOp();
          if (!inst) {
            continue;
          }
    
          // There could be a Dequantize op after the weight tensor in cases like
          // fp16 post-training quantization. We need to get the weight from the
          // input of the Dequantize op.
          if (isa<DequantizeOp>(inst)) {
            op = inst;
            value = inst->getOperand(0);
            inst = value.getDefiningOp();
            if (!inst) {
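    A minimal sketch of the fp16 weight pattern being looked through (shapes are illustrative): the sparsity logic must step past the dequantize to reach the underlying constant.

      // fp16 post-training quantization stores the weight as f16 and
      // inserts a dequantize before the consuming op.
      %w_f16 = "tfl.pseudo_const"() {value = dense<1.0> : tensor<16x16xf16>} : () -> tensor<16x16xf16>
      %w = "tfl.dequantize"(%w_f16) : (tensor<16x16xf16>) -> tensor<16x16xf32>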