Search Options

Results per page
Sort
Preferred Languages
Advanced

Results 1 - 10 of 12 for qdq (0.09 sec)

  1. tensorflow/compiler/mlir/lite/tests/post-quantize.mlir

    // QDQ-NEXT: %[[out1:.*]] = "tfl.dequantize"(%[[split]]#0) : (tensor<2x!quant.uniform<u8:f32, 1.000000e+00>>) -> tensor<2xf32>
    // QDQ-NEXT: %[[out2:.*]] = "tfl.dequantize"(%[[split]]#1) : (tensor<2x!quant.uniform<u8:f32, 1.000000e+00>>) -> tensor<2xf32>
    // QDQ-NEXT: return %[[out1]], %[[out2]] : tensor<2xf32>, tensor<2xf32>
    }
    
    // CHECK-LABEL: RemoveTrival
    // QDQ-LABEL: RemoveTrival
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 19.9K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/lite/transforms/passes.td

                "std::string", "", "Specifies which custom ops are NoSideEffect.">,
      ];
    }
    
    def PostQuantizeRemoveQDQPass : Pass<"tfl-post-quantize-remove-qdq", "mlir::func::FuncOp"> {
      let summary = "Remove qdq from input and output nodes after quantization.";
      let constructor = "CreatePostQuantizeRemoveQDQPass()";
    }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Apr 24 20:30:06 UTC 2024
    - 22.6K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_quantize.cc

            ret.setOperand(i, quantized.front());
          }
          i++;
        }
      });
    
      // Check for  (Quant (Dequant $in), $qA) "qdq" pairs that couldn't be
      // eliminated at this point.  This only occurs for the pattern
      //      (Quant (Dequant (Quant $in, $qB)), $qA)   $qB != $qA
      // where the  qdq pair denotes a non-trivial requantization of an
      // already quantized value. Since this makes little sense (directly quantizing
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 17.2K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/python/saved_model_to_tfl_flatbuffer.cc

      } else if (toco_flags.qdq_conversion_mode() == "NONE") {
        pass_config.quant_specs.qdq_conversion_mode =
            mlir::quant::QDQConversionMode::kQDQNone;
      } else {
        return errors::InvalidArgument("Unknown QDQ conversion mode: ",
                                       toco_flags.qdq_conversion_mode());
      }
    
      if (toco_flags.has_qdq_conversion_mode() &&
          toco_flags.qdq_conversion_mode() != "NONE") {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sun May 12 12:39:37 UTC 2024
    - 11K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/lite/transforms/prepare_quantize.cc

            "quantization error!");
      });
    
      // Check for  (Quant (Dequant $in), $qA) "qdq" pairs that couldn't be
      // eliminated at this point.  This only occurs for the pattern
      //      (Quant (Dequant (Quant $in, $qB)), $qA)   $qB != $qA
      // where the  qdq pair denotes a non-trivial requantization of an
      // already quantized value. Since this makes little sense (directly quantizing
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 17.6K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/lite/transforms/post_quantize.cc

        this->emit_quant_adaptor_ops_ = emit_quant_adaptor_ops;
      }
    
      void runOnOperation() override;
    
     private:
      quant::CustomOpMap custom_op_map_;
    };
    
    // Cleans up unnecessary QDQ pattern for input/output ops.
    class PostQuantizeRemoveQDQPass
        : public impl::PostQuantizeRemoveQDQPassBase<PostQuantizeRemoveQDQPass> {
     public:
      MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(PostQuantizeRemoveQDQPass)
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 17.1K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver.h

      // type for `arg` is `quantized_type`.
      void QuantizeArg(BlockArgument arg, QuantizedType quantized_type);
    
      // Inserts the Quantize and Dequantize ops (i.e. QDQ) after `value`. The
      // quantized element type for `value` is `quantized_type`.
      void QuantizeValue(Value value, QuantizedType quantized_type, Location loc);
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Mar 20 11:42:17 UTC 2024
    - 16.8K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_config.h

      // specified in this map are subject to quantization.
      CustomOpMap custom_map;
    
      // If other than kQDQNone, the model is a floating point graph with QDQ ops
      // to be eliminated and fused into quantized kernels.
      QDQConversionMode qdq_conversion_mode = QDQConversionMode::kQDQNone;
    };
    
    // Parses the command line flag strings to the CustomOpMap specification.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Mar 13 10:16:19 UTC 2024
    - 10.8K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/quantization/stablehlo/passes/passes.td

      let summary = "Restores function name from XlaCallModule op.";
    }
    
    def QuantizeCompositeFunctionsPass : Pass<"stablehlo-quantize-composite-functions", "ModuleOp"> {
      let summary = "Quantize composite functions with QDQ input / outputs.";
      let options = [
        Option<"enable_per_channel_quantized_weight_",
            "enable-per-channel-quantized-weight",
            "bool", /*default=*/"true",
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 14 06:31:57 UTC 2024
    - 10.3K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/quantization/tensorflow/passes/passes.h

    // TODO(b/204265523): Removes this pass after the exporting MLIR to SavedModel
    // path is available.
    std::unique_ptr<OperationPass<ModuleOp>> CreateInsertMainFunctionPass();
    
    // Converts FakeQuant ops to quant.qcast and quant.dcast (QDQ) pairs.
    std::unique_ptr<OperationPass<func::FuncOp>> CreateConvertFakeQuantToQdqPass();
    
    // Lifts the quantizable spots as composite functions.
    std::unique_ptr<OperationPass<ModuleOp>>
    CreateLiftQuantizableSpotsAsFunctionsPass(
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 12.3K bytes
    - Viewed (0)
Back to top