Results 101 - 110 of 203 for dequantize (0.27 sec)

  1. tensorflow/compiler/mlir/lite/schema/schema_v3b.fbs

    // set of acceptable options.
    // LINT.IfChange
    enum BuiltinOperator : int32 {
      ADD = 0,
      AVERAGE_POOL_2D = 1,
      CONCATENATION = 2,
      CONV_2D = 3,
      DEPTHWISE_CONV_2D = 4,
      DEPTH_TO_SPACE = 5,
      DEQUANTIZE = 6,
      EMBEDDING_LOOKUP = 7,
      FLOOR = 8,
      FULLY_CONNECTED = 9,
      HASHTABLE_LOOKUP = 10,
      L2_NORMALIZATION = 11,
      L2_POOL_2D = 12,
      LOCAL_RESPONSE_NORMALIZATION = 13,
      LOGISTIC = 14,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 28 14:28:27 UTC 2024
    - 30K bytes
    - Viewed (0)
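The enum above fixes the numeric operator codes stored in serialized .tflite files, with DEQUANTIZE assigned code 6. As a minimal illustration only (not the FlatBuffers-generated bindings the project actually uses), the same mapping can be expressed as a plain Python lookup:

    # Illustrative only: a hand-written subset of the BuiltinOperator codes
    # quoted from schema_v3b.fbs; real tooling uses the generated schema.
    BUILTIN_OPERATOR = {
        0: "ADD",
        1: "AVERAGE_POOL_2D",
        2: "CONCATENATION",
        3: "CONV_2D",
        4: "DEPTHWISE_CONV_2D",
        5: "DEPTH_TO_SPACE",
        6: "DEQUANTIZE",
        7: "EMBEDDING_LOOKUP",
        8: "FLOOR",
        9: "FULLY_CONNECTED",
    }

    def op_name(code):
        """Return the builtin operator name for a code, if known."""
        return BUILTIN_OPERATOR.get(code, "UNKNOWN_%d" % code)

    print(op_name(6))  # DEQUANTIZE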
  2. tensorflow/compiler/mlir/lite/transforms/optimize.cc

          if (fc_op.getFilter() != filter) {
            // This filter goes through quantize and dequantize ops. Then we just
            // need to update the weight to the quantize op.
            filter.replaceAllUsesWith(new_filter_op);
          } else {
            // This filter doesn't go through quantize and dequantize ops, so we
            // update the weight of the affine op directly.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Apr 30 00:40:15 UTC 2024
    - 102.3K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/lite/experimental/tac/tests/raise-target-subgraphs.mlir

      %2 = "tfl.dequantize"(%1) : (tensor<1x384x384x!quant.uniform<i8:f32, 0.003:-128>>) -> tensor<1x384x384xf32>
      %3 = "tfl.pseudo_const"() {value = dense<1.000000e+00> : tensor<1x1x384xf32>} : () -> tensor<1x384x384xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 74.9K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/schema/schema.fbs

    // set of acceptable options.
    // LINT.IfChange
    enum BuiltinOperator : int32 {
      ADD = 0,
      AVERAGE_POOL_2D = 1,
      CONCATENATION = 2,
      CONV_2D = 3,
      DEPTHWISE_CONV_2D = 4,
      DEPTH_TO_SPACE = 5,
      DEQUANTIZE = 6,
      EMBEDDING_LOOKUP = 7,
      FLOOR = 8,
      FULLY_CONNECTED = 9,
      HASHTABLE_LOOKUP = 10,
      L2_NORMALIZATION = 11,
      L2_POOL_2D = 12,
      LOCAL_RESPONSE_NORMALIZATION = 13,
      LOGISTIC = 14,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 03 18:01:23 UTC 2024
    - 41.7K bytes
    - Viewed (0)
  5. RELEASE.md

        ([CVE-2022-21728](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-21728))
    *   Fixes a heap OOB access in `Dequantize`
        ([CVE-2022-21726](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-21726))
    *   Fixes an integer overflow in shape inference for `Dequantize`
        ([CVE-2022-21727](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-21727))
    *   Fixes a heap OOB access in `FractionalAvgPoolGrad`
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jun 11 23:24:08 UTC 2024
    - 730.3K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/lite/transforms/quantize.cc

    // Base struct for quantization.
    template <QuantizationTrait quantization_trait, typename ConcreteT,
              typename RootOpT = DequantizeOp>
    struct TFLQuantizationBase
        : public quant::QuantizationPattern<ConcreteT, QuantizeOp, DequantizeOp,
                                            NumericVerifyOp, RootOpT> {
      explicit TFLQuantizationBase(MLIRContext* ctx,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Apr 24 20:30:06 UTC 2024
    - 13.3K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/lite/quantization/lite/quantize_model_test.cc

      // The original model is reshape->custom->custom->squeeze.
      ASSERT_THAT(*float_graph->operators(), SizeIs(4));
      // The resulting model should be:
      // reshape->dequantize->custom->custom->quantize->squeeze.
      ASSERT_THAT(subgraph->operators, SizeIs(6));
      const std::vector<BuiltinOperator> op_codes = {
          BuiltinOperator_RESHAPE,  BuiltinOperator_DEQUANTIZE,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 73.9K bytes
    - Viewed (0)
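The test above checks that custom ops, which stay in float, end up bracketed by DEQUANTIZE and QUANTIZE ops after quantization. Below is a minimal numpy sketch of that dequantize -> float op -> quantize sandwich; the scale, zero point, and float_custom_op stand-in are assumed values for illustration only:

    import numpy as np

    scale, zero_point = 0.05, -3  # assumed int8 quantization parameters

    def dequantize(q):
        """int8 -> float32: real_value = scale * (q - zero_point)."""
        return scale * (q.astype(np.float32) - zero_point)

    def quantize(x):
        """float32 -> int8: scale, shift by zero_point, round, clamp."""
        q = np.round(x / scale) + zero_point
        return np.clip(q, -128, 127).astype(np.int8)

    def float_custom_op(x):
        # Stand-in for a custom op that the quantizer leaves in float32.
        return np.maximum(x, 0.0)

    q_in = np.array([-128, -3, 0, 64, 127], dtype=np.int8)
    # reshape -> DEQUANTIZE -> custom -> custom -> QUANTIZE -> squeeze
    q_out = quantize(float_custom_op(float_custom_op(dequantize(q_in))))
    print(q_out)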
  8. tensorflow/compiler/mlir/lite/transforms/prepare_tf.cc

      // before converting TF_Conv to TFL_Conv
      (void)applyPatternsAndFoldGreedily(func, std::move(patterns));
    
      // Remove the wrapper of the tf.FakeQuant* ops and also insert the
      // tfl.quantize and tfl.dequantize to preserve the quantization parameters.
      // This is done after the first round of optimization to make sure all the
      // min/max operands of the tf.FakeQuant* are constants to be matched. The
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 28 21:49:50 UTC 2024
    - 64.6K bytes
    - Viewed (0)
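The comment above describes replacing the tf.FakeQuant* wrappers with a tfl.quantize/tfl.dequantize pair so that the recorded min/max parameters survive into TFLite. The following is a minimal numpy sketch of what such a quantize-then-dequantize round trip does to a float tensor; the [min, max] range and bit width are assumed values, and the exact FakeQuant nudging of the range is omitted:

    import numpy as np

    def quantize_dequantize(x, min_range=-6.0, max_range=6.0, num_bits=8):
        """Snap x onto a num_bits grid over [min_range, max_range], then
        map back to float. This round trip is what the q/dq pair encodes."""
        levels = 2 ** num_bits - 1
        scale = (max_range - min_range) / levels
        q = np.round((np.clip(x, min_range, max_range) - min_range) / scale)
        return q * scale + min_range

    x = np.array([-7.0, -1.234, 0.0, 2.5, 6.3], dtype=np.float32)
    print(quantize_dequantize(x))  # values now lie on the quantization grid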
  9. tensorflow/compiler/mlir/lite/transforms/optimize_patterns.td

    foreach BinaryOp = [TFL_DivOp, TFL_MulOp]<Op> in
      defm : FuseMulOrDivWithConv2dOrDepthwiseConv2d<BinaryOp>;
    
    
    // This pattern applies when the same quantize/dequantize have been used twice
    // with the same scale. We want to remove the redundancy.
    // TODO(fengliuai): move this to the sanity check of pre-quantize pass.
    def eliminate_dq_q_pairs : Pat<
      (TFL_QuantizeOp (TFL_DequantizeOp $in), $qt),
      (replaceWithValue $in),
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 16 20:31:41 UTC 2024
    - 66.4K bytes
    - Viewed (0)
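The eliminate_dq_q_pairs pattern above rewrites tfl.quantize(tfl.dequantize(%in)) back to %in when both ops carry the same quantization parameters. A small numpy check of why that is safe, with assumed scale and zero point: dequantizing and re-quantizing with identical parameters reproduces the stored integers exactly, so the pair is redundant.

    import numpy as np

    scale, zero_point = 0.02, 7  # assumed, identical on both ops

    q = np.arange(-128, 128, dtype=np.int8)
    x = scale * (q.astype(np.float32) - zero_point)        # dequantize
    q_again = np.clip(np.round(x / scale) + zero_point,    # re-quantize
                      -128, 127).astype(np.int8)

    assert np.array_equal(q, q_again)  # the q/dq pair is a no-op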
  10. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize/quantize.mlir

    // RUN: stablehlo-quant-opt %s -split-input-file -stablehlo-quantize -verify-each=false | FileCheck %s
    
    // Tests for PopulateFusedGemmStylePatterns are handled in
    // quantize_composite_functions for module-level evaluation of functions.
    
    module attributes {tf_saved_model.semantics} {
    // CHECK: quantize_simple_xla_call_module(%[[ARG_0:.+]]: tensor<1x4xf32>)
      func.func private @quantize_simple_xla_call_module(%arg0: tensor<1x4xf32>) -> tensor<1x3xf32> {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 18 01:38:40 UTC 2024
    - 6.3K bytes
    - Viewed (0)