Results 31 - 40 of 178 for dequantize (0.16 sec)

  1. tensorflow/compiler/mlir/quantization/tensorflow/passes/propagate_quantize_type.cc

        auto op_before_dequantize = original_dequantize_op.getOperand(0);
    
        // Create a new dequantize op that is propagated.
        rewriter.setInsertionPointAfter(user_op);
        TF::PartitionedCallOp new_dequantize_op =
            cast<TF::PartitionedCallOp>(rewriter.clone(*original_dequantize_op));
    
        // Skip the original dequant op and connect the op before dequantize to the
        // user op.
        user_op->setOperand(user_idx, op_before_dequantize);
    
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 7K bytes
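    The excerpt shows the core rewiring steps of the propagation. Below is an
    illustrative, simplified rewrite pattern built from the same generic MLIR
    rewriter calls; the class name is hypothetical, no check is made that the
    matched op really is a dequantize call, and the result-type updates the real
    pass performs are omitted.

        #include "mlir/IR/PatternMatch.h"

        using namespace mlir;

        // Hypothetical, simplified pattern: push a single-use dequantize below
        // its user so the user consumes the still-quantized value.
        struct PushDownDequantizeSketch : public RewritePattern {
          explicit PushDownDequantizeSketch(MLIRContext *ctx)
              : RewritePattern(MatchAnyOpTypeTag(), /*benefit=*/1, ctx) {}

          LogicalResult matchAndRewrite(Operation *dequantize_op,
                                        PatternRewriter &rewriter) const override {
            // A real pattern would first verify dequantize_op is a dequantize.
            if (!dequantize_op->hasOneUse()) return failure();

            OpOperand &use = *dequantize_op->getUses().begin();
            Operation *user_op = use.getOwner();
            unsigned user_idx = use.getOperandNumber();

            // Re-create the dequantize after the user op.
            rewriter.setInsertionPointAfter(user_op);
            Operation *new_dequantize_op = rewriter.clone(*dequantize_op);

            // Skip the original dequantize and connect the op before it to the
            // user op, so the user consumes the quantized value.
            Value op_before_dequantize = dequantize_op->getOperand(0);
            user_op->setOperand(user_idx, op_before_dequantize);

            // Downstream consumers keep reading a dequantized (float) value.
            new_dequantize_op->setOperand(0, user_op->getResult(0));
            rewriter.replaceAllUsesExcept(user_op->getResult(0),
                                          new_dequantize_op->getResult(0),
                                          new_dequantize_op);
            rewriter.eraseOp(dequantize_op);
            return success();
          }
        };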
  2. tensorflow/compiler/mlir/lite/transforms/post_quantize.cc

                op->user_begin()->hasTrait<OpTrait::IsTerminator>())
              return failure();
          }
          // If the quantize op is a requantize op, it is being used in other scale
          // adjustments and should be kept. Instead, move the dequantize op
          // before the requantize op to remove the unnecessary requantize op.
          if (auto qtype = quant::QuantizedType::getQuantizedElementType(
                  q.getInput().getType())) {
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 17.1K bytes
  3. tensorflow/compiler/mlir/quantization/tensorflow/passes/post_quantize.cc

                op->user_begin()->hasTrait<OpTrait::IsTerminator>())
              return failure();
          }
          // If the quantize op is a requantize op, it is being used in other scale
          // adjustments and should be kept. Instead, move the dequantize op
          // before the requantize op to remove the unnecessary requantize op.
          if (auto qtype =
                  QuantizedType::getQuantizedElementType(q.getArg().getType())) {
    - Last Modified: Tue Mar 05 07:39:40 UTC 2024
    - 5.6K bytes
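    The comment above (in this result and the previous one) describes folding a
    requantize by moving the dequantize in front of it. A compressed sketch of
    the idea, using hypothetical op classes rather than the pass's own dialect:

        // %x  : already-quantized value (scale A)
        // %q  = "quantize"(%x)   -> quantized with scale B   // a requantize
        // %dq = "dequantize"(%q) -> f32
        //
        // Since %dq only needs a float value, it can read %x directly and the
        // requantize becomes dead:
        if (QuantizedType::getQuantizedElementType(q.getArg().getType())) {
          rewriter.setInsertionPoint(dq);
          // DequantizeOp and its builder are placeholders for illustration.
          auto new_dq = rewriter.create<DequantizeOp>(dq.getLoc(), dq.getType(),
                                                      q.getArg());
          rewriter.replaceOp(dq, new_dq.getResult());
          return success();
        }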
  4. tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights_test.cc

        const auto float_graph = model_->subgraphs()->Get(subgraph_idx);
        // The output graph should have an extra tensor from the added dequantize
        // op.
        ASSERT_EQ(quantized_graph->tensors()->size(),
                  float_graph->tensors()->size() + 1);
        // Check that a dequantize op exists.
        int32_t dequant_input_idx = -1;
        int32_t dequant_output_idx = -1;
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 32.3K bytes
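    A plausible continuation of this excerpt (assumed, not quoted from the test)
    scans the quantized subgraph for the DEQUANTIZE operator and records its
    tensor indices via the generated TFLite flatbuffer accessors; the name
    quantized_model for the owning model is an assumption:

        for (size_t i = 0; i < quantized_graph->operators()->size(); ++i) {
          const auto* op = quantized_graph->operators()->Get(i);
          const auto code = quantized_model->operator_codes()
                                ->Get(op->opcode_index())
                                ->builtin_code();
          if (code == tflite::BuiltinOperator_DEQUANTIZE) {
            dequant_input_idx = op->inputs()->Get(0);
            dequant_output_idx = op->outputs()->Get(0);
          }
        }
        // Both indices should now point at the added dequantize's tensors.
        ASSERT_NE(dequant_input_idx, -1);
        ASSERT_NE(dequant_output_idx, -1);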
  5. tensorflow/compiler/mlir/quantization/tensorflow/utils/fake_quant_utils.h

        // Finally, use the quantization parameter to create the quantize and
        // dequantize ops, and insert them between the tf.FakeQuantWithMinMaxVarsOp
        // and its users.
        auto quantize = rewriter.create<quantfork::QuantizeCastOp>(
            tf_op.getLoc(), qtype.getValue(), input);
        auto dequantize = rewriter.create<quantfork::DequantizeCastOp>(
            tf_op.getLoc(), res_type, quantize.getResult());
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 6.3K bytes
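    The excerpt creates the quantize/dequantize cast pair; the step that usually
    follows (assumed here, not quoted from the header) reroutes the fake-quant's
    consumers through that pair. The getOutputs accessor name is an assumption:

        // Every former user of the tf.FakeQuantWithMinMaxVars result now reads
        // the dequantized value; the quantize op is excluded in case `input`
        // was taken from that same result.
        tf_op.getOutputs().replaceAllUsesExcept(dequantize.getResult(),
                                                quantize.getOperation());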
  6. tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composite_functions_xla.mlir

    // CHECK: %[[dequantize:.*]] = "tf.PartitionedCall"(%[[maxpool]]
    // CHECK-SAME: f = @dequantize_i8
    // CHECK: return %[[dequantize]]
    
    // CHECK: -------- Quantization Summary --------
    // CHECK: Number of quantized layers in the model
    // CHECK: --------------------------------
    - Last Modified: Mon Jan 08 01:16:10 UTC 2024
    - 25.2K bytes
  7. tensorflow/compiler/mlir/lite/experimental/tac/tests/get-alternative-subgraph.mlir

    // CHECK-DAG:       %[[VAL_8:.*]] = "tfl.pseudo_const"(){{.*}}dense<[384, 128]> : tensor<2xi32>
    // CHECK:           %[[VAL_9:.*]] = "tfl.dequantize"(%[[VAL_0]]) {tac.device = "GPU", tac.inference_type = "FLOAT"} : (tensor<384x512x!quant.uniform<i8:f32, 1.000000e-01>>) -> tensor<384x512xf32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 20.1K bytes
  8. tensorflow/compiler/mlir/quantization/stablehlo/passes/post_quantize.cc

          if (!q->getAttr(kVolatileOpAttrName)) return failure();
    
          // If the quantize op is a requantize op, it is being used in other scale
          // adjustments and should be kept. Instead, move dequantize op before the
          // requantize op to remove the unnecessary requantize op.
          if (const QuantizedType qtype =
                  QuantizedType::getQuantizedElementType(q.getArg().getType())) {
    - Last Modified: Tue Mar 05 07:39:40 UTC 2024
    - 6.3K bytes
  9. tensorflow/compiler/mlir/quantization/stablehlo/passes/passes.td

            "MLIR dump file name.">,
        Option<"merge_fusion_with_dequantize_",
            "merge-fusion-with-dequantize",
            "bool", /*default=*/"false",
            "Whether to merge quantized conv/dot_general fusion with subsequent dequantize.">,
      ];
      let dependentDialects = [
        "mlir::arith::ArithDialect",
        "mlir::stablehlo::StablehloDialect",
        "mlir::quant::QuantizationDialect",
    - Last Modified: Tue May 14 06:31:57 UTC 2024
    - 10.3K bytes
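    An Option declared this way surfaces as a member variable of the generated
    pass class, so the pass body can branch on it. The sketch below is a generic
    illustration of reading such a boolean flag inside a pass, not the actual
    implementation; the class and pattern names are made up:

        void QuantizePassSketch::runOnOperation() {
          MLIRContext *ctx = &getContext();
          RewritePatternSet patterns(ctx);
          if (merge_fusion_with_dequantize_) {
            // Only register the fusion-with-dequantize patterns when requested.
            patterns.add<MergeFusionWithDequantizeSketch>(ctx);
          }
          if (failed(applyPatternsAndFoldGreedily(getOperation(),
                                                  std::move(patterns))))
            signalPassFailure();
        }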
  10. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize.cc

                               TFDynamicRangeQuantization>(ctx, quant_params) {}
    };
    
    // Removes quantize-dequantize pairs that are not used in the quantization.
    // The benefit of this pattern is set to a lower value than other patterns so
    // that the other patterns can work on quantize/dequantize ops first.
    class RemoveUnusedQdqPattern
        : public OpRewritePattern<quantfork::DequantizeCastOp> {
     public:
    - Last Modified: Fri Mar 22 05:52:39 UTC 2024
    - 23.6K bytes
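    A compact sketch of a pattern with the shape described above (illustrative
    only; the constructor and exact matching logic of the real class differ, and
    the getArg accessor name is assumed):

        class RemoveUnusedQdqSketch
            : public OpRewritePattern<quantfork::DequantizeCastOp> {
         public:
          // A low benefit lets the quantization patterns act on the q/dq ops
          // before this cleanup runs.
          explicit RemoveUnusedQdqSketch(MLIRContext* ctx)
              : OpRewritePattern<quantfork::DequantizeCastOp>(ctx, /*benefit=*/1) {}

          LogicalResult matchAndRewrite(quantfork::DequantizeCastOp dq_op,
                                        PatternRewriter& rewriter) const override {
            // Only fire on a dequantize that is directly fed by a quantize.
            auto q_op = dq_op.getArg().getDefiningOp<quantfork::QuantizeCastOp>();
            if (!q_op) return failure();

            // Forward the original float value; the q/dq pair becomes dead.
            rewriter.replaceOp(dq_op, q_op.getArg());
            return success();
          }
        };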