Results 1 - 10 of 15 for dequantize (0.68 sec)

  1. tensorflow/compiler/mlir/lite/tests/prepare-quantize.mlir

    // MixedPrecision-NEXT: %[[q:.*]] = "tfl.quantize"(%arg0)
    // MixedPrecision-NEXT: %[[dq:.*]] = "tfl.dequantize"(%[[q]])
    // MixedPrecision-NEXT: %[[q_0:.*]] = "tfl.quantize"(%arg1)
    // MixedPrecision-NEXT: %[[dq_0:.*]] = "tfl.dequantize"(%[[q_0]])
    // MixedPrecision-NEXT: %[[c:.*]] = "tfl.concatenation"(%[[dq]], %[[dq_0]])
    // MixedPrecision-NEXT: %[[q_1:.*]] = "tfl.quantize"(%[[c]])
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 67.5K bytes - Viewed (0)
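
    Aside: the MixedPrecision check lines above describe the "QDQ" form this pass emits: every float operand gets wrapped in a tfl.quantize/tfl.dequantize pair, so quantization parameters ride along in the tensor type while the graph still computes in float. A minimal sketch of that shape (the !quant.uniform parameters are made up; the pass derives real ones from the model):

      // Each operand is quantized then immediately dequantized; the
      // concatenation itself still runs on float tensors.
      func.func @concat_qdq(%arg0: tensor<2xf32>, %arg1: tensor<2xf32>) -> tensor<4xf32> {
        %q0 = "tfl.quantize"(%arg0) {qtype = tensor<2x!quant.uniform<i8:f32, 0.1:0>>} : (tensor<2xf32>) -> tensor<2x!quant.uniform<i8:f32, 0.1:0>>
        %dq0 = "tfl.dequantize"(%q0) : (tensor<2x!quant.uniform<i8:f32, 0.1:0>>) -> tensor<2xf32>
        %q1 = "tfl.quantize"(%arg1) {qtype = tensor<2x!quant.uniform<i8:f32, 0.1:0>>} : (tensor<2xf32>) -> tensor<2x!quant.uniform<i8:f32, 0.1:0>>
        %dq1 = "tfl.dequantize"(%q1) : (tensor<2x!quant.uniform<i8:f32, 0.1:0>>) -> tensor<2xf32>
        %c = "tfl.concatenation"(%dq0, %dq1) {axis = 0 : i32, fused_activation_function = "NONE"} : (tensor<2xf32>, tensor<2xf32>) -> tensor<4xf32>
        func.return %c : tensor<4xf32>
      }
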
  2. tensorflow/compiler/mlir/lite/tests/prepare-quantize-post-training.mlir

    // RUN: tf-opt %s -tfl-prepare-quantize="quantize-signed=true post-training-quantize=true" -cse | FileCheck %s
    // RUN: tf-opt %s -tfl-prepare-quantize="quantize-signed=true post-training-quantize=true legacy-float-scale=true" -cse | FileCheck --check-prefix=Legacy %s
    
    // CHECK-LABEL: QuantizeLstmCellInput
    func.func @QuantizeLstmCellInput(%arg0: tensor<1x28x28xf32>) -> tensor<1x28x20xf32> {
        %cst_2 = "tfl.no_value"() {value = unit} : () -> none
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 52.6K bytes - Viewed (0)
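
    Aside: the RUN lines above are lit directives: each invokes tf-opt on this file and pipes the result to FileCheck, and --check-prefix selects which comment prefix (CHECK vs. Legacy) gets verified, so one file carries expectations for both configurations. A minimal test of the same shape, reusing the pass flags from the snippet (the function body is a placeholder, not taken from the file):

      // RUN: tf-opt %s -tfl-prepare-quantize="quantize-signed=true post-training-quantize=true" | FileCheck %s

      // CHECK-LABEL: @Identity
      func.func @Identity(%arg0: tensor<1xf32>) -> tensor<1xf32> {
        // CHECK: return %arg0
        func.return %arg0 : tensor<1xf32>
      }
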
  3. tensorflow/compiler/mlir/lite/tests/prepare-tf.mlir

    ^bb0(%arg0: tensor<1x2xf32>):
      %cst_0 = arith.constant dense<[1, 0]> : tensor<2xi32>
      %0 = "tfl.quantize"(%arg0){qtype = tensor<1x2x!quant.uniform<u8:f32, 1.0>>}: (tensor<1x2xf32>) -> (tensor<1x2x!quant.uniform<u8:f32, 1.0>>)
      %1 = "tfl.dequantize"(%0): (tensor<1x2x!quant.uniform<u8:f32, 1.0>>) -> (tensor<1x2xf32>)
      %2 = "tf.Transpose"(%1, %cst_0): (tensor<1x2xf32>, tensor<2xi32>) -> tensor<2x1xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 29 07:26:59 UTC 2024 - 59.8K bytes - Viewed (0)
  4. tensorflow/compiler/mlir/lite/stablehlo/transforms/compose_uniform_quantized_type_pass.cc

        if (!combined_scale_constant_op) {
          LLVM_DEBUG(llvm::dbgs()
                     << "Failed to match combined_scale_constant_op.\n");
          return failure();
        }
    
        // Quantize -> Dequantize following r3.
        auto output_uniform_quantize_call_op = dyn_cast_or_null<func::CallOp>(
            *combined_scale_multiply_op.getResult().user_begin());
    if (!output_uniform_quantize_call_op ||
        !output_uniform_quantize_call_op->hasOneUse()) {
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 64.6K bytes - Viewed (0)
  5. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize_composite_functions.cc

            call_op, result_types, args,
            FlatSymbolRefAttr::get(new_quant_func_name));
    
        return success();
      }
    
      // For composite functions followed by Dequantize ops, merges the Dequantize
      // op into the functions by creating quantized functions with float output.
      LogicalResult mergeDequantizeOpFollowingQuantizedFunction(
          TF::PartitionedCallOp call_op, const SmallVector<Value, 4>& args,
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 54.5K bytes - Viewed (0)
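
    Aside: a rough sketch of the "before" shape that comment describes, under stated assumptions: the real pass matches TF::PartitionedCallOp composites, so func.call and tfl.dequantize stand in here, and the function names are hypothetical. The merge clones the callee into a variant that returns float, letting the trailing dequantize vanish from the call site:

      // Hypothetical quantized composite returning a quantized tensor.
      func.func private @quantized_matmul_fn(tensor<2x2x!quant.uniform<i8:f32, 0.1:0>>) -> tensor<2x2x!quant.uniform<i8:f32, 0.1:0>>

      func.func @caller(%arg0: tensor<2x2x!quant.uniform<i8:f32, 0.1:0>>) -> tensor<2x2xf32> {
        %0 = func.call @quantized_matmul_fn(%arg0) : (tensor<2x2x!quant.uniform<i8:f32, 0.1:0>>) -> tensor<2x2x!quant.uniform<i8:f32, 0.1:0>>
        // The merge folds this dequantize into a cloned callee whose
        // result type is already tensor<2x2xf32>.
        %1 = "tfl.dequantize"(%0) : (tensor<2x2x!quant.uniform<i8:f32, 0.1:0>>) -> tensor<2x2xf32>
        func.return %1 : tensor<2x2xf32>
      }
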
  6. tensorflow/compiler/mlir/lite/experimental/tac/tests/raise-target-subgraphs.mlir

      %2 = "tfl.dequantize"(%1) : (tensor<1x384x384x!quant.uniform<i8:f32, 0.003:-128>>) -> tensor<1x384x384xf32>
      %3 = "tfl.pseudo_const"() {value = dense<1.000000e+00> : tensor<1x1x384xf32>} : () -> tensor<1x384x384xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 74.9K bytes - Viewed (0)
  7. tensorflow/compiler/mlir/lite/quantization/lite/quantize_model_test.cc

      // The original model is reshape->custom->custom->squeeze.
      ASSERT_THAT(*float_graph->operators(), SizeIs(4));
      // The resulting model should be:
      // reshape->dequantize->custom->custom->quantize->squeeze.
      ASSERT_THAT(subgraph->operators, SizeIs(6));
      const std::vector<BuiltinOperator> op_codes = {
          BuiltinOperator_RESHAPE,  BuiltinOperator_DEQUANTIZE,
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 73.9K bytes - Viewed (0)
  8. tensorflow/compiler/mlir/lite/transforms/prepare_tf.cc

      // before converting TF_Conv to TFL_Conv
      (void)applyPatternsAndFoldGreedily(func, std::move(patterns));
    
      // Remove the wrapper of the tf.FakeQuant* ops and also insert the
      // tfl.quantize and tfl.dequantize to preserve the quantization parameters.
      // This is done after the first round of optimization to make sure all the
      // min/max operands of the tf.FakeQuant* are constants to be matched. The
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 28 21:49:50 UTC 2024 - 64.6K bytes - Viewed (0)
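
    Aside: the insertion that comment describes, sketched on a single value. The scale and zero point below are illustrative only; the real ones are computed from the tf.FakeQuant* op's constant min/max operands:

      // A fake-quant over [-1.0, 1.0] at 8 bits becomes a
      // quantize/dequantize pair whose element type records the
      // derived parameters (values here are illustrative).
      %q = "tfl.quantize"(%arg0) {qtype = tensor<4x!quant.uniform<u8:f32, 0.0078431377:128>>} : (tensor<4xf32>) -> tensor<4x!quant.uniform<u8:f32, 0.0078431377:128>>
      %dq = "tfl.dequantize"(%q) : (tensor<4x!quant.uniform<u8:f32, 0.0078431377:128>>) -> tensor<4xf32>
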
  9. tensorflow/compiler/mlir/lite/transforms/optimize_patterns.td

    foreach BinaryOp = [TFL_DivOp, TFL_MulOp]<Op> in
      defm : FuseMulOrDivWithConv2dOrDepthwiseConv2d<BinaryOp>;
    
    
    // This pattern applies when the same quantize/dequantize have been used twice
    // with the same scale. We want to remove the redundancy.
    // TODO(fengliuai): move this to the sanity check of pre-quantize pass.
    def eliminate_dq_q_pairs : Pat<
      (TFL_QuantizeOp (TFL_DequantizeOp $in), $qt),
      (replaceWithValue $in),
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 16 20:31:41 UTC 2024 - 66.4K bytes - Viewed (0)
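
    Aside: written out as IR, the redundancy eliminate_dq_q_pairs removes looks like the following (the quantized type is arbitrary; the pattern only fires when the requantized type matches the input's type, i.e. the same scale):

      func.func @redundant_pair(%q: tensor<4x!quant.uniform<u8:f32, 1.0>>) -> tensor<4x!quant.uniform<u8:f32, 1.0>> {
        // A dequantize immediately requantized to the identical type.
        %dq = "tfl.dequantize"(%q) : (tensor<4x!quant.uniform<u8:f32, 1.0>>) -> tensor<4xf32>
        %rq = "tfl.quantize"(%dq) {qtype = tensor<4x!quant.uniform<u8:f32, 1.0>>} : (tensor<4xf32>) -> tensor<4x!quant.uniform<u8:f32, 1.0>>
        // The pattern replaces uses of %rq with %q directly.
        func.return %rq : tensor<4x!quant.uniform<u8:f32, 1.0>>
      }
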
  10. tensorflow/compiler/jit/mark_for_compilation_pass.cc

    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 12:19:41 UTC 2024 - 85.3K bytes - Viewed (0)