Results 11 - 20 of 58 for dequantize (0.18 sec)

  1. tensorflow/compiler/mlir/lite/tests/post-quantize-dynamic-range.mlir

    // RUN: tf-opt %s -tfl-prepare-quantize-dynamic-range="enable-custom-op-quantization=CustomTestOp=1" -tfl-quantize="enable-dynamic-range-quantization=true enable-custom-op-weight-only=CustomTestOp=false" -tfl-post-quantize="enable-no-side-effect=CustomTestOp=false" | FileCheck --check-prefix=NotPrune %s
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 11.4K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/lite/tests/decompose-hybrid-quantization.mlir

      // CHECK-DAG: %[[VAL2:.+]] = "tfl.dequantize"(%[[VAL0]])
      // CHECK-DAG: %[[VAL3:.+]] = "tfl.dequantize"(%[[VAL1]])
      // CHECK-DAG: %[[VAL4:.+]] = "tfl.conv_2d"(%arg0, %[[VAL2]], %[[VAL3]]) <{dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "NONE", padding = "SAME", stride_h = 1 : i32, stride_w = 1 : i32}>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 13.1K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/lite/experimental/tac/tests/pick-subgraphs.mlir

        %0 = "tfl.dequantize"(%arg0) {tac.device = "GPU", tac.inference_type = "FLOAT"} : (tensor<100x!quant.uniform<i8:f32, 2.000000e-01:-3>>) -> tensor<100xf32>
        %1 = "tfl.dequantize"(%arg1) {tac.device = "GPU", tac.inference_type = "FLOAT"} : (tensor<100x!quant.uniform<i8:f32, 2.000000e-01:-3>>) -> tensor<100xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 24.3K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/transforms/post_quantize.cc

                op->user_begin()->hasTrait<OpTrait::IsTerminator>())
              return failure();
          }
          // If the quantize op is a requantize op, it is being used in other scale
          // adjustments and should be kept. Instead, move the dequantize op before
          // the requantize op to remove the unnecessary requantize op.
          if (auto qtype = quant::QuantizedType::getQuantizedElementType(
                  q.getInput().getType())) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 17.1K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights_test.cc

        const auto float_graph = model_->subgraphs()->Get(subgraph_idx);
        // The output graph should have an extra tensor from the added dequantize
        // op.
        ASSERT_EQ(quantized_graph->tensors()->size(),
                  float_graph->tensors()->size() + 1);
        // Check that a dequantize op exists.
        int32_t dequant_input_idx = -1;
        int32_t dequant_output_idx = -1;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 32.3K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/lite/experimental/tac/tests/get-alternative-subgraph.mlir

    // CHECK-DAG:       %[[VAL_8:.*]] = "tfl.pseudo_const"(){{.*}}dense<[384, 128]> : tensor<2xi32>
    // CHECK:           %[[VAL_9:.*]] = "tfl.dequantize"(%[[VAL_0]]) {tac.device = "GPU", tac.inference_type = "FLOAT"} : (tensor<384x512x!quant.uniform<i8:f32, 1.000000e-01>>) -> tensor<384x512xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 20.1K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/quantization/stablehlo/passes/passes.td

            "MLIR dump file name.">,
        Option<"merge_fusion_with_dequantize_",
            "merge-fusion-with-dequantize",
            "bool", /*default=*/"false",
            "Whether to merge quantized conv/dot_general fusion with subsequent dequantize.">,
      ];
      let dependentDialects = [
        "mlir::arith::ArithDialect",
        "mlir::stablehlo::StablehloDialect",
        "mlir::quant::QuantizationDialect",
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 14 06:31:57 UTC 2024
    - 10.3K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/tf2xla/tests/legalize-tf-quant.mlir

      // CHECK: %[[DEQUANTIZE:.*]] = mhlo.uniform_dequantize %[[CONVERT_2]] : (tensor<2x!quant.uniform<i8:f32, 1.000000e+00:3>>) -> tensor<2xf32>
      // CHECK: return %[[DEQUANTIZE]] : tensor<2xf32>
    
      %0 = "tf.UniformQuantize"(%arg0, %scales, %zps) {
        quantization_axis = -1 : i64, quantization_min_val = -128 : i64, quantization_max_val = 127 : i64
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 09 01:25:29 UTC 2024
    - 37.3K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h

            // correct float op should be the user of the last DequantizeOp.
            if (llvm::isa<QuantizeOpT>(user)) {
              user = *user->getResult(0).getUsers().begin();
            }
            if (auto dequantize = llvm::dyn_cast<DequantizeOpT>(user)) {
              // Replace all uses, except non-quantizable ops that are used in
              // the float backbone.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Apr 24 20:30:06 UTC 2024
    - 41.7K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/lite/tests/modify_io_nodes.mlir

      %6 = "tfl.dequantize"(%5) : (tensor<1x401408x!quant.uniform<i8:f32, 3.906250e-03>>) -> tensor<1x401408xf32>
      func.return %6 : tensor<1x401408xf32>
    
    // CHECK-LABEL: func @modified(%arg0: tensor<1x224x224x3xf32>) -> tensor<1x401408xf32>
    // CHECK-NEXT: %[[shape:.*]] = arith.constant dense<[1, 401408]> : tensor<2xi32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 19.9K bytes
    - Viewed (0)
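
The excerpt in result 4 (post_quantize.cc) describes keeping a requantize op and moving the dequantize op ahead of it. A minimal MLIR sketch of that rewrite, with hypothetical shapes, scales, and value names not taken from the repository:

    // Before: %q already carries a quantized type, so this tfl.quantize acts as a requantize.
    %rq = "tfl.quantize"(%q) {qtype = tensor<1x4x!quant.uniform<i8:f32, 2.000000e-01>>} : (tensor<1x4x!quant.uniform<i8:f32, 1.000000e-01>>) -> tensor<1x4x!quant.uniform<i8:f32, 2.000000e-01>>
    %dq = "tfl.dequantize"(%rq) : (tensor<1x4x!quant.uniform<i8:f32, 2.000000e-01>>) -> tensor<1x4xf32>

    // After: the dequantize reads the original quantized value directly, and the
    // requantize becomes dead if it has no other users.
    %dq = "tfl.dequantize"(%q) : (tensor<1x4x!quant.uniform<i8:f32, 1.000000e-01>>) -> tensor<1x4xf32>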