Results 1 - 10 of 14 for Quantile (0.15 sec)

  1. tensorflow/compiler/mlir/lite/tests/prepare-quantize.mlir

    // RUN: tf-opt %s -tfl-prepare-quantize="quantize-allowlist=quantize_float_placeholder_only,not_reset_input" | FileCheck %s
    // RUN: tf-opt %s -tfl-prepare-quantize="disable-set-input-nodes-quantization-params=true" | FileCheck --check-prefix=MixedPrecision %s
    // RUN: tf-opt %s -tfl-prepare-quantize="is-qdq-conversion=true" | FileCheck --check-prefix=QDQ %s
    
    // CHECK-LABEL: main
    // Uses `main` function to match the default target function of QuantSpecs and
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 67.5K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/lite/tests/prepare-quantize-post-training.mlir

    // RUN: tf-opt %s -tfl-prepare-quantize="quantize-signed=true post-training-quantize=true" -cse | FileCheck %s
    // RUN: tf-opt %s -tfl-prepare-quantize="quantize-signed=true post-training-quantize=true legacy-float-scale=true" -cse| FileCheck --check-prefix=Legacy %s
    
    // CHECK-LABEL: QuantizeLstmCellInput
    func.func @QuantizeLstmCellInput(%arg0: tensor<1x28x28xf32>) -> tensor<1x28x20xf32> {
        %cst_2 = "tfl.no_value"() {value = unit} : () -> none
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 52.6K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/lite/stablehlo/transforms/compose_uniform_quantized_type_pass.cc

    // %9 = stablehlo.constant  // Input 3 zero point z3.
    // %10 = stablehlo.constant  // s1 * s2.
    // %11 = call @uniform_quantize(%0, %2, %3)  // Quantize input (q1).
    // %12 = call @uniform_quantize_0(%1, %5, %6)  // Quantize input (q2).
    // %13 = stablehlo.convert %11  // i8->i32 cast for q1.
    // %14 = stablehlo.convert %3  // [Optional] i8->i32 cast for z1.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 64.6K bytes
    - Viewed (0)
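The comments in result 3 describe the float-math pattern that compose_uniform_quantized_type_pass.cc matches: each input is quantized through a private @uniform_quantize callee and then widened from i8 to i32 before the integer arithmetic. A minimal sketch of that shape, assuming illustrative tensor sizes and SSA names that are not taken from the pass itself:

    // Quantize input x1 (q1), then widen to i32 for the integer math.
    // Shapes and callee signature here are illustrative assumptions.
    %q1 = call @uniform_quantize(%x1, %s1, %z1)
            : (tensor<1x4xf32>, tensor<1x1xf32>, tensor<1x1xi8>) -> tensor<1x4xi8>
    %q1_i32 = stablehlo.convert %q1 : (tensor<1x4xi8>) -> tensor<1x4xi32>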
  4. tensorflow/compiler/mlir/lite/tests/prepare-tf.mlir

    // CHECK:  return %[[RESULT]]
    }
    
    func.func @QDQsFollowedByTranspose(tensor<1x2xf32>) -> (tensor<2x1xf32>) {
    ^bb0(%arg0: tensor<1x2xf32>):
      %cst_0 = arith.constant dense<[1, 0]> : tensor<2xi32>
      %0 = "tfl.quantize"(%arg0){qtype = tensor<1x2x!quant.uniform<u8:f32, 1.0>>}: (tensor<1x2xf32>) -> (tensor<1x2x!quant.uniform<u8:f32, 1.0>>)
      %1 = "tfl.dequantize"(%0): (tensor<1x2x!quant.uniform<u8:f32, 1.0>>) -> (tensor<1x2xf32>)
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 29 07:26:59 UTC 2024
    - 59.8K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize_composite_functions.cc

        // This is the argument used to refer to the pass in
        // the textual format (on the commandline for example).
        return "quant-quantize-composite-functions";
      }
    
      StringRef getDescription() const final {
        // This is a brief description of the pass.
        return "Quantize composite functions with QDQ input/outputs.";
      }
    
      void getDependentDialects(DialectRegistry& registry) const override {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 54.5K bytes
    - Viewed (0)
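The getArgument() override in result 5 is what gives the pass its textual command-line name: the string it returns is the flag passed to tf-quant-opt, as the RUN lines of result 7 below show. A minimal sketch of such an invocation (the real tests in result 7 also run -quant-insert-quantized-functions first; this stripped-down form is an assumption):

    // RUN: tf-quant-opt %s -quant-quantize-composite-functions | FileCheck %s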
  6. tensorflow/compiler/mlir/lite/quantization/lite/quantize_model_test.cc

        readonly_model_ = input_model_->GetModel();
        model_ = UnPackFlatBufferModel(*readonly_model_);
      }
    };
    
    TEST_F(QuantizeLSTM2Test, VerifyLSTM) {
      // Quantize model.
      auto status = QuantizeModelAllOperators(
          &model_, TensorType_FLOAT32, TensorType_FLOAT32,
          /*allow_float=*/false, TensorType_INT8, output_buffer_);
      ASSERT_THAT(status, Eq(kTfLiteOk));
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 73.9K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composit_functions_debugging.mlir

    // RUN: tf-quant-opt %s -split-input-file -quant-insert-quantized-functions -quant-quantize-composite-functions | FileCheck --check-prefix=TF %s
    // RUN: tf-quant-opt %s -split-input-file -quant-insert-quantized-functions -quant-quantize-composite-functions='target-opset=XLA' | FileCheck --check-prefix=XLA %s
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Nov 06 01:23:21 UTC 2023
    - 80.5K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/lite/flatbuffer_import.cc

          return emitError(loc, type_or_err.status().ToString()),
                 type_or_err.status();
        }
        auto type = std::move(type_or_err).value();
    
        if (op_name == "tfl.quantize") {
          // Special case for quantize: return type must also be in qtype attribute
          op_state.addAttribute("qtype", mlir::TypeAttr::get(type));
        } else if (op_name == "tfl.reshape" && op_state.operands.size() == 1) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 21 18:21:50 UTC 2024
    - 66.8K bytes
    - Viewed (0)
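The importer comment in result 8 notes that for tfl.quantize the result type must also be carried in the qtype attribute. The test in result 4 above shows what this looks like in MLIR; repeated here with the same types for clarity:

    // The result type and the qtype attribute carry the same quantized type.
    %0 = "tfl.quantize"(%arg0) {qtype = tensor<1x2x!quant.uniform<u8:f32, 1.0>>} : (tensor<1x2xf32>) -> tensor<1x2x!quant.uniform<u8:f32, 1.0>>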
  9. tensorflow/compiler/mlir/lite/transforms/optimize_patterns.td

    foreach BinaryOp = [TFL_DivOp, TFL_MulOp]<Op> in
      defm : FuseMulOrDivWithConv2dOrDepthwiseConv2d<BinaryOp>;
    
    
    // This pattern applies when the same quantize/dequantize have been used twice
    // with the same scale. We want to remove the redundancy.
    // TODO(fengliuai): move this to the sanity check of pre-quantize pass.
    def eliminate_dq_q_pairs : Pat<
      (TFL_QuantizeOp (TFL_DequantizeOp $in), $qt),
      (replaceWithValue $in),
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 16 20:31:41 UTC 2024
    - 66.4K bytes
    - Viewed (0)
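The eliminate_dq_q_pairs pattern in result 9 removes a dequantize that is immediately re-quantized to the same quantized type, so consumers use the original quantized value directly. A hedged MLIR sketch of the before/after (operand names and the 4-element shape are illustrative):

    // Before: %in is dequantized and immediately re-quantized with the same scale.
    %dq = "tfl.dequantize"(%in) : (tensor<4x!quant.uniform<u8:f32, 1.0>>) -> tensor<4xf32>
    %q = "tfl.quantize"(%dq) {qtype = tensor<4x!quant.uniform<u8:f32, 1.0>>} : (tensor<4xf32>) -> tensor<4x!quant.uniform<u8:f32, 1.0>>
    // After replaceWithValue fires, uses of %q become uses of %in and the pair is dropped.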
  10. tensorflow/compiler/mlir/lite/BUILD

            "transforms/post_quantize.cc",
            "transforms/prepare_quantize.cc",
            "transforms/prepare_quantize_dynamic_range.cc",
            "transforms/prepare_quantize_helper.cc",
            "transforms/quantize.cc",
            "transforms/quantize_variables.cc",
            "utils/generated_op_quant_spec_getters.inc",
        ],
        hdrs = [
            "transforms/passes.h",
            "transforms/prepare_quantize_helper.h",
        ],
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 21:41:49 UTC 2024
    - 49.9K bytes
    - Viewed (0)