Results 1 - 10 of 91 for Quantized (0.13 sec)

  1. tensorflow/compiler/mlir/lite/stablehlo/tests/compose-uniform-quantized-type.mlir

    // RUN: odml-to-stablehlo-opt --compose-uniform-quantized-type \
    // RUN:     --split-input-file --verify-diagnostics %s | FileCheck %s
    
    module {
    // CHECK-LABEL: quantized_conv_op
    // CHECK-SAME: %[[ARG:.*]]: tensor<1x3x3x4xf32>
      func.func @quantized_conv_op(%arg0: tensor<1x3x3x4xf32>) -> tensor<1x3x3x4xf32> {
        %1 = stablehlo.constant dense<1.000000e+03> : tensor<1x1x1x1xf32>  // Input inverse scale.
    - Last Modified: Tue May 14 17:10:32 UTC 2024
    - 37K bytes
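
    A rough illustration of the arithmetic this test composes: uniform
    quantization maps a real value to an integer via q = round(x *
    inverse_scale) + zero_point, where the inverse scale (1.0e+03 above) is
    1/scale. The sketch below is plain C++, not TensorFlow code;
    QuantizeAffine and DequantizeAffine are hypothetical names.

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Hypothetical helper: q = round(x * inverse_scale) + zero_point,
    // saturated to the i8 storage range.
    int8_t QuantizeAffine(float x, float inverse_scale, int32_t zero_point) {
      int32_t q =
          static_cast<int32_t>(std::lround(x * inverse_scale)) + zero_point;
      return static_cast<int8_t>(std::clamp<int32_t>(q, -128, 127));
    }

    // Inverse mapping: x ~ (q - zero_point) * scale
    //                    = (q - zero_point) / inverse_scale.
    float DequantizeAffine(int8_t q, float inverse_scale, int32_t zero_point) {
      return static_cast<float>(q - zero_point) / inverse_scale;
    }
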
  2. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize.cc

              !IsConnectedWithCompsiteFunction(quantizing_op)) {
            continue;
          }
    
          // Same scale op is not supported for Uniform Quantized ops.
          if (target_opset_ == OpSet::UNIFORM_QUANTIZED) {
            continue;
          }
    
      // Collect all the quantized inputs and "clone" the matched op with these
      // inputs.
          SmallVector<Value, 4> inputs;
          inputs.reserve(quantizing_op->getNumOperands());
    - Last Modified: Fri Mar 22 05:52:39 UTC 2024
    - 23.6K bytes
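
    A simplified, self-contained sketch of the loop above, with hypothetical
    Op/Value stand-ins rather than the MLIR API: ops that fail the eligibility
    checks are skipped with `continue`; otherwise their operands are collected
    so the op can be re-created ("cloned") with quantized inputs.

    #include <vector>

    struct Value {};
    struct Op {
      std::vector<Value> operands;
      bool connected_to_composite = false;  // stand-in for the composite check
      bool same_scale = false;              // stand-in for the same-scale check
    };

    void QuantizeOps(std::vector<Op>& ops, bool uniform_quantized_opset) {
      for (Op& op : ops) {
        if (!op.connected_to_composite) continue;  // skip, as in the snippet
        // Same-scale ops are unsupported for the Uniform Quantized opset.
        if (op.same_scale && uniform_quantized_opset) continue;
        std::vector<Value> inputs;
        inputs.reserve(op.operands.size());  // mirrors inputs.reserve(...)
        for (const Value& v : op.operands) inputs.push_back(v);
        // ... clone `op` with `inputs` and replace its uses ...
      }
    }
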
  3. tensorflow/compiler/mlir/quantization/stablehlo/passes/passes.td

    }
    
    def QuantizeCompositeFunctionsPass : Pass<"stablehlo-quantize-composite-functions", "ModuleOp"> {
      let summary = "Quantize composite functions with QDQ input / outputs.";
      let options = [
        Option<"enable_per_channel_quantized_weight_",
            "enable-per-channel-quantized-weight",
            "bool", /*default=*/"true",
            "Whether to enable per-channel quantized weights.">,
        Option<"mlir_dump_file_name_", "mlir-dump-file-name",
    - Last Modified: Tue May 14 06:31:57 UTC 2024
    - 10.3K bytes
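
    What the enable-per-channel-quantized-weight option toggles, as a minimal
    sketch (hypothetical helper, not TensorFlow code): per-channel quantization
    derives one scale per output channel instead of a single scale for the
    whole weight tensor, which tightens the range each channel must cover.

    #include <algorithm>
    #include <cmath>
    #include <vector>

    // weights is [channel][element]; returns a symmetric i8 scale per channel,
    // scale_c = max(|w_c|) / 127. Per-tensor quantization would instead take
    // the max over all channels and return a single scale.
    std::vector<float> PerChannelScales(
        const std::vector<std::vector<float>>& weights) {
      std::vector<float> scales;
      scales.reserve(weights.size());
      for (const auto& channel : weights) {
        float max_abs = 0.0f;
        for (float w : channel) max_abs = std::max(max_abs, std::fabs(w));
        scales.push_back(max_abs / 127.0f);
      }
      return scales;
    }
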
  4. tensorflow/compiler/mlir/quantization/stablehlo/passes/quantization_patterns.cc

    //
    //   1. Replaces quantized `TF::XlaCallModuleOp` with a `func::CallOp`.
    //   2. Quantizes the callee function.
    //
    // The input to this pattern assumes an invalid IR, where even if a
    // `TF::XlaCallModuleOp` is quantized, the callee remains unquantized. Step (2)
    // not only replaces the input and output tensor types into quantized ones, but
    // also rewrites the body with a quantized equivalent.
    //
    - Last Modified: Fri May 03 06:04:36 UTC 2024
    - 41.7K bytes
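
    A schematic sketch of the two-step rewrite described above, using
    hypothetical structs rather than the MLIR API: step (1) turns the quantized
    wrapper call into a direct call, and step (2) rewrites the callee's
    interface (and, not shown here, its body) to quantized types.

    enum class TensorType { kF32, kQuantizedI8 };

    struct Callee {
      TensorType input = TensorType::kF32;
      TensorType output = TensorType::kF32;
    };

    struct CallSite {
      bool is_xla_call_module = true;  // step (1) makes this a plain call
    };

    void RewriteQuantizedCall(CallSite& call, Callee& callee) {
      call.is_xla_call_module = false;           // (1) XlaCallModuleOp -> func::CallOp
      callee.input = TensorType::kQuantizedI8;   // (2) quantize the callee's
      callee.output = TensorType::kQuantizedI8;  //     inputs and outputs
    }
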
  5. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h

    };
    
    // Converts quantized tensor type with signed integer type to quantized tensor
    // type with unsigned integer type.
    Type ConvertSignedQuantizedToUnsigned(Type signed_tensor_type, Location loc);
    
    // Converts quantize ops with unsigned quantized types to those with signed
    // quantized types and preserves the scales.
    template <typename QuantizeOpT>
    - Last Modified: Wed Apr 24 20:30:06 UTC 2024
    - 41.7K bytes
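
    A minimal sketch of the conversion the header declares (not the TensorFlow
    implementation): for 8-bit storage, a signed quantized value maps to
    unsigned by shifting both the stored value and the zero point by 128; the
    scale is preserved, so the represented real value (q - zp) * scale is
    unchanged.

    #include <cstdint>

    struct QuantParams {
      float scale;
      int32_t zero_point;
    };

    // Scale is preserved; only the zero point moves by 128.
    QuantParams SignedToUnsignedParams(QuantParams signed_params) {
      return {signed_params.scale, signed_params.zero_point + 128};
    }

    // The stored values shift by the same amount, so (q - zp) is invariant.
    uint8_t SignedToUnsignedStorage(int8_t q) {
      return static_cast<uint8_t>(static_cast<int32_t>(q) + 128);
    }
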
  6. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_config.h

      bool verify_numeric = false;
      // Whether to add verification layer by layer or on the whole model. When
      // disabled (per-layer), float and quantized ops run from the same input
      // (the output of the previous quantized layer). When enabled, float and
      // quantized ops run with the respective float and quantized outputs of
      // the previous ops.
      bool whole_model_verify = false;
    
      // Whether to use fake quant attributes to calculate quantization parameters.
    - Last Modified: Wed Mar 13 10:16:19 UTC 2024
    - 10.8K bytes
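
    A toy illustration of the two verification modes (hypothetical stand-in
    layers, not TensorFlow code): with whole_model_verify disabled, the float
    op receives the previous *quantized* output, so each layer's error is
    measured in isolation; with it enabled, the float and quantized paths run
    independently and drift accumulates.

    #include <vector>

    float FloatLayer(float x) { return x * 0.5f; }          // stand-in float op
    float QuantLayer(float x) { return x * 0.5f + 0.01f; }  // stand-in with error

    std::vector<float> VerifyErrors(float input, int num_layers,
                                    bool whole_model_verify) {
      std::vector<float> errors;
      float f = input, q = input;
      for (int i = 0; i < num_layers; ++i) {
        // Per-layer mode feeds the float op the previous quantized output.
        float float_in = whole_model_verify ? f : q;
        f = FloatLayer(float_in);
        q = QuantLayer(q);
        errors.push_back(q - f);
      }
      return errors;
    }
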
  7. tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.proto

        // previous quantized layer (note that this differs from
        // DEBUGGER_TYPE_FLOAT_PER_LAYER). Each layer in the debugging model has
        // a DumpTensor, which saves the entire output of both the quantized and
        // the unquantized layer.
        DEBUGGER_TYPE_INT_PER_LAYER = 2;
        // DEBUGGER_TYPE_FLOAT_PER_LAYER creates a debugging model with both
    - Last Modified: Fri May 17 03:36:50 UTC 2024
    - 14.3K bytes
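
    A rough sketch of what DEBUGGER_TYPE_INT_PER_LAYER records, with a
    hypothetical DumpTensor stand-in (the real DumpTensor is an op inserted
    into the debugging model): for each layer, the entire outputs of both the
    quantized and the unquantized op are saved for offline comparison.

    #include <fstream>
    #include <string>
    #include <vector>

    // Hypothetical stand-in: writes one value per line to `path`.
    void DumpTensor(const std::string& path, const std::vector<float>& values) {
      std::ofstream out(path);
      for (float v : values) out << v << '\n';
    }

    void DumpLayerPair(int layer, const std::vector<float>& quantized_out,
                       const std::vector<float>& unquantized_out) {
      DumpTensor("layer_" + std::to_string(layer) + "_quantized.txt",
                 quantized_out);
      DumpTensor("layer_" + std::to_string(layer) + "_unquantized.txt",
                 unquantized_out);
    }
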
  8. tensorflow/compiler/mlir/quantization/stablehlo/passes/quantization_patterns.h

    namespace mlir::quant::stablehlo {
    
    // Checks whether an op is connected with a quantized composite function. If
    // not, the same-scale op will not be quantized. This decision is based on the
    // current assumption that the performance gain of the same-scale op itself
    // could not beat the overhead of the quantize and dequantize routines that
    // need to be added around that op. When the assumption changes, this policy might
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 10.9K bytes
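
    A condensed sketch of the stated policy, with hypothetical types: a
    same-scale op is quantized only when a neighboring quantized composite
    function lets it stay in the quantized domain; otherwise the
    quantize/dequantize pair that would have to surround it is assumed to cost
    more than the op saves.

    struct SameScaleOp {
      bool has_quantized_composite_neighbor = false;
    };

    bool ShouldQuantize(const SameScaleOp& op) {
      // Without a quantized neighbor, a q/dq routine pair must be added around
      // the op, and its overhead is assumed to exceed the op's own gain.
      return op.has_quantized_composite_neighbor;
    }
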
  9. tensorflow/compiler/mlir/lite/transforms/prepare_quantize.cc

          llvm::SmallVector<Value, 4> quantized;
          for (auto user : returned.getUsers()) {
            if (auto q = Quantized(user)) {
              quantized.push_back(q);
            }
          }
          if (quantized.size() == 1) {
            ret.setOperand(i, quantized.front());
          }
          i++;
        }
      });
    
    // We prefer placing quantization emulation ops on the results of the
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 17.6K bytes
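
    A self-contained sketch of the rewiring above, with simplified stand-ins
    for Value and the Quantized() helper: when a returned value has exactly one
    quantized user, the return is pointed at that quantized result, so the
    quantization emulation op ends up on the function result.

    #include <cstddef>
    #include <optional>
    #include <vector>

    struct Value { bool is_quantized = false; };

    // Stand-in for the Quantized() matcher in the snippet.
    std::optional<Value> Quantized(const Value& user) {
      if (user.is_quantized) return user;
      return std::nullopt;
    }

    void RewireReturn(std::vector<Value>& ret_operands,
                      const std::vector<std::vector<Value>>& users_per_operand) {
      for (std::size_t i = 0; i < ret_operands.size(); ++i) {
        std::vector<Value> quantized;
        for (const Value& user : users_per_operand[i])
          if (auto q = Quantized(user)) quantized.push_back(*q);
        // Only rewire when the quantized user is unambiguous.
        if (quantized.size() == 1) ret_operands[i] = quantized.front();
      }
    }
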
  10. tensorflow/compiler/mlir/quantization/stablehlo/cc/report_test.cc

      // The quantized call op without the _quantization_method attribute is not
      // captured as a `QuantizationResult`.
      ASSERT_THAT(results.results(), IsEmpty());
    }
    
    TEST_F(QuantizationReportTest, InitializeWithModuleOpWithInvalidCalleeName) {
      // A quantized dot_general op whose callee function has an invalid name; the
      // name is expected to start with `quantized_`.
    - Last Modified: Thu Apr 25 10:10:34 UTC 2024
    - 18.5K bytes
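
    A minimal sketch of the naming convention the test exercises (plain C++,
    not the test's helpers): quantized wrapper functions are recognized by a
    `quantized_` prefix, so any callee whose name merely contains the substring
    is rejected.

    #include <string>

    bool IsValidQuantizedCalleeName(const std::string& name) {
      return name.rfind("quantized_", 0) == 0;  // prefix match only
    }

    // IsValidQuantizedCalleeName("quantized_dot_general_fn") -> true
    // IsValidQuantizedCalleeName("composite_dot_general_fn") -> false
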