Search Options

Results per page
Sort
Preferred Languages
Advanced

Results 1 - 4 of 4 for QuantizeWeights (0.24 sec)

  1. tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights.cc

                              result.size());
    
      return kTfLiteOk;
    }
    
    TfLiteStatus QuantizeWeights(flatbuffers::FlatBufferBuilder* builder,
                                 const tflite::Model* input_model,
                                 int64_t weights_min_num_elements,
                                 bool use_hybrid_evaluation) {
      return QuantizeWeights(
          builder, input_model,
          /*inference_type=*/tflite::TensorType_INT8,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 9.5K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights.h

    // Overloading methods to support old quantizer versions API
    TfLiteStatus QuantizeWeights(flatbuffers::FlatBufferBuilder* builder,
                                 const tflite::Model* input_model,
                                 int64_t weights_min_num_elements,
                                 bool use_hybrid_evaluation = true);
    
    TfLiteStatus QuantizeWeights(flatbuffers::FlatBufferBuilder* builder,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 4.2K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights_test.cc

    namespace {
    tensorflow::string* g_test_model_dir = nullptr;
    }  // namespace
    
    namespace tflite {
    namespace optimize {
    namespace {
    
    using mlir::lite::BufferType;
    using mlir::lite::CustomOpMap;
    using mlir::lite::QuantizeWeights;
    constexpr bool kUseUpdatedHybridSchemeDefault = true;
    
    std::unique_ptr<ModelT> CreateMutableModelFromFile(const Model* input_model) {
      auto copied_model = std::make_unique<ModelT>();
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 32.3K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/tf_to_tfl_flatbuffer.cc

        default:
          return absl::InvalidArgumentError("Quantized type not supported");
          break;
      }
    
      bool use_updated_hybrid_scheme = !quant_specs.disable_per_channel;
      if (::tflite::optimize::QuantizeWeights(
              &q_builder, input_model, quantized_type, use_updated_hybrid_scheme,
              ::tflite::optimize::QuantizerType::OLD_QUANTIZER) != kTfLiteOk) {
        return absl::InvalidArgumentError(
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 03 18:01:23 UTC 2024
    - 23.8K bytes
    - Viewed (0)
Back to top