Search Options

Results per page
Sort
Preferred Languages
Advanced

Results 1 - 6 of 6 for FinishModelBuffer (0.23 sec)

  1. tensorflow/compiler/mlir/lite/sparsity/sparsify_model.cc

      // Import input_model to a MLIR module
      flatbuffers::FlatBufferBuilder input_builder;
      flatbuffers::Offset<tflite::Model> input_model_location =
          tflite::Model::Pack(input_builder, &input_model);
      tflite::FinishModelBuffer(input_builder, input_model_location);
    
      std::string serialized_model(
          reinterpret_cast<const char*>(input_builder.GetBufferPointer()),
          input_builder.GetSize());
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Jun 10 20:16:40 UTC 2024
    - 4.3K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/importer_test_min_max.cc

        return 1;
      }
      flatbuffers::FlatBufferBuilder builder;
      flatbuffers::Offset<tflite::Model> output_model_location =
          tflite::Model::Pack(builder, maybe_module.value().get());
      tflite::FinishModelBuffer(builder, output_model_location);
      std::string output_model_content(
          reinterpret_cast<const char*>(builder.GetBufferPointer()),
          builder.GetSize());
      std::cout << output_model_content << "\n";
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 21 18:21:50 UTC 2024
    - 6.8K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights.cc

      flatbuffers::FlatBufferBuilder input_builder;
      flatbuffers::Offset<tflite::Model> input_model_location = tflite::Model::Pack(
          input_builder, CreateMutableModelFromFile(input_model).get());
      tflite::FinishModelBuffer(input_builder, input_model_location);
    
      std::string serialized_model(
          reinterpret_cast<const char*>(input_builder.GetBufferPointer()),
          input_builder.GetSize());
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 9.5K bytes
    - Viewed (1)
  4. tensorflow/compiler/mlir/lite/quantization/lite/quantize_model_test.cc

        const bool disable_per_channel_for_dense_layers = false) {
      TensorType inference_tensor_type = activations_type;
      const bool fully_quantize = !allow_float;
    
      flatbuffers::FlatBufferBuilder input_builder;
      tflite::FinishModelBuffer(input_builder,
                                tflite::Model::Pack(input_builder, model));
    
      const std::string input_buffer(
          reinterpret_cast<const char*>(input_builder.GetBufferPointer()),
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 73.9K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/lite/flatbuffer_export.cc

                                       description, builder_.CreateVector(buffers_),
                                       metadata_buffer, *metadata, *signature_defs);
      tflite::FinishModelBuffer(builder_, model);
      // There is a limit of 2GB for a flatbuffer.
      bool flatbuffer_limit_exceeded = builder_.GetSize() > flatbuffer_size_max;
      if (flatbuffer_limit_exceeded && require_use_buffer_offset_ == false) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 21:41:49 UTC 2024
    - 164.5K bytes
    - Viewed (2)
  6. tensorflow/compiler/mlir/lite/schema/schema_generated.h

      return verifier.VerifySizePrefixedBuffer<tflite::Model>(ModelIdentifier());
    }
    
    /// Returns the canonical file extension for serialized TFLite models.
    inline const char *ModelExtension() { return "tflite"; }
    
    /// Finalizes `fbb` as a complete tflite::Model flatbuffer: installs `root`
    /// as the buffer root and stamps the file identifier returned by
    /// ModelIdentifier() into the buffer header, so readers can validate the
    /// buffer type before parsing. After this call fbb.GetBufferPointer() /
    /// fbb.GetSize() describe the finished serialized model.
    inline void FinishModelBuffer(
        ::flatbuffers::FlatBufferBuilder &fbb,
        ::flatbuffers::Offset<tflite::Model> root) {
      fbb.Finish(root, ModelIdentifier());
    }
    
    inline void FinishSizePrefixedModelBuffer(
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 21 18:21:50 UTC 2024
    - 1M bytes
    - Viewed (0)
Back to top