Results 1 - 8 of 8 for input_model (0.29 sec)

  1. tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights.cc

      StringRef op_name(tflite_op_name.data(), tflite_op_name.size());
      return op_name.lower();
    }
    
    std::unique_ptr<tflite::ModelT> CreateMutableModelFromFile(
        const tflite::Model* input_model) {
      auto copied_model = std::make_unique<tflite::ModelT>();
      input_model->UnPackTo(copied_model.get(), nullptr);
      return copied_model;
    }
    }  // namespace
    
    // TODO(b/214314076): Support MLIR model as an input for the C++ dynamic range
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 9.5K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/lite/sparsity/sparsify_model_test.cc

          "tensorflow/lite/testdata/sparse_tensor.bin");
      tflite::ModelT input_model;
      input_fbm->GetModel()->UnPackTo(&input_model);
    
      // Populate input metadata
      auto model_metadata_buffer = std::make_unique<tflite::BufferT>();
      model_metadata_buffer->data =
          std::vector<uint8_t>(expected_value.begin(), expected_value.end());
      input_model.buffers.push_back(std::move(model_metadata_buffer));
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Jun 10 20:16:40 UTC 2024
    - 2.9K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/lite/sparsity/sparsify_model.cc

    namespace mlir {
    namespace lite {
    
    absl::Status SparsifyModel(const tflite::ModelT& input_model,
                               flatbuffers::FlatBufferBuilder* builder) {
      MLIRContext context;
      StatusScopedDiagnosticHandler statusHandler(&context,
                                                  /*propagate=*/true);
    
      // Import input_model to a MLIR module
      flatbuffers::FlatBufferBuilder input_builder;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Jun 10 20:16:40 UTC 2024
    - 4.3K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights.h (see the usage sketch after the results)

                                 const tflite::Model* input_model,
                                 int64_t weights_min_num_elements,
                                 bool use_hybrid_evaluation = true);
    
    TfLiteStatus QuantizeWeights(flatbuffers::FlatBufferBuilder* builder,
                                 const tflite::Model* input_model,
                                 BufferType quant_type = BufferType::QUANTIZED_INT8,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 4.2K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/lite/sparsity/sparsify_model.h (see the usage sketch after the results)

    #include "tensorflow/compiler/mlir/lite/schema/schema_generated.h"
    
    namespace mlir {
    namespace lite {
    
    // Sparsify the `input_model` and write the result to a flatbuffer `builder`.
    absl::Status SparsifyModel(const tflite::ModelT& input_model,
                               flatbuffers::FlatBufferBuilder* builder);
    }  // namespace lite
    }  // namespace mlir
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Jun 10 20:16:40 UTC 2024
    - 1.3K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights_test.cc

      QuantizeWeightsTest() {}
    
      void LoadBasicModel() {
        input_model_ = ReadTestModel();
        model_ = input_model_->GetModel();
      }
    
      void LoadSharedWeightsModel() {
        input_model_ = ReadSharedWeightsTestModel();
        model_ = input_model_->GetModel();
      }
    
      void LoadGatherTestModel() {
        input_model_ = ReadGatherTestModel();
        model_ = input_model_->GetModel();
      }
    
      void LoadCustomOpTestModel() {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 32.3K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/lite/quantization/lite/quantize_model_test.cc

    }
    
    class QuantizeModelTest : public testing::Test {
     protected:
      QuantizeModelTest() {
        input_model_ = ReadModel(internal::kConvModelWith0Plus10Weights);
        readonly_model_ = input_model_->GetModel();
        model_ = UnPackFlatBufferModel(*readonly_model_);
      }
    
      std::unique_ptr<FlatBufferModel> input_model_;
      const Model* readonly_model_;
      tflite::ModelT model_;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 73.9K bytes
    - Viewed (0)
  8. maven-api-impl/src/main/java/org/apache/maven/internal/impl/model/DefaultModelBuilder.java

            inputModel = injectProfileActivations(inputModel, interpolatedActivations);
    
            // profile injection
            inputModel = profileInjector.injectProfiles(inputModel, activePomProfiles, request, problems);
            inputModel = profileInjector.injectProfiles(inputModel, activeExternalProfiles, request, problems);
    
            return inputModel;
        }
    
    Registered: Wed Jun 12 09:55:16 UTC 2024
    - Last Modified: Fri Jun 07 07:31:02 UTC 2024
    - 61.9K bytes
    - Viewed (0)
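
The two QuantizeWeights declarations excerpted in result 4 differ only in their trailing parameters, and every parameter after input_model carries a default, so the overload taking BufferType can be called with just a builder and a read-only model. The following is a minimal sketch of that call; the mlir::lite namespace, the tensorflow/lite/model_builder.h include, and the my_model.tflite path are assumptions, not taken from the results above.

    #include "flatbuffers/flatbuffers.h"
    #include "tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights.h"
    #include "tensorflow/lite/model_builder.h"
    
    int main() {
      // Placeholder model path (assumption); load the read-only flatbuffer model.
      auto input_fbm = tflite::FlatBufferModel::BuildFromFile("my_model.tflite");
      if (input_fbm == nullptr) return 1;
    
      flatbuffers::FlatBufferBuilder builder;
      // Overload from result 4; quant_type defaults to BufferType::QUANTIZED_INT8.
      // The mlir::lite namespace is an assumption, inferred from the sibling
      // sparsify_model.h shown in result 5.
      if (mlir::lite::QuantizeWeights(&builder, input_fbm->GetModel()) != kTfLiteOk) {
        return 1;
      }
      // builder.GetBufferPointer() / builder.GetSize() now reference the
      // weight-quantized model.
      return 0;
    }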
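
Result 5 declares SparsifyModel against the mutable tflite::ModelT object API, and the test in result 2 shows the read-only flatbuffer being unpacked into that form first. A hedged sketch combining the two follows; the sparse_tensor.bin path and the tensorflow/lite/model_builder.h include are again assumptions standing in for the test setup.

    #include "absl/status/status.h"
    #include "flatbuffers/flatbuffers.h"
    #include "tensorflow/compiler/mlir/lite/sparsity/sparsify_model.h"
    #include "tensorflow/lite/model_builder.h"
    
    int main() {
      // Placeholder path (assumption); result 2 reads
      // tensorflow/lite/testdata/sparse_tensor.bin.
      auto input_fbm = tflite::FlatBufferModel::BuildFromFile("sparse_tensor.bin");
      if (input_fbm == nullptr) return 1;
    
      // SparsifyModel takes the mutable object API, so unpack the read-only
      // flatbuffer into a tflite::ModelT first, as the test in result 2 does.
      tflite::ModelT input_model;
      input_fbm->GetModel()->UnPackTo(&input_model);
    
      flatbuffers::FlatBufferBuilder builder;
      const absl::Status status = mlir::lite::SparsifyModel(input_model, &builder);
      if (!status.ok()) return 1;
    
      // builder now holds the sparsified model flatbuffer.
      return 0;
    }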