Results 1 - 5 of 5 for UnPackTo (0.24 sec)

  1. tensorflow/compiler/mlir/lite/sparsity/sparsify_model_test.cc

      // Load input model
      auto input_fbm = tflite::FlatBufferModel::BuildFromFile(
          "tensorflow/lite/testdata/sparse_tensor.bin");
      tflite::ModelT input_model;
      input_fbm->GetModel()->UnPackTo(&input_model);
    
      // Populate input metadata
      auto model_metadata_buffer = std::make_unique<tflite::BufferT>();
      model_metadata_buffer->data =
          std::vector<uint8_t>(expected_value.begin(), expected_value.end());
    - Last Modified: Mon Jun 10 20:16:40 UTC 2024
    - 2.9K bytes
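
The excerpt stops right after the unpack; a minimal sketch of the full unpack, edit, repack round trip is below, assuming the usual FlatBuffers object API. The helper name, metadata key, and include paths are illustrative assumptions, not taken from the test.

    #include <memory>
    #include <string>
    #include <vector>

    #include "flatbuffers/flatbuffers.h"
    #include "tensorflow/lite/model_builder.h"
    #include "tensorflow/lite/schema/schema_generated.h"

    // Hypothetical helper: unpack a FlatBuffer model into the mutable
    // object API, attach a metadata buffer, and pack it back into bytes.
    std::vector<uint8_t> AddModelMetadata(const char* model_path,
                                          const std::string& payload) {
      auto fbm = tflite::FlatBufferModel::BuildFromFile(model_path);
      if (!fbm) return {};  // file missing or not a valid FlatBuffer
      tflite::ModelT model;
      fbm->GetModel()->UnPackTo(&model);  // deep copy into mutable ModelT

      // Append a new buffer holding the metadata payload.
      auto buffer = std::make_unique<tflite::BufferT>();
      buffer->data.assign(payload.begin(), payload.end());
      model.buffers.push_back(std::move(buffer));

      // Point a metadata entry at that buffer (the key is made up).
      auto metadata = std::make_unique<tflite::MetadataT>();
      metadata->name = "example_metadata";
      metadata->buffer = static_cast<uint32_t>(model.buffers.size() - 1);
      model.metadata.push_back(std::move(metadata));

      // Re-serialize the edited model with the TFLite file identifier.
      flatbuffers::FlatBufferBuilder builder;
      builder.Finish(tflite::Model::Pack(builder, &model),
                     tflite::ModelIdentifier());
      return {builder.GetBufferPointer(),
              builder.GetBufferPointer() + builder.GetSize()};
    }
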
  2. tensorflow/compiler/mlir/lite/quantization/lite/quantize_model_test.cc

    //
    // This helper is useful because UnPackTo requires its destination to have no
    // existing state, so calling UnPackTo directly could leak memory if the model
    // was already populated. Instead, the object returned here can be used to
    // overwrite an existing model.
    ModelT UnPackFlatBufferModel(const Model& flatbuffer_model) {
      ModelT model;
      flatbuffer_model.UnPackTo(&model);
      return model;
    }
    
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 73.9K bytes
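
A short usage sketch of the pattern that comment describes (variable names are illustrative): rather than calling UnPackTo on an already-populated ModelT, overwrite the object with the fresh copy the helper returns.

    // Refresh an existing ModelT from its source FlatBuffer by assigning
    // a freshly unpacked copy instead of unpacking into the stale object.
    ModelT model = UnPackFlatBufferModel(*flatbuffer_model);
    // ... the test mutates `model` here ...
    model = UnPackFlatBufferModel(*flatbuffer_model);  // clean reset
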
  3. tensorflow/compiler/mlir/lite/python/converter_python_api.cc

      if (!model) {
        PyErr_Format(PyExc_ValueError, "Invalid model");
        return nullptr;
      }
      auto tflite_model = std::make_unique<tflite::ModelT>();
      model->GetModel()->UnPackTo(tflite_model.get(), nullptr);
    
      const tflite::TensorType inference_tensor_type =
          FromTocoDataTypeToTflitToTensorType(inference_type);
      const tflite::TensorType input_type =
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 19.2K bytes
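
The PyErr_Format-then-return-nullptr pair above is the standard CPython convention for raising an exception from C or C++. A self-contained sketch of that pattern follows; the function and its arguments are assumptions for illustration, not the converter's real entry point.

    #define PY_SSIZE_T_CLEAN
    #include <Python.h>

    // On bad input, set a Python exception and return nullptr so the
    // interpreter raises ValueError instead of returning a result.
    static PyObject* ParseModel(PyObject* self, PyObject* args) {
      const char* data = nullptr;
      Py_ssize_t length = 0;
      if (!PyArg_ParseTuple(args, "y#", &data, &length)) return nullptr;
      if (length == 0) {
        PyErr_Format(PyExc_ValueError, "Invalid model");
        return nullptr;  // the exception is already set
      }
      return PyBytes_FromStringAndSize(data, length);
    }
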
  4. tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights.cc

      return op_name.lower();
    }
    
    std::unique_ptr<tflite::ModelT> CreateMutableModelFromFile(
        const tflite::Model* input_model) {
      auto copied_model = std::make_unique<tflite::ModelT>();
      input_model->UnPackTo(copied_model.get(), nullptr);
      return copied_model;
    }
    }  // namespace
    
    // TODO(b/214314076): Support MLIR model as an input for the C++ dynamic range
    // quantization API
    TfLiteStatus QuantizeWeights(
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 9.5K bytes
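
The second argument UnPackTo receives here is FlatBuffers' optional resolver callback; passing nullptr simply means no pointer remapping is applied during unpacking. A sketch of how the deep copy might be used so edits never touch the original buffer (the description edit is an illustrative assumption):

    // Edit the mutable deep copy, then pack it into a fresh FlatBuffer;
    // the original `input_model` buffer stays untouched throughout.
    std::unique_ptr<tflite::ModelT> mutable_model =
        CreateMutableModelFromFile(input_model);
    mutable_model->description = "weights-quantized copy";
    flatbuffers::FlatBufferBuilder builder;
    builder.Finish(tflite::Model::Pack(builder, mutable_model.get()),
                   tflite::ModelIdentifier());
    // builder.GetBufferPointer()/GetSize() now hold the new model bytes.
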
  5. tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights_test.cc

    constexpr bool kUseUpdatedHybridSchemeDefault = true;
    
    std::unique_ptr<ModelT> CreateMutableModelFromFile(const Model* input_model) {
      auto copied_model = std::make_unique<ModelT>();
      input_model->UnPackTo(copied_model.get(), nullptr);
      return copied_model;
    }
    
    std::unique_ptr<FlatBufferModel> ReadTestModel() {
      auto model_path = tensorflow::io::JoinPath(
          *g_test_model_dir, internal::kConvModelWith0Plus10Weights);
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 32.3K bytes
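
In a test, the cheapest sanity check on such a deep copy is structural equality with the source model. A googletest-style sketch (the specific assertions are assumptions, not taken from the file):

    // The unpacked copy should mirror the structure of the source model.
    std::unique_ptr<FlatBufferModel> fb_model = ReadTestModel();
    const Model* input_model = fb_model->GetModel();
    std::unique_ptr<ModelT> copy = CreateMutableModelFromFile(input_model);
    ASSERT_EQ(copy->subgraphs.size(), input_model->subgraphs()->size());
    ASSERT_EQ(copy->buffers.size(), input_model->buffers()->size());
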