Results 1 - 10 of 29 for TfLite (0.29 sec)

  1. tensorflow/compiler/mlir/lite/flatbuffer_export.cc

            } else {
              return tflite::TensorType_INT4;
            }
          case 8:
            return itype.isUnsigned() ? tflite::TensorType_UINT8
                                      : tflite::TensorType_INT8;
          case 16:
            return itype.isUnsigned() ? tflite::TensorType_UINT16
                                      : tflite::TensorType_INT16;
          case 32:
            return itype.isUnsigned() ? tflite::TensorType_UINT32
    - Last Modified: Wed Jun 12 21:41:49 UTC 2024
    - 164.5K bytes
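
    Note: the excerpt above is cut off mid-expression. A minimal standalone
    sketch of the same width/signedness mapping, assuming the generated TFLite
    schema header; the function name is hypothetical, and the 32-bit signed
    branch and the default are inferred from the pattern, not quoted:

        #include "tensorflow/lite/schema/schema_generated.h"

        // Maps an integer bit width plus signedness to a TFLite tensor type,
        // mirroring the switch in the excerpt above.
        tflite::TensorType IntTypeToTensorType(int width, bool is_unsigned) {
          switch (width) {
            case 4:
              return tflite::TensorType_INT4;  // per the excerpt's else branch
            case 8:
              return is_unsigned ? tflite::TensorType_UINT8
                                 : tflite::TensorType_INT8;
            case 16:
              return is_unsigned ? tflite::TensorType_UINT16
                                 : tflite::TensorType_INT16;
            case 32:
              return is_unsigned ? tflite::TensorType_UINT32
                                 : tflite::TensorType_INT32;
            default:
              return tflite::TensorType_INT32;  // fallback for this sketch only
          }
        }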
  2. tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights.cc

                                 const tflite::Model* input_model,
                                 BufferType quant_type,
                                 bool use_updated_hybrid_scheme) {
      tflite::TensorType inference_type;
      switch (quant_type) {
        case BufferType::QUANTIZED_FLOAT16:
          inference_type = tflite::TensorType_FLOAT16;
          break;
        default:
          inference_type = tflite::TensorType_INT8;
      }
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 9.5K bytes
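
    Note: a hedged call sketch for the overload excerpted above. The leading
    builder parameter is cut off in the excerpt, and the mlir::lite namespace
    and BufferType location are assumptions based on the file's path:

        #include "flatbuffers/flatbuffers.h"
        #include "tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights.h"

        TfLiteStatus QuantizeWeightsFp16Sketch(
            const tflite::Model* input_model,
            flatbuffers::FlatBufferBuilder* builder) {
          // QUANTIZED_FLOAT16 selects TensorType_FLOAT16 in the switch above;
          // any other BufferType defaults to TensorType_INT8.
          return mlir::lite::QuantizeWeights(
              builder, input_model, mlir::lite::BufferType::QUANTIZED_FLOAT16,
              /*use_updated_hybrid_scheme=*/true);
        }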
  3. tensorflow/compiler/mlir/lite/python/converter_python_api.cc

    }
    
    tflite::TensorType FromTocoDataTypeToTflitToTensorType(int inference_type) {
      switch (inference_type) {
        case toco::IODataType::QUANTIZED_INT16:
          return tflite::TensorType_INT16;
        case toco::IODataType::QUANTIZED_UINT8:
          return tflite::TensorType_UINT8;
        case toco::IODataType::UINT8:
          return tflite::TensorType_UINT8;
        case toco::IODataType::QUANTIZED_INT8:
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 19.2K bytes
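
    Note: an illustrative fragment exercising the mapping above; the function
    takes the enum as a plain int because the value arrives from Python:

        // toco::IODataType values arrive from the Python layer as ints.
        tflite::TensorType t = FromTocoDataTypeToTflitToTensorType(
            static_cast<int>(toco::IODataType::QUANTIZED_UINT8));
        // t == tflite::TensorType_UINT8 per the switch above.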
  4. tensorflow/compiler/mlir/lite/python/converter_python_api_wrapper.cc

            return tensorflow::PyoOrThrow(
                tflite::MlirSparsifyModel(input_contents_txt_raw.ptr()));
          },
          py::arg("input_contents_txt_raw"),
          R"pbdoc(
          Returns a sparsified model.
        )pbdoc");
      m.def(
          "RegisterCustomOpdefs",
          [](py::object custom_opdefs_txt_raw) {
            return tensorflow::PyoOrThrow(
                tflite::RegisterCustomOpdefs(custom_opdefs_txt_raw.ptr()));
          },
    - Last Modified: Fri May 31 18:18:30 UTC 2024
    - 5.6K bytes
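
    Note: a self-contained pybind11 sketch of the registration pattern used
    above; the module name and bound function here are hypothetical:

        #include <pybind11/pybind11.h>

        namespace py = pybind11;

        PYBIND11_MODULE(_example_wrapper, m) {  // hypothetical module name
          m.def(
              "Sparsify",
              [](py::object input_contents_txt_raw) {
                // A real binding forwards to a C++ entry point such as
                // tflite::MlirSparsifyModel(input_contents_txt_raw.ptr()).
                return py::none();
              },
              py::arg("input_contents_txt_raw"),
              R"pbdoc(
                Returns a sparsified model.
              )pbdoc");
        }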
  5. tensorflow/compiler/mlir/lite/quantization/lite/quantize_model.cc

                   << ", input_inference_type: "
                   << tflite::EnumNameTensorType(input_type)
                   << ", output_inference_type: "
                   << tflite::EnumNameTensorType(output_type) << "\n";
      mlir::Builder mlir_builder(&context);
      mlir::Type input_mlir_type =
          tflite::ConvertElementType(input_type, mlir_builder);
      mlir::Type output_mlir_type =
          tflite::ConvertElementType(output_type, mlir_builder);
    
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 6.3K bytes
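
    Note: EnumNameTensorType, used in the logging above, is a
    FlatBuffers-generated helper from the TFLite schema; a minimal sketch:

        #include "tensorflow/lite/schema/schema_generated.h"

        // Returns a readable name such as "INT8" for log messages.
        const char* name = tflite::EnumNameTensorType(tflite::TensorType_INT8);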
  6. tensorflow/compiler/mlir/lite/sparsity/sparsify_model_test.cc

    namespace {
    
    
    TEST(SparsifyModelTest, MetadataIsAddedToOutputModel) {
      std::string expected_key = tflite::optimize::kTfLiteReducedPrecisionKey;
      std::string expected_value = "test_data";
    
      // Load input model
      auto input_fbm = tflite::FlatBufferModel::BuildFromFile(
          "tensorflow/lite/testdata/sparse_tensor.bin");
      tflite::ModelT input_model;
      input_fbm->GetModel()->UnPackTo(&input_model);
    
      // Populate input metadata
    - Last Modified: Mon Jun 10 20:16:40 UTC 2024
    - 2.9K bytes
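
    Note: a sketch of the unpack/edit/repack round trip the test relies on;
    the file path is a placeholder and error handling is omitted:

        #include "flatbuffers/flatbuffers.h"
        #include "tensorflow/lite/model_builder.h"
        #include "tensorflow/lite/schema/schema_generated.h"

        void RoundTripSketch() {
          auto fbm = tflite::FlatBufferModel::BuildFromFile("model.tflite");
          tflite::ModelT model;               // mutable object-API form
          fbm->GetModel()->UnPackTo(&model);  // flatbuffer -> ModelT
          // ... mutate model.metadata / model.buffers, as the test does ...
          flatbuffers::FlatBufferBuilder builder;
          builder.Finish(tflite::Model::Pack(builder, &model),
                         tflite::ModelIdentifier());  // repack with "TFL3" tag
        }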
  7. tensorflow/compiler/mlir/lite/sparsity/sparsify_model.cc

    namespace lite {
    
    absl::Status SparsifyModel(const tflite::ModelT& input_model,
                               flatbuffers::FlatBufferBuilder* builder) {
      MLIRContext context;
      StatusScopedDiagnosticHandler statusHandler(&context,
                                                  /*propagate=*/true);
    
      // Import input_model into an MLIR module
      flatbuffers::FlatBufferBuilder input_builder;
      flatbuffers::Offset<tflite::Model> input_model_location =
    - Last Modified: Mon Jun 10 20:16:40 UTC 2024
    - 4.3K bytes
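
    Note: a hedged call sketch for the entry point above; only `namespace lite`
    is visible in the excerpt, so the mlir::lite qualification is an assumption:

        absl::Status SparsifySketch(const tflite::ModelT& input_model,
                                    flatbuffers::FlatBufferBuilder* builder) {
          // On success, *builder holds the sparsified flatbuffer.
          return mlir::lite::SparsifyModel(input_model, builder);
        }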
  8. tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights.h

    // dynamic range quantization for verify_numeric and whole_model_verify flags.
    TfLiteStatus QuantizeWeights(
        flatbuffers::FlatBufferBuilder* builder, const tflite::Model* input_model,
        const tflite::TensorType& inference_type,
        const absl::flat_hash_set<std::string>& denylisted_ops,
        const CustomOpMap& custom_op_map,
        int64_t minimum_elements_for_weights = 1024,
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 4.2K bytes
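
    Note: a sketch of calling the declaration above. Parameters past
    minimum_elements_for_weights are cut off in the excerpt and assumed
    defaulted; the mlir::lite namespace and CustomOpMap location are
    assumptions:

        TfLiteStatus QuantizeWeightsSketch(
            const tflite::Model* input_model,
            flatbuffers::FlatBufferBuilder* builder) {
          absl::flat_hash_set<std::string> denylisted_ops;  // exclude no ops
          mlir::lite::CustomOpMap custom_op_map;            // no custom ops
          return mlir::lite::QuantizeWeights(
              builder, input_model, tflite::TensorType_INT8, denylisted_ops,
              custom_op_map, /*minimum_elements_for_weights=*/1024);
        }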
  9. tensorflow/compiler/mlir/lite/quantization/lite/quantize_model.h

    // of double, and call TOCO's quantization routines to maintain bit-exactness of
    // the values with the TOCO quantizer.
    TfLiteStatus QuantizeModel(
        absl::string_view model_buffer, const tflite::TensorType &input_type,
        const tflite::TensorType &output_type,
        const tflite::TensorType &inference_type,
        const std::unordered_set<std::string> &operator_names,
        bool disable_per_channel, bool fully_quantize, std::string &output_buffer,
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 2.8K bytes
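
    Note: a call sketch for the declaration above. The parameter list is
    truncated in the excerpt, so the trailing arguments are assumed defaulted,
    and the enclosing namespace is an assumption:

        TfLiteStatus QuantizeSketch(absl::string_view model_buffer,
                                    std::string& output_buffer) {
          // Fully quantize to INT8 at the inputs, outputs, and internals.
          return mlir::lite::QuantizeModel(
              model_buffer,
              /*input_type=*/tflite::TensorType_INT8,
              /*output_type=*/tflite::TensorType_INT8,
              /*inference_type=*/tflite::TensorType_INT8,
              /*operator_names=*/{}, /*disable_per_channel=*/false,
              /*fully_quantize=*/true, output_buffer);
        }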
  10. tensorflow/compiler/mlir/lite/experimental/tac/execution_metadata_exporter.cc

    #include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
    #include "tensorflow/compiler/mlir/lite/quantization/ir/QuantOps.h"
    #include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
    
    namespace tflite {
    namespace {
    
    bool IsConst(mlir::Operation* op) {
      return llvm::isa<mlir::arith::ConstantOp, mlir::TF::ConstOp,
                       mlir::TFL::ConstOp, mlir::TFL::QConstOp>(op);
    }
    
    - Last Modified: Tue Jun 11 06:11:34 UTC 2024
    - 7.5K bytes
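
    Note: the variadic llvm::isa in IsConst above succeeds if the operation is
    any of the listed classes; it is shorthand for the OR chain below, using
    the dialect headers from the excerpt's include list:

        #include "llvm/Support/Casting.h"
        #include "mlir/Dialect/Arith/IR/Arith.h"
        #include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
        #include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"

        // Equivalent expansion of the variadic llvm::isa call in IsConst.
        bool IsConstExpanded(mlir::Operation* op) {
          return llvm::isa<mlir::arith::ConstantOp>(op) ||
                 llvm::isa<mlir::TF::ConstOp>(op) ||
                 llvm::isa<mlir::TFL::ConstOp>(op) ||
                 llvm::isa<mlir::TFL::QConstOp>(op);
        }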