Results 1 - 10 of 282 for tflite (0.15 sec)

  1. .github/ISSUE_TEMPLATE/tflite-other.md

    name: TensorFlow Lite Other Issue
    description: Use this template to report any issue in TensorFlow Lite that is not about Converters, Play Services or Ops
    body:
      - type: dropdown
        id: issue-type
        attributes:
          label: Issue Type
          description: What type of issue would you like to report?
          multiple: false
          options: [Bug, Build/Install, Performance, Support, Feature Request, Documentation Feature Request, Documentation Bug, Others]
        validations:
          required: true
      - type:
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Dec 29 22:28:29 UTC 2022
    - 3.4K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/lite/utils/convert_type.cc

        case tflite::TensorType_FLOAT16:
          return builder.getF16Type();
        case tflite::TensorType_BFLOAT16:
          return builder.getBF16Type();
        case tflite::TensorType_FLOAT32:
          return builder.getF32Type();
        case tflite::TensorType_FLOAT64:
          return builder.getF64Type();
        case tflite::TensorType_INT32:
          return builder.getIntegerType(32);
        case tflite::TensorType_UINT16:
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 07 23:04:40 UTC 2024
    - 8.2K bytes
    - Viewed (0)
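
    The switch above maps flatbuffer tensor element types to MLIR types. Result 10 below shows the public entry point for this mapping, tflite::ConvertElementType; a minimal call-site sketch, assuming the header location and context setup shown here (both illustrative):

        #include "mlir/IR/Builders.h"
        #include "mlir/IR/MLIRContext.h"
        #include "tensorflow/compiler/mlir/lite/utils/convert_type.h"

        mlir::MLIRContext context;
        mlir::Builder builder(&context);
        // TensorType_FLOAT32 resolves to MLIR's f32 type via the switch above.
        mlir::Type f32 = tflite::ConvertElementType(tflite::TensorType_FLOAT32, builder);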
  3. tensorflow/compiler/mlir/lite/mlir_tflite_runner.cc

        return 1;
    
      // Create TFLite interpreter & invoke converted program.
      std::unique_ptr<tflite::FlatBufferModel> model =
          tflite::FlatBufferModel::BuildFromBuffer(serialized_flatbuffer.c_str(),
                                                   serialized_flatbuffer.size());
      tflite::ops::builtin::BuiltinOpResolver builtins;
      std::unique_ptr<tflite::Interpreter> interpreter;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Jun 03 00:14:05 UTC 2023
    - 6.3K bytes
    - Viewed (0)
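
    The excerpt breaks off before the interpreter is built. A minimal sketch of the usual continuation with the standard TFLite C++ API (InterpreterBuilder, AllocateTensors, Invoke); error handling is abbreviated, and this is not necessarily what mlir_tflite_runner.cc itself does next:

        // Build the interpreter from the model and the builtin op resolver.
        tflite::InterpreterBuilder interpreter_builder(*model, builtins);
        if (interpreter_builder(&interpreter) != kTfLiteOk || !interpreter) return 1;
        // Allocate tensor buffers, then run the converted program once.
        if (interpreter->AllocateTensors() != kTfLiteOk) return 1;
        if (interpreter->Invoke() != kTfLiteOk) return 1;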
  4. tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/importer_test_min_max.cc

    #include "llvm/Support/raw_ostream.h"
    #include "tensorflow/compiler/mlir/lite/schema/schema_generated.h"
    #include "tensorflow/compiler/mlir/lite/schema/schema_utils.h"
    #include "tensorflow/lite/model.h"
    
    using llvm::cl::opt;
    
    // RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s.mlir -o - \
    // RUN:   | %p/importer_test_min_max - \
    // RUN:   | flatbuffer_translate --tflite-flatbuffer-to-mlir - -o - \
    // RUN:   | FileCheck %s
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 21 18:21:50 UTC 2024
    - 6.8K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights.cc

                                 const tflite::Model* input_model,
                                 BufferType quant_type,
                                 bool use_updated_hybrid_scheme) {
      tflite::TensorType inference_type;
      switch (quant_type) {
        case BufferType::QUANTIZED_FLOAT16:
          inference_type = tflite::TensorType_FLOAT16;
          break;
        default:
          inference_type = tflite::TensorType_INT8;
      }
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 9.5K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/lite/utils/convert_type.h

    // Convert the scalar type of a TFLite tensor to the corresponding
    // Tensorflow type
    tensorflow::DataType TflTypeToTfType(tflite::TensorType type);
    
    // Convert the Tensorflow scalar type to the corresponding TFLite type
    absl::StatusOr<tflite::TensorType> TfTypeToTflType(tensorflow::DataType type);
    
    // Returns element type from attribute Type 'type_attr'.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 03 18:01:23 UTC 2024
    - 2.1K bytes
    - Viewed (0)
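
    The two declarations above suggest a simple round trip; a hedged sketch, assuming the functions live in the tflite namespace as used throughout this listing. TfTypeToTflType returns absl::StatusOr because not every TensorFlow dtype has a TFLite counterpart:

        #include "tensorflow/compiler/mlir/lite/utils/convert_type.h"

        // FLOAT32 exists on both sides, so the round trip should be exact.
        tensorflow::DataType tf_type = tflite::TflTypeToTfType(tflite::TensorType_FLOAT32);
        absl::StatusOr<tflite::TensorType> tfl_type = tflite::TfTypeToTflType(tf_type);
        // A dtype with no TFLite equivalent comes back as a non-OK status instead.
        bool round_trip_ok = tfl_type.ok() && *tfl_type == tflite::TensorType_FLOAT32;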
  7. tensorflow/compiler/mlir/lite/sparsity/sparsify_model.cc

      flatbuffers::Offset<tflite::Model> input_model_location =
          tflite::Model::Pack(input_builder, &input_model);
      tflite::FinishModelBuffer(input_builder, input_model_location);
    
      std::string serialized_model(
          reinterpret_cast<const char*>(input_builder.GetBufferPointer()),
          input_builder.GetSize());
    
      OwningOpRef<mlir::ModuleOp> module = tflite::FlatBufferToMlir(
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Jun 10 20:16:40 UTC 2024
    - 4.3K bytes
    - Viewed (0)
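
    After FinishModelBuffer, serialized_model holds a complete TFLite flatbuffer. A short sketch of reading it back with the flatbuffer-generated root accessor tflite::GetModel; purely illustrative, not part of sparsify_model.cc:

        // GetModel does not copy: serialized_model must outlive 'reparsed'.
        const tflite::Model* reparsed = tflite::GetModel(serialized_model.data());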
  8. tensorflow/compiler/mlir/lite/experimental/tac/tac_filter.proto

    syntax = "proto3";
    
    package third_party.tensorflow.compiler.mlir.lite.experimental.tac;
    
    // A list of filters for TAC users to run ops/functions on ML hardwares. The
    // intuition is that, for ops/functions that can be run on ML hardware (e.g.
    // EdgeTPU) and TFLite CPU, TAC users give a hint that they're more performant
    // to run on TFLite CPU. These filters give the TAC users freedom to specify the
    // parts that they want to use other hardware to accelerate.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 19 19:32:06 UTC 2023
    - 1.8K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/lite/transforms/analyze_variables.cc

    // variables should be legalized to TFLite or not.
    const char kLegalizeTflVariables[] = "tfl._legalize_tfl_variables";
    
    // Returns true if 'op' is TF op that accepts resource type, but is
    // supported by TFLite.
    bool IsSupportedTFLiteResourceOp(Operation* op) {
      return llvm::isa<TF::ReadVariableOp, TF::AssignVariableOp, TF::VarHandleOp,
                       TF::LookupTableFindV2Op, TF::LookupTableImportV2Op,
                       TF::LookupTableSizeV2Op>(op);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 4.3K bytes
    - Viewed (0)
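
    A predicate like IsSupportedTFLiteResourceOp is typically applied while walking the module. A hedged sketch, assuming an mlir::ModuleOp named module is in scope; the lambda body is illustrative, not the pass's actual marking logic:

        module.walk([](mlir::Operation* op) {
          if (IsSupportedTFLiteResourceOp(op)) {
            // e.g. note that the resource variables this op touches can be
            // legalized to TFLite variables (kLegalizeTflVariables above).
          }
        });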
  10. tensorflow/compiler/mlir/lite/quantization/lite/quantize_model.cc

                   << ", input_inference_type: "
                   << tflite::EnumNameTensorType(input_type)
                   << ", output_inference_type: "
                   << tflite::EnumNameTensorType(output_type) << "\n";
      mlir::Builder mlir_builder(&context);
      mlir::Type input_mlir_type =
          tflite::ConvertElementType(input_type, mlir_builder);
      mlir::Type output_mlir_type =
          tflite::ConvertElementType(output_type, mlir_builder);
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 6.3K bytes
    - Viewed (0)