Results 1 - 10 of 76 for hilite (0.09 sec)

  1. tensorflow/compiler/mlir/lite/quantization/lite/quantize_model.cc

    #include "tensorflow/compiler/mlir/lite/common/tfl_pass_config.h"
    #include "tensorflow/compiler/mlir/lite/debug/debug.h"
    #include "tensorflow/compiler/mlir/lite/flatbuffer_export.h"
    #include "tensorflow/compiler/mlir/lite/flatbuffer_import.h"
    #include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
    #include "tensorflow/compiler/mlir/lite/schema/schema_generated.h"
    #include "tensorflow/compiler/mlir/lite/tf_tfl_passes.h"
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 6.3K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/lite/sparsity/sparsify_model_test.cc

    #include "tensorflow/compiler/mlir/lite/schema/schema_generated.h"
    #include "tensorflow/lite/core/model_builder.h"
    #include "tensorflow/lite/tools/optimize/reduced_precision_support.h"
    
    namespace mlir {
    namespace lite {
    namespace {
    
    
    TEST(SparsifyModelTest, MetadataIsAddedToOutputModel) {
      std::string expected_key = tflite::optimize::kTfLiteReducedPrecisionKey;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Jun 10 20:16:40 UTC 2024
    - 2.9K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights.h

    #include "absl/container/flat_hash_set.h"
    #include "flatbuffers/flatbuffer_builder.h"  // from @flatbuffers
    #include "tensorflow/compiler/mlir/lite/schema/schema_generated.h"
    #include "tensorflow/lite/c/c_api_types.h"
    #include "tensorflow/lite/model.h"
    
    namespace mlir {
    namespace lite {
    
    // Supported resulting types from quantization process.
    enum class BufferType { QUANTIZED_INT8, QUANTIZED_FLOAT16 };
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 4.2K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/experimental/tac/utils/utils.cc

    #include "mlir/Parser/Parser.h"  // from @llvm-project
    #include "mlir/Support/FileUtilities.h"  // from @llvm-project
    #include "tensorflow/compiler/mlir/lite/flatbuffer_export.h"
    #include "tensorflow/compiler/mlir/lite/flatbuffer_import.h"
    #include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
    
    namespace mlir {
    namespace TFL {
    namespace tac {
    
    absl::StatusOr<mlir::OwningOpRef<mlir::ModuleOp>> ImportFlatbufferOrMlir(
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Jun 03 03:47:03 UTC 2024
    - 4.5K bytes
    - Viewed (0)
  5. src/math/big/floatmarsh.go

    	z.mode = RoundingMode((b >> 5) & 7)
    	z.acc = Accuracy((b>>3)&3) - 1
    	z.form = form((b >> 1) & 3)
    	z.neg = b&1 != 0
    	z.prec = byteorder.BeUint32(buf[2:])
    
    	if z.form == finite {
    		if len(buf) < 10 {
    			return errors.New("Float.GobDecode: buffer too small for finite form float")
    		}
    		z.exp = int32(byteorder.BeUint32(buf[6:]))
    		z.mant = z.mant.setBytes(buf[10:])
    	}
    
    	if oldPrec != 0 {
    		z.mode = oldMode
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon May 13 21:31:58 UTC 2024
    - 3.6K bytes
    - Viewed (0)
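    The big.Float header byte decoded above packs four fields: the rounding mode in bits 5-7, the accuracy in bits 3-4, the form in bits 1-2, and the sign in bit 0, with the 32-bit precision following in big-endian order. A minimal C++ sketch of that bit unpacking, using illustrative names rather than the Go implementation's:

    #include <cstdint>
    #include <cstdio>

    // Illustrative struct mirroring the fields unpacked from the header byte
    // in the snippet above; the names are assumptions made for this sketch.
    struct FloatHeader {
      uint8_t mode;  // (b >> 5) & 7 : rounding mode
      int8_t acc;    // ((b >> 3) & 3) - 1 : accuracy
      uint8_t form;  // (b >> 1) & 3 : zero, finite, or infinite
      bool neg;      // b & 1 : sign bit
    };

    FloatHeader DecodeHeader(uint8_t b) {
      return FloatHeader{static_cast<uint8_t>((b >> 5) & 7),
                         static_cast<int8_t>(((b >> 3) & 3) - 1),
                         static_cast<uint8_t>((b >> 1) & 3), (b & 1) != 0};
    }

    int main() {
      FloatHeader h = DecodeHeader(0x6B);  // example header byte 0b01101011
      std::printf("mode=%d acc=%d form=%d neg=%d\n", h.mode, h.acc, h.form, h.neg);
    }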
  6. tensorflow/compiler/mlir/lite/utils/convert_type.cc

        case tflite::TensorType_FLOAT16:
          return builder.getF16Type();
        case tflite::TensorType_BFLOAT16:
          return builder.getBF16Type();
        case tflite::TensorType_FLOAT32:
          return builder.getF32Type();
        case tflite::TensorType_FLOAT64:
          return builder.getF64Type();
        case tflite::TensorType_INT32:
          return builder.getIntegerType(32);
        case tflite::TensorType_UINT16:
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 07 23:04:40 UTC 2024
    - 8.2K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/lite/transforms/legalize_jax_random.cc

    #include "stablehlo/dialect/StablehloOps.h"  // from @stablehlo
    #include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
    #include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
    #include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
    
    namespace mlir {
    namespace TFL {
    namespace {
    #define GEN_PASS_DEF_LEGALIZEJAXRANDOMPASS
    #include "tensorflow/compiler/mlir/lite/transforms/passes.h.inc"
    
    struct LegalizeJaxRandomPass
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 5.1K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights.cc

    #include "tensorflow/compiler/mlir/lite/common/tfl_pass_config.h"
    #include "tensorflow/compiler/mlir/lite/flatbuffer_export.h"
    #include "tensorflow/compiler/mlir/lite/flatbuffer_import.h"
    #include "tensorflow/compiler/mlir/lite/schema/schema_generated.h"
    #include "tensorflow/compiler/mlir/lite/tf_tfl_passes.h"
    #include "tensorflow/compiler/mlir/lite/transforms/passes.h"
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 9.5K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/lite/utils/string_utils.cc

        return -1;
      }
    
      // Set the number of strings.
      //
      // NOTE: The string buffer is accessed here as if it's native endian (instead
      // of little endian, as documented in the header). This will potentially break
      // when TFLite is ported to big-endian platforms.
      // TODO(b/165919229): This code will need changing if/when we port to a
      // big-endian platform.
      memcpy(*buffer, &num_strings, sizeof(int32_t));
    
      // Set offset of strings.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 21:41:49 UTC 2024
    - 2.9K bytes
    - Viewed (0)
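    The comment above describes TFLite's packed string buffer: a native-endian int32 string count copied in first, followed by per-string byte offsets and then the string data itself. A self-contained C++ sketch of that layout, assuming one extra trailing offset marks the end of the data; the PackStrings name and the exact offset convention are illustrative assumptions, not the library's actual helper:

    #include <cstdint>
    #include <cstring>
    #include <string>
    #include <vector>

    // Illustrative sketch of the buffer layout implied by the snippet above:
    // a native-endian int32 string count, then int32 byte offsets, then the
    // concatenated string bytes. The extra trailing offset marking the end of
    // the last string is an assumption, not taken from the snippet.
    std::vector<char> PackStrings(const std::vector<std::string>& strings) {
      const int32_t num_strings = static_cast<int32_t>(strings.size());
      const size_t header_size = sizeof(int32_t) * (num_strings + 2);
      size_t data_size = 0;
      for (const auto& s : strings) data_size += s.size();

      std::vector<char> buffer(header_size + data_size);
      char* p = buffer.data();

      // Set the number of strings.
      std::memcpy(p, &num_strings, sizeof(int32_t));
      p += sizeof(int32_t);

      // Set the offset of each string, relative to the start of the buffer.
      int32_t offset = static_cast<int32_t>(header_size);
      for (const auto& s : strings) {
        std::memcpy(p, &offset, sizeof(int32_t));
        p += sizeof(int32_t);
        offset += static_cast<int32_t>(s.size());
      }
      std::memcpy(p, &offset, sizeof(int32_t));  // end-of-data offset
      p += sizeof(int32_t);

      // Copy the string bytes themselves.
      for (const auto& s : strings) {
        std::memcpy(p, s.data(), s.size());
        p += s.size();
      }
      return buffer;
    }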
  10. tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/importer_test_min_max.cc

    #include "llvm/Support/raw_ostream.h"
    #include "tensorflow/compiler/mlir/lite/schema/schema_generated.h"
    #include "tensorflow/compiler/mlir/lite/schema/schema_utils.h"
    #include "tensorflow/lite/model.h"
    
    using llvm::cl::opt;
    
    // RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s.mlir -o - \
    // RUN:   | %p/importer_test_min_max - \
    // RUN:   | flatbuffer_translate --tflite-flatbuffer-to-mlir - -o - \
    // RUN:   | FileCheck %s
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 21 18:21:50 UTC 2024
    - 6.8K bytes
    - Viewed (0)