Search Options

Results per page
Sort
Preferred Languages
Advanced

Results 1 - 10 of 14 for QUANTIZED_INT8 (0.67 sec)

  1. tensorflow/compiler/mlir/lite/experimental/tac/tests/compute-cost.mlir

      %0 = "tfl.pseudo_qconst"() {qtype = tensor<128x!quant.uniform<i32:f32, 0.7>>, value = dense<0> : tensor<128xi32>} : () -> tensor<128x!quant.uniform<i32:f32, 0.7>>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Mar 24 05:29:10 UTC 2022
    - 4.1K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/lite/experimental/tac/tests/target-annotation.mlir

      // CHECK-NOT: tac.device tac.inference_type
      %1 = "tfl.pseudo_const"() {value = dense<[4, 384, 32]> : tensor<3xi32>} : () -> tensor<3xi32>
      // CHECK: tac.device = "CPU", tac.inference_type = "QUANTIZED_INT8"
      %2 = "tfl.reshape"(%arg0, %0) : (tensor<4x384x32x!quant.uniform<i8:f32, 0.2:-3>>, tensor<4xi32>) -> tensor<1x4x384x32x!quant.uniform<i8:f32, 0.2:-3>>
      // CHECK-NOT: tac.device tac.inference_type
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 19 19:32:06 UTC 2023
    - 6.2K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/lite/experimental/tac/common/targets.h

    enum InferenceType {
      UNKNOWN = 0,
      FLOAT = 1,
      QUANTIZED_INT8 = 2,
      QUANTIZED_UINT8 = 3,
      HYBRID = 4
    };
    
    inline InferenceType GetInferenceTypeEnum(llvm::StringRef inference_type_str) {
      if (inference_type_str == "FLOAT") {
        return FLOAT;
      } else if (inference_type_str == "QUANTIZED_INT8") {
        return QUANTIZED_INT8;
      } else if (inference_type_str == "QUANTIZED_UINT8") {
        return QUANTIZED_UINT8;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jun 06 03:08:33 UTC 2023
    - 4.7K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/experimental/tac/transforms/cost_model.cc

          return 0;
        } else if (to_inference_type == QUANTIZED_INT8 ||
                   to_inference_type == QUANTIZED_UINT8) {
          // QUANT path.
          return kQuantCost * total_element_count_transferred;
        }
      }
    
      if (from_inference_type == QUANTIZED_INT8 ||
          from_inference_type == QUANTIZED_UINT8) {
        // Dequant path.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 7.3K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/lite/experimental/tac/tests/get-alternative-subgraph.mlir

      %0 = "tfl.pseudo_qconst"() {qtype = tensor<128x!quant.uniform<i32:f32, 0.7>>, value = dense<0> : tensor<128xi32>} : () -> tensor<128x!quant.uniform<i32:f32, 0.7>>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 20.1K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/lite/experimental/tac/tests/pick-subgraphs.mlir

        %0 = tfl.add %arg0, %arg1 {fused_activation_function = "RELU6", tac.device = "CPU", tac.inference_type = "QUANTIZED_INT8"} : tensor<100x!quant.uniform<i8:f32, 2.000000e-01:-3>>
        %1 = tfl.mul %0, %arg2 {fused_activation_function = "RELU6", tac.device = "CPU", tac.inference_type = "QUANTIZED_INT8"} : tensor<100x!quant.uniform<i8:f32, 2.000000e-01:-3>>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 24.3K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights.h

    #include "tensorflow/lite/c/c_api_types.h"
    #include "tensorflow/lite/model.h"
    
    namespace mlir {
    namespace lite {
    
    // Supported resulting types from quantization process.
    enum class BufferType { QUANTIZED_INT8, QUANTIZED_FLOAT16 };
    
    // Stores information about how to quantize a user-specified custom operation.
    // CustomOpInfo contains info of its corresponding CustomOp registered in the
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 4.2K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/lite/experimental/tac/tests/raise-target-subgraphs.mlir

      %2 = "tfl.mul"(%arg0, %0) {tac.device = "CPU", tac.inference_type = "QUANTIZED_INT8", fused_activation_function = "NONE"} : (tensor<1x!quant.uniform<i8:f32, 0.003:-128>>, tensor<1x!quant.uniform<i8:f32, 0.003:-128>>) -> tensor<1x!quant.uniform<i8:f32, 0.003:-128>>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 74.9K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/lite/experimental/tac/common/utils.cc

      if (float_type_observed) {
        if (int8_type_observed || uint8_type_observed) {
          return HYBRID;
        } else {
          return FLOAT;
        }
      }
    
      if (int8_type_observed) {
        return QUANTIZED_INT8;
      }
    
      if (uint8_type_observed) {
        return QUANTIZED_UINT8;
      }
    
      // Default to float inference.
      return FLOAT;
    }
    
    }  // namespace tac
    }  // namespace TFL
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Mar 06 05:37:07 UTC 2024
    - 2.3K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/lite/experimental/tac/transforms/get_alternative_subgraph.cc

      std::vector<InferenceDeviceType> all_device_inference_types;
      for (const auto& device : devices) {
        if (inference_type == QUANTIZED_INT8) {
          all_device_inference_types.push_back({device, QUANTIZED_INT8});
        } else if (inference_type == QUANTIZED_UINT8) {
          all_device_inference_types.push_back({device, QUANTIZED_UINT8});
        }
    
    // We will always enable float.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jun 06 03:08:33 UTC 2023
    - 12.3K bytes
    - Viewed (0)
Back to top