Results 1 - 5 of 5 for IsQuantized (0.28 sec)

  1. tensorflow/compiler/mlir/lite/utils/const_tensor_utils.cc

          }
        }
        elem_type = mlir::TF::VariantType::get(tensor_types, builder.getContext());
      }
      if (IsQuantized(tensor) && !get_storage) {
        TF_ASSIGN_OR_RETURN(elem_type,
                            GetQuantizedType(tensor, builder, is_constant));
      } else if (IsQuantized(tensor) && get_storage) {
        // If the type is quantized we strip the signedness from the storage type.
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 07 23:04:40 UTC 2024 - 16.6K bytes - Viewed (0)
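    The excerpt above decides the element type for a tensor: a quantized tensor normally maps to the type returned by GetQuantizedType, but when the caller asks for the raw storage type (get_storage), the signedness is stripped instead. Below is a hypothetical, dependency-free sketch of that branching only; the real code produces MLIR types, while the stand-in structs and names here (FakeTensor, ElemTypeSketch, ...) are invented purely for illustration.

    #include <iostream>
    #include <optional>
    #include <string>

    struct FakeQuantParams {   // stands in for the tensor's scale/zero_point metadata
      float scale = 0.0f;
      long long zero_point = 0;
    };

    struct FakeTensor {        // stands in for tflite::TensorT
      std::string storage_type = "ui8";  // unsigned 8-bit storage
      std::optional<FakeQuantParams> quantization;
    };

    bool IsQuantizedSketch(const FakeTensor& t) { return t.quantization.has_value(); }

    // Mirrors the elem_type selection: a quantized element type by default, or the
    // signedness-stripped storage type ("ui8" -> "i8") when get_storage is set.
    std::string ElemTypeSketch(const FakeTensor& t, bool get_storage) {
      if (IsQuantizedSketch(t) && !get_storage)
        return "!quant.uniform<" + t.storage_type + ">";  // placeholder for GetQuantizedType
      if (IsQuantizedSketch(t) && get_storage)
        return t.storage_type.substr(1);                  // drop the signedness prefix
      return t.storage_type;
    }

    int main() {
      FakeTensor t;
      t.quantization = FakeQuantParams{0.5f, 128};
      std::cout << ElemTypeSketch(t, /*get_storage=*/false) << "\n";  // !quant.uniform<ui8>
      std::cout << ElemTypeSketch(t, /*get_storage=*/true) << "\n";   // i8
    }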
  2. tensorflow/compiler/mlir/lite/utils/const_tensor_utils.h

    #include "tensorflow/compiler/mlir/lite/schema/schema_generated.h"
    #include "tensorflow/core/framework/tensor.pb.h"
    #include "tensorflow/core/framework/tensor_shape.pb.h"
    
    namespace mlir {
    namespace TFL {
    
    bool IsQuantized(const tflite::TensorT& tensor);
    
    absl::StatusOr<mlir::quant::QuantizedType> GetQuantizedType(
        const tflite::TensorT& tensor, mlir::Builder builder,
        bool is_constant = false, mlir::Type storage_type = {});
    
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 07 23:04:40 UTC 2024 - 2.9K bytes - Viewed (0)
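    The header declares the two entry points used by the importer: IsQuantized to test whether a flatbuffer tensor is quantized, and GetQuantizedType to build the corresponding MLIR quantized type. A minimal usage sketch follows, assuming exactly the signatures shown above; it would only build inside the TensorFlow source tree, and GetQuantizedElementTypeOrNull is an invented helper name.

    #include "tensorflow/compiler/mlir/lite/utils/const_tensor_utils.h"

    #include "mlir/IR/Builders.h"     // mlir::Builder
    #include "mlir/IR/MLIRContext.h"  // mlir::MLIRContext

    // Hypothetical helper: resolve the quantized element type of a flatbuffer
    // tensor, or return a null type when the tensor is not quantized or the
    // conversion fails.
    mlir::Type GetQuantizedElementTypeOrNull(const tflite::TensorT& tensor,
                                             mlir::MLIRContext* context) {
      if (!mlir::TFL::IsQuantized(tensor)) return {};
      mlir::Builder builder(context);
      absl::StatusOr<mlir::quant::QuantizedType> quant_type =
          mlir::TFL::GetQuantizedType(tensor, builder);
      if (!quant_type.ok()) return {};
      return *quant_type;
    }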
  3. tensorflow/compiler/mlir/lite/flatbuffer_import.cc

                                            Value res) {
      // If the `tensor` has scale/zero_point, it must have been quantized, then the
      // min/max stats is just for comments, so ignore it.
      if (!tensor.quantization || tfl::IsQuantized(tensor)) return nullptr;
      // If the result isn't float and unquantizable, the min/max is ignored.
      if (!res.getType()
               .cast<mlir::ShapedType>()
               .getElementType()
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 21 18:21:50 UTC 2024 - 66.8K bytes - Viewed (0)
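    The guard above skips emitting min/max statistics in two cases: the tensor carries no quantization metadata at all, or it already has scale/zero_point (in which case any min/max values are informational only). The sketch below is a compact, hypothetical rendering of that decision; the boolean parameters and the name ShouldKeepMinMaxStats are invented stand-ins for the real tensor and result checks.

    #include <cassert>

    // Hypothetical predicate mirroring the early returns above.
    bool ShouldKeepMinMaxStats(bool has_quant_metadata, bool is_quantized,
                               bool result_is_quantizable_float) {
      // No metadata, or already quantized: min/max carries nothing actionable.
      if (!has_quant_metadata || is_quantized) return false;
      // Non-float, unquantizable results also ignore the statistics.
      return result_is_quantizable_float;
    }

    int main() {
      assert(!ShouldKeepMinMaxStats(false, false, true));  // no quantization metadata
      assert(!ShouldKeepMinMaxStats(true, true, true));    // already quantized
      assert(ShouldKeepMinMaxStats(true, false, true));    // calibration stats are kept
      return 0;
    }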
  4. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver.cc

      return op_quant_spec_getter_(op);
    }
    
    std::unique_ptr<OpQuantScaleSpec> QuantizationDriver::GetQuantScaleSpec(
        Operation* op) {
      return op_quant_scale_spec_getter_(op);
    }
    
    bool QuantizationDriver::IsQuantized(Operation* op) {
      for (int i = 0; i < op->getNumResults(); ++i) {
        if (GetResultQuantState(op, i).IsEmpty()) return false;
      }
      return true;
    }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 38.1K bytes - Viewed (0)
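    QuantizationDriver::IsQuantized treats an op as quantized only when every one of its results already has a non-empty quantization state. A hypothetical, dependency-free stand-in for that loop is sketched below; FakeOp and FakeQuantState are invented and only mimic the per-result state lookup.

    #include <iostream>
    #include <vector>

    struct FakeQuantState {              // stands in for the driver's per-result state
      bool empty = true;
      bool IsEmpty() const { return empty; }
    };

    struct FakeOp { std::vector<FakeQuantState> result_states; };

    bool IsQuantizedSketch(const FakeOp& op) {
      for (const FakeQuantState& state : op.result_states)
        if (state.IsEmpty()) return false;  // any un-parameterized result => not quantized
      return true;                          // note: an op with zero results counts as quantized
    }

    int main() {
      FakeOp partly{{{false}, {true}}};   // one result still missing parameters
      FakeOp fully{{{false}, {false}}};   // all results have parameters
      std::cout << IsQuantizedSketch(partly) << " " << IsQuantizedSketch(fully) << "\n";  // 0 1
    }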
  5. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver.h

      std::unique_ptr<OpQuantScaleSpec> GetQuantScaleSpec(Operation* op);
    
      // Returns whether quantization parameters have been propagated to the results
      // of this op.
      bool IsQuantized(Operation* op);
    
      // Adds all the users of index-th result of op to the work list.
      void AddUserToList(Operation* op, const int index) {
        for (Operation* user : op->getResult(index).getUsers()) {
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Mar 20 11:42:17 UTC 2024 - 16.8K bytes - Viewed (0)
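    The AddUserToList helper feeds quantization propagation: every op consuming the index-th result gets pushed onto the driver's work list. The sketch below imitates that pattern with an invented FakeOp graph and an explicit std::deque work list; none of these names come from the real driver.

    #include <deque>
    #include <iostream>
    #include <string>
    #include <vector>

    struct FakeOp {
      std::string name;
      std::vector<std::vector<FakeOp*>> users_per_result;  // consumers of each result
    };

    // Push every user of the index-th result onto the work list.
    void AddUsersToWorkList(const FakeOp& op, int index, std::deque<FakeOp*>& work_list) {
      for (FakeOp* user : op.users_per_result.at(index)) work_list.push_back(user);
    }

    int main() {
      FakeOp relu{"relu", {}};
      FakeOp add{"add", {}};
      FakeOp conv{"conv", {{&relu, &add}}};  // both ops consume conv's result 0

      std::deque<FakeOp*> work_list;
      AddUsersToWorkList(conv, /*index=*/0, work_list);
      for (const FakeOp* op : work_list) std::cout << op->name << "\n";  // relu, add
    }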