Results 1 - 10 of 149 for Quantized (0.14 sec)

  1. tensorflow/compiler/mlir/quantization/stablehlo/quantization_options.proto

    // If the selected quantization option is not available, StableHLO quantizer
    // will raise an error.
    // NEXT ID: 2
    message CustomQuantizationMethod {
      // Specify component name, bit width, and other specs for all components
      // intended to be quantized.
      repeated QuantizationComponentSpec quantization_component_spec = 1;
    }
    
    // Quantization spec for each component designated to be quantized.
    - Last Modified: Thu Jun 22 02:20:05 UTC 2023
    - 3.6K bytes
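
    A minimal sketch of populating this message through the protobuf-generated C++ API; the stablehlo::quantization namespace and the component-spec field setters are assumptions, not taken from the excerpt:

        #include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_options.pb.h"

        // Builds a CustomQuantizationMethod with one component spec per
        // component intended to be quantized (assumed namespace; the fields
        // of QuantizationComponentSpec are set where indicated).
        stablehlo::quantization::CustomQuantizationMethod BuildCustomMethod() {
          stablehlo::quantization::CustomQuantizationMethod method;
          auto* spec = method.add_quantization_component_spec();
          (void)spec;  // set component name, bit width, etc. on `spec` here
          return method;
        }
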
  2. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization.td

      string name = n;
      string asTraitArgsStr =
        !interleave(params, ", ") # !if(signed, ", true", ", false");
    }
    
    // Uniform quantized types. Two integers "smantissa" and "sexp" are used to
    // express the mantissa and exponent components of the floating-point scale,
    // so the scale of the quantized type is "smantissa * 10 ^ sexp".
    class UInt8UniformQuantizedType<int zero_pt, int smantissa, int sexp>
        : QuantizedType<"Uniform",
    - Last Modified: Tue Mar 05 07:39:40 UTC 2024
    - 8.3K bytes
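
    As a worked example of the scale encoding described above: a scale of 0.0125 is representable as smantissa = 125, sexp = -4. A small standalone check of the arithmetic:

        #include <cmath>
        #include <iostream>

        int main() {
          const int smantissa = 125, sexp = -4;
          // scale = smantissa * 10 ^ sexp, per the comment in quantization.td.
          const double scale = smantissa * std::pow(10.0, sexp);
          std::cout << scale << "\n";  // prints 0.0125
          return 0;
        }
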
  3. tensorflow/compiler/mlir/quantization/common/uniform_quantized_types.h

    // Returns true iff `type` is a uniform quantized type whose storage type
    // is a 32-bit integer and whose expressed type is f32.
    bool IsI32F32UniformQuantizedType(Type type);
    
    // Returns true iff `type` is a uniform quantized per-axis (per-channel)
    // type whose storage type is a 32-bit integer and whose expressed type is
    // f32.
    bool IsI32F32UniformQuantizedPerAxisType(Type type);
    
    // Determines whether the storage type of a quantized type is supported by
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 5.7K bytes
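
    A hedged sketch of what such a predicate can look like against the stock MLIR quant dialect; the header path and helper names come from upstream MLIR, not from this header's actual .cc file:

        #include "mlir/Dialect/Quant/QuantTypes.h"
        #include "mlir/IR/Types.h"

        // True iff `type` is a uniform quantized type whose storage type is a
        // 32-bit integer and whose expressed type is f32 (illustrative body).
        bool IsI32F32UniformQuantizedType(mlir::Type type) {
          auto qtype = llvm::dyn_cast<mlir::quant::UniformQuantizedType>(type);
          return qtype && qtype.getStorageTypeIntegralWidth() == 32 &&
                 qtype.getExpressedType().isF32();
        }
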
  4. tensorflow/compiler/mlir/quantization/common/uniform_quantized_types.cc

                                << quantized_per_axis_type << ".\n");
        return false;
      }
    
      return true;
    }
    
    // Determines whether the storage type of a quantized type is supported by
    // `tfl.quantize` or `tfl.dequantize` ops. ui8, i8 and i16 are supported.
    bool IsSupportedByTfliteQuantizeOrDequantizeOps(IntegerType storage_type) {
      if (storage_type.getWidth() == 8 ||
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 8.4K bytes
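
    The excerpt cuts off mid-condition. Given the comment that ui8, i8, and i16 are supported, a plausible completion (an assumption, not the file's actual body) is:

        #include "mlir/IR/BuiltinTypes.h"

        // ui8/i8 (any 8-bit) and i16 (16-bit, non-unsigned) storage types are
        // accepted by tfl.quantize / tfl.dequantize; ui16 is not.
        bool IsSupportedByTfliteQuantizeOrDequantizeOps(
            mlir::IntegerType storage_type) {
          return storage_type.getWidth() == 8 ||
                 (storage_type.getWidth() == 16 && !storage_type.isUnsigned());
        }
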
  5. tensorflow/compiler/mlir/lite/quantization/numerical_utils.cc

    }
    
    // Calculates the quantized range for a given scale, zero point, minimum and
    // maximum values, and quantization range.
    //
    // Args:
    //   scale: The scale factor for the quantized values.
    //   zero_point: The zero point for the quantized values.
    //   rmin: The minimum value of the quantized values.
    //   rmax: The maximum value of the quantized values.
    //   qmin: The minimum value of the quantization range.
    - Last Modified: Tue Oct 17 19:57:04 UTC 2023
    - 3.3K bytes
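
    A self-contained sketch of the arithmetic this doc comment describes, assuming the usual affine mapping q = round(r / scale) + zero_point clamped to [qmin, qmax]; the signature is illustrative rather than the one in numerical_utils.cc:

        #include <algorithm>
        #include <cmath>
        #include <cstdint>
        #include <utility>

        // Quantizes the real-valued endpoints rmin and rmax under
        // (scale, zero_point), clamping to the quantization range [qmin, qmax].
        std::pair<int32_t, int32_t> CalculateQuantizedRange(
            double scale, int32_t zero_point, double rmin, double rmax,
            int32_t qmin, int32_t qmax) {
          auto quantize = [&](double r) {
            const auto q =
                static_cast<int32_t>(std::round(r / scale)) + zero_point;
            return std::clamp(q, qmin, qmax);
          };
          return {quantize(rmin), quantize(rmax)};
        }
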
  6. tensorflow/c/tf_datatype.h

      TF_INT64 = 9,
      TF_BOOL = 10,
      TF_QINT8 = 11,     // Quantized int8
      TF_QUINT8 = 12,    // Quantized uint8
      TF_QINT32 = 13,    // Quantized int32
      TF_BFLOAT16 = 14,  // Float32 truncated to 16 bits.
      TF_QINT16 = 15,    // Quantized int16
      TF_QUINT16 = 16,   // Quantized uint16
      TF_UINT16 = 17,
      TF_COMPLEX128 = 18,  // Double-precision complex
      TF_HALF = 19,
      TF_RESOURCE = 20,
    - Last Modified: Fri Sep 08 20:13:32 UTC 2023
    - 2.5K bytes
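
    These values are part of the public TensorFlow C API. TF_DataTypeSize, declared in the same header, reports the byte width of a fixed-size dtype:

        #include <cstdio>
        #include "tensorflow/c/tf_datatype.h"

        int main() {
          // TF_DataTypeSize returns 0 for variable-size or unknown types.
          std::printf("TF_QINT8:    %zu bytes\n", TF_DataTypeSize(TF_QINT8));
          std::printf("TF_BFLOAT16: %zu bytes\n", TF_DataTypeSize(TF_BFLOAT16));
          return 0;
        }
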
  7. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize/quantize.mlir

        return %7 : tensor<1x3xf32>
      }
    // Test that the inputs and output of the tf.XlaCallModule op have been
    // replaced by quantized types, and that the corresponding quantfork.dcast
    // ops that turned those quantized types back into float types are removed.
    // CHECK: %[[CONST_0:.+]] = stablehlo.constant dense<1.000000e+00> : tensor<4x3xf32>
    - Last Modified: Thu Apr 18 01:38:40 UTC 2024
    - 6.3K bytes
  8. tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.proto

        METHOD_NO_QUANTIZE = 1;
    
        // Static range quantization. Quantized tensor values' ranges are
        // statically determined. Activations and weights are quantized to INT8,
        // while biases are quantized to INT32.
        METHOD_STATIC_RANGE_INT8 = 2;
    
        // Dynamic range quantization. Quantized tensor values' ranges are
        // determined during graph execution. The weights are quantized during
        // conversion.
        METHOD_DYNAMIC_RANGE_INT8 = 3;
    - Last Modified: Tue Mar 19 06:31:19 UTC 2024
    - 9.2K bytes
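
    As a worked example of what "statically determined" means for METHOD_STATIC_RANGE_INT8: the INT8 parameters are derived from a calibrated float range ahead of time. The numbers below are illustrative only:

        #include <cmath>
        #include <cstdio>

        int main() {
          // Calibrated (static) float range of an activation, e.g. after ReLU6.
          const double rmin = 0.0, rmax = 6.0;
          const int qmin = -128, qmax = 127;  // INT8
          const double scale = (rmax - rmin) / (qmax - qmin);     // ~0.02353
          const int zero_point =
              static_cast<int>(std::round(qmin - rmin / scale));  // -128
          std::printf("scale=%f zero_point=%d\n", scale, zero_point);
          return 0;
        }
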
  9. tensorflow/compiler/mlir/quantization/stablehlo/passes/quantize_composite_functions.cc

    };
    
    void QuantizeCompositeFunctionsPass::runOnOperation() {
      MLIRContext& ctx = getContext();
    
      PassManager pm(&ctx);
      // Intermediate output from QuantizePass will have quantized ops
      // (XlaCallModuleOps) with quantized input and output types, which are not
      // allowed in the TF dialect.
      pm.enableVerifier(false);
    
      PrepareQuantizePassOptions options;
      options.enable_per_channel_quantized_weight_ =
    - Last Modified: Fri May 03 02:59:01 UTC 2024
    - 4.6K bytes
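
    The pattern above, a nested PassManager built inside runOnOperation() with the verifier disabled while intermediate IR is temporarily invalid, reduces to roughly this sketch against stock MLIR (pass registration omitted):

        #include "mlir/IR/BuiltinOps.h"
        #include "mlir/Pass/PassManager.h"

        mlir::LogicalResult RunNestedPipeline(mlir::ModuleOp module,
                                              mlir::MLIRContext& ctx) {
          mlir::PassManager pm(&ctx);
          // Skip verification between stages: intermediate ops may carry
          // quantized types that the enclosing dialect would reject.
          pm.enableVerifier(false);
          // pm.addPass(...);  // add the pipeline's passes here
          return pm.run(module);
        }
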
  10. tensorflow/compiler/mlir/quantization/stablehlo/cc/report.cc

        return result;
      } else {
        return std::nullopt;
      }
    }
    
    // Populates quantized ops from `module_op` to `results`. After going through
    // the quantization passes, quantized ops are represented as `func::CallOp` with
    // a callee's prefix of `quantized_`.
    void PopulateQuantizedResults(ModuleOp module_op,
                                  QuantizationResults& results) {
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 6.8K bytes
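
    A hedged sketch of the traversal the comment describes: walk the module and collect each func::CallOp whose callee name carries the quantized_ prefix (the helper name is invented for illustration):

        #include <vector>
        #include "mlir/Dialect/Func/IR/FuncOps.h"
        #include "mlir/IR/BuiltinOps.h"

        // Collects every call to a function whose name starts with
        // "quantized_", i.e. ops the quantization passes emitted.
        std::vector<mlir::func::CallOp> CollectQuantizedCalls(
            mlir::ModuleOp module_op) {
          std::vector<mlir::func::CallOp> calls;
          module_op.walk([&](mlir::func::CallOp call_op) {
            if (call_op.getCallee().starts_with("quantized_"))
              calls.push_back(call_op);
          });
          return calls;
        }
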