Results 1 - 5 of 5 for Motivation (0.15 sec)

  1. tensorflow/compiler/mlir/lite/quantization/tensorflow/fallback_to_flex_patterns.td

    def SwapMaximumOperands : Pat<
      (TF_MaximumOp (TF_ConstOp:$cst $cst_val), $input),
      (TF_MaximumOp $input, $cst)>;
    
    def SwapMinimumOperands : Pat<
      (TF_MinimumOp (TF_ConstOp:$cst $cst_val), $input),
      (TF_MinimumOp $input, $cst)>;
    
    // Relu1 activation is represented as a pair of Max and Min ops. The following
    // patterns recognize and keep them as TF ops so they can be converted to the
    // TFLite Relu1 op.
    def MatchRelu1Pattern1 : Pat<
      (TF_MinimumOp:$min_op
    - Last Modified: Thu Sep 29 21:02:21 UTC 2022
    - 3.2K bytes
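    The two Swap* patterns move a constant first operand to the right-hand
    side, presumably so the Relu1 patterns only need to match one operand
    order. A minimal NumPy sketch of the computation MatchRelu1Pattern1 is
    looking for (relu1 is an illustrative name, not from the file):

        import numpy as np

        def relu1(x):
            # Relu1 clamps values to [-1, 1]. In a TF graph it appears as
            # Minimum(Maximum(x, -1), 1); the DRR patterns above keep that
            # pair intact so it can lower to a single TFLite Relu1 op.
            return np.minimum(np.maximum(x, -1.0), 1.0)

        print(relu1(np.array([-2.0, -0.5, 0.0, 0.5, 2.0])))
        # -> [-1.  -0.5  0.   0.5  1. ]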
  2. tensorflow/compiler/mlir/quantization/stablehlo/quantization_options.proto

        // conversion, then dequantized during inference.
        // Activation: f32, Weight: qi8, Bias: f32
        WEIGHT_ONLY = 1;
    
        // Apply default dynamic range quantization. Quantized tensor values'
        // ranges are determined during graph runtime.
        // Activation: f32, Weight: qi8, Bias: f32
        POST_TRAINING_QUANTIZATION_DYNAMIC_RANGE = 2;
    
    - Last Modified: Thu Jun 22 02:20:05 UTC 2023
    - 3.6K bytes
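    Both modes quantize weights to qi8 while activations stay f32. A hedged
    NumPy sketch of the per-tensor symmetric weight quantization this
    implies (quantize_weights_dynamic is an illustrative helper, not an API
    from this proto):

        import numpy as np

        def quantize_weights_dynamic(w):
            # Symmetric per-tensor qi8: the scale comes from the weight's
            # own range, so no calibration data is needed; activation
            # ranges are instead observed at graph runtime, per the proto.
            scale = np.abs(w).max() / 127.0
            q = np.clip(np.round(w / scale), -127, 127).astype(np.int8)
            return q, scale

        w = np.random.randn(4, 4).astype(np.float32)
        q, scale = quantize_weights_dynamic(w)
        print(np.abs(w - q * scale).max())  # error bounded by ~scale / 2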
  3. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization.td

        there's any, and set it to True. The reason behind this decision is that
        activations of these ops generally show better accuracy with asymmetric
        input quantization, so we want to eventually deprecate symmetric activation
        quantization for those ops.
        - Unlike the old quantizer, per-channel quantization is supported for
        weight-only TransposeConvOp.
      }];
    
      let methods = [
        InterfaceMethod<
    - Last Modified: Tue Mar 05 07:39:40 UTC 2024
    - 8.3K bytes
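    The description favors asymmetric quantization for activations. A small
    sketch of the difference, assuming int8 with a zero point (the function
    name is illustrative): a one-sided range such as a ReLU output leaves
    half of the symmetric code space unused, while a zero point shifts the
    full int8 range onto the observed data.

        import numpy as np

        def quantize_asymmetric(x, qmin=-128, qmax=127):
            # Map [x.min(), x.max()] onto the full int8 range via a scale
            # and a zero point; for post-ReLU data x.min() is ~0, so all
            # 256 codes land on the used range instead of half of them.
            scale = (x.max() - x.min()) / (qmax - qmin)
            zero_point = int(np.round(qmin - x.min() / scale))
            q = np.round(x / scale) + zero_point
            return np.clip(q, qmin, qmax).astype(np.int8), scale, zero_point

        x = np.maximum(np.random.randn(1000).astype(np.float32), 0.0)
        q, scale, zp = quantize_asymmetric(x)
        print(scale, zp)  # zp near qmin for a one-sided activation range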
  4. tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.proto

        // optimizations in the pipeline.
        METHOD_NO_QUANTIZE = 1;
    
        // Static range quantization. Quantized tensor values' ranges are statically
        // determined. The activation and weight are quantized to INT8 while bias is
        // quantized to INT32.
        METHOD_STATIC_RANGE_INT8 = 2;
    
        // Dynamic range quantization. Quantized tensor values' ranges are
    - Last Modified: Tue Mar 19 06:31:19 UTC 2024
    - 9.2K bytes
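    For METHOD_STATIC_RANGE_INT8 the bias is int32 and must be quantized
    with scale input_scale * weight_scale, so it can be added directly to
    the int8 x int8 -> int32 accumulator. A minimal sketch (the helper name
    is illustrative):

        import numpy as np

        def quantize_bias(bias_f32, input_scale, weight_scale):
            # The int32 bias shares the accumulator's scale; with matching
            # scales, bias addition is a plain integer add after the matmul.
            bias_scale = input_scale * weight_scale
            return np.round(bias_f32 / bias_scale).astype(np.int32)

        b = np.array([0.25, -0.1, 0.0], dtype=np.float32)
        print(quantize_bias(b, input_scale=0.02, weight_scale=0.005))
        # -> [ 2500 -1000     0]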
  5. tensorflow/compiler/mlir/lite/ir/tfl_op_enums.td

      let cppNamespace = "::mlir::TFL";
    }
    
    def TFL_DimensionTypeAttr : EnumAttr<TFL_Dialect, TFL_DimensionType,
        "dimension_type_attr"> {
      let convertFromStorage = "$_self";
    }
    
    // Allowed activation function cases
    // These should match the ActivationFunctionType enum in TFLite schema.
    def TFL_AFEnum_None  : I32EnumAttrCase<"NONE", 0>;
    def TFL_AFEnum_Relu  : I32EnumAttrCase<"RELU", 1>;
    - Last Modified: Thu Oct 20 00:05:24 UTC 2022
    - 6.4K bytes
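    The TFL_AFEnum_* cases mirror ActivationFunctionType in the TFLite
    schema. A sketch of that mapping as a Python enum; the cases after RELU
    are quoted from the TFLite schema as I recall it, so verify against
    schema.fbs:

        from enum import IntEnum

        class ActivationFunctionType(IntEnum):
            # Must stay in sync with the TFL_AFEnum_* cases above.
            NONE = 0
            RELU = 1
            RELU_N1_TO_1 = 2  # the Relu1 op matched by the patterns in result 1
            RELU6 = 3
            TANH = 4
            SIGN_BIT = 5

        print(int(ActivationFunctionType.RELU))  # -> 1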