Results 11 - 20 of 306 for Quantized (0.27 sec)

  1. tensorflow/compiler/mlir/quantization/common/uniform_quantized_types.cc

                                << quantized_per_axis_type << ".\n");
        return false;
      }
    
      return true;
    }
    
    // Determines whether the storage type of a quantized type is supported by
    // `tfl.quantize` or `tfl.dequantize` ops. ui8, i8 and i16 are supported.
    bool IsSupportedByTfliteQuantizeOrDequantizeOps(IntegerType storage_type) {
      if (storage_type.getWidth() == 8 ||
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 8.4K bytes
    - Viewed (0)
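
    The snippet cuts off mid-condition, but the comment pins down the intent:
    ui8 and i8 share a bit width of 8, and i16 is the only 16-bit type listed
    as supported. A minimal sketch of that predicate under those assumptions
    (the actual body in the file may differ):

        #include "mlir/IR/BuiltinTypes.h"

        bool IsSupportedByTfliteQuantizeOrDequantizeOps(
            mlir::IntegerType storage_type) {
          // ui8 and i8 both have width 8; at width 16 only the signed
          // variant (i16) is listed as supported.
          return storage_type.getWidth() == 8 ||
                 (storage_type.getWidth() == 16 && !storage_type.isUnsigned());
        }
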
  2. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_config.h

      bool verify_numeric = false;
      // Whether to verify layer by layer or on the whole model. When disabled
      // (per-layer), float and quantized ops run from the same input (the
      // output of the previous quantized layer). When enabled, float and
      // quantized ops run on the respective float and quantized outputs of
      // the previous ops.
      bool whole_model_verify = false;
    
      // Whether to use fake quant attributes to calculate quantization parameters.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Mar 13 10:16:19 UTC 2024
    - 10.8K bytes
    - Viewed (0)
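
    The two flags shown here interact, so a brief usage sketch may help; it
    assumes the flags sit on the QuantizationSpecs struct this header defines:

        // Hedged sketch: flag semantics taken from the comments above.
        QuantizationSpecs specs;
        specs.verify_numeric = true;  // insert numeric-verification ops
        // false = per-layer: each float/quantized op pair runs from the same
        // input, the previous quantized layer's output. true = whole-model:
        // float ops consume previous float outputs, quantized ops consume
        // previous quantized outputs.
        specs.whole_model_verify = false;
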
  3. tensorflow/compiler/mlir/lite/quantization/numerical_utils.cc

    }
    
    // Calculates the quantized range for a given scale, zero point, minimum and
    // maximum values, and quantization range.
    //
    // Args:
    //   scale: The scale factor for the quantized values.
    //   zero_point: The zero point for the quantized values.
    //   rmin: The minimum of the real (pre-quantization) values.
    //   rmax: The maximum of the real (pre-quantization) values.
    //   qmin: The minimum value of the quantization range.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Oct 17 19:57:04 UTC 2023
    - 3.3K bytes
    - Viewed (0)
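
    The docstring is truncated before qmax, but the standard affine mapping
    q = round(r / scale) + zero_point suggests what the function computes. A
    self-contained sketch with an assumed signature (the file's actual
    implementation may differ):

        #include <algorithm>
        #include <cmath>
        #include <cstdint>
        #include <optional>
        #include <utility>

        // Map the real bounds through q = round(r / scale) + zero_point and
        // clamp the result to the quantization range [qmin, qmax].
        std::pair<int32_t, int32_t> CalculateQuantizedRange(
            double scale, int32_t zero_point, std::optional<double> rmin,
            std::optional<double> rmax, int32_t qmin, int32_t qmax) {
          auto quantize = [&](double r) {
            return static_cast<int32_t>(std::round(r / scale)) + zero_point;
          };
          const int32_t lo = rmin ? std::max(qmin, quantize(*rmin)) : qmin;
          const int32_t hi = rmax ? std::min(qmax, quantize(*rmax)) : qmax;
          return {lo, hi};
        }
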
  4. tensorflow/c/tf_datatype.h

      TF_INT64 = 9,
      TF_BOOL = 10,
      TF_QINT8 = 11,     // Quantized int8
      TF_QUINT8 = 12,    // Quantized uint8
      TF_QINT32 = 13,    // Quantized int32
      TF_BFLOAT16 = 14,  // Float32 truncated to 16 bits.
      TF_QINT16 = 15,    // Quantized int16
      TF_QUINT16 = 16,   // Quantized uint16
      TF_UINT16 = 17,
      TF_COMPLEX128 = 18,  // Double-precision complex
      TF_HALF = 19,
      TF_RESOURCE = 20,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Sep 08 20:13:32 UTC 2023
    - 2.5K bytes
    - Viewed (0)
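
    These enum values are the dtype tags used throughout the TensorFlow C
    API. A small usage sketch allocating a tensor of quantized int8 values
    (TF_AllocateTensor, TF_DataTypeSize, and TF_DeleteTensor are existing C
    API calls):

        #include <cstdint>
        #include "tensorflow/c/c_api.h"

        int main() {
          // Rank-1 tensor holding four TF_QINT8 (quantized int8) elements.
          int64_t dims[1] = {4};
          TF_Tensor* t = TF_AllocateTensor(TF_QINT8, dims, 1,
                                           4 * TF_DataTypeSize(TF_QINT8));
          TF_DeleteTensor(t);
          return 0;
        }
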
  5. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize/quantize.mlir

        return %7 : tensor<1x3xf32>
      }
    // Test that the inputs and output of the tf.XlaCallModule op have been
    // replaced by quantized types, and that the corresponding quantfork.dcast
    // ops that turned those quantized types back to float types are removed.
    // CHECK: %[[CONST_0:.+]] = stablehlo.constant dense<1.000000e+00> : tensor<4x3xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 18 01:38:40 UTC 2024
    - 6.3K bytes
    - Viewed (0)
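
    The removal the test checks for follows the usual MLIR rewrite idiom:
    once the producer yields quantized types directly, a quantfork.dcast that
    only converted back to float is redundant, and its operand can be
    forwarded to its users. A conceptual sketch (the helper name is
    hypothetical):

        #include "mlir/IR/Operation.h"
        #include "mlir/IR/PatternMatch.h"

        // `dcast` stands for a matched quantfork.dcast op: replace its float
        // result with its quantized operand, erasing the cast.
        void EraseRedundantDcast(mlir::Operation* dcast,
                                 mlir::PatternRewriter& rewriter) {
          rewriter.replaceOp(dcast, dcast->getOperand(0));
        }
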
  6. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize_composite_functions.cc

        }
        lines.push_back("");
        lines.push_back(absl::StrFormat(
            "Number of quantized layers with quantized outputs: %d/%d",
            total_quantized_func_count - float_output_func_count,
            total_quantized_func_count));
        lines.push_back(absl::StrFormat("Number of quantize layers added: %d",
                                        quantize_func_count));
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 54.5K bytes
    - Viewed (0)
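
    The snippet assembles a human-readable quantization report line by line.
    A standalone sketch of the same idiom, with the counters as parameters
    (the real pass derives them while walking the module):

        #include <string>
        #include <vector>
        #include "absl/strings/str_format.h"
        #include "absl/strings/str_join.h"

        std::string BuildSummary(int total_quantized_func_count,
                                 int float_output_func_count,
                                 int quantize_func_count) {
          std::vector<std::string> lines;
          lines.push_back(absl::StrFormat(
              "Number of quantized layers with quantized outputs: %d/%d",
              total_quantized_func_count - float_output_func_count,
              total_quantized_func_count));
          lines.push_back(absl::StrFormat(
              "Number of quantize layers added: %d", quantize_func_count));
          return absl::StrJoin(lines, "\n");
        }
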
  7. tensorflow/compiler/mlir/lite/stablehlo/transforms/compose_uniform_quantized_type_pass.cc

    };
    
    // Matches the pattern for quantized convolution op and rewrites it to use
    // uniform quantized types.
    //
    // Currently assumes asymmetric per-tensor quantization for activations and
    // symmetric per-channel quantization for filters.
    //
    // This pattern represents the following derived equation, where:
    // * rn = real (expressed) value for tensor n
    // * qn = quantized value for tensor n
    // * sn = scale for tensor n
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 64.6K bytes
    - Viewed (0)
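
    Filling in the relation the comment's variables point to (the zero point
    zn is assumed by analogy with rn/qn/sn): each tensor obeys the affine
    dequantization rn = sn * (qn - zn). For a convolution r3 = conv(r1, r2)
    with symmetric per-channel filters (z2 = 0), substituting and solving for
    the quantized output gives

        s3 * (q3 - z3) = conv(s1 * (q1 - z1), s2 * q2)
        q3 = z3 + (s1 * s2 / s3) * conv(q1 - z1, q2)

    with s2, and hence the combined multiplier, varying per output channel.
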
  8. tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.proto

        METHOD_NO_QUANTIZE = 1;
    
        // Static range quantization. The ranges of quantized tensor values
        // are statically determined. Activations and weights are quantized to
        // INT8, while biases are quantized to INT32.
        METHOD_STATIC_RANGE_INT8 = 2;
    
        // Dynamic range quantization. The ranges of quantized tensor values
        // are determined during graph execution. Weights are quantized at
        // conversion time.
        METHOD_DYNAMIC_RANGE_INT8 = 3;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 19 06:31:19 UTC 2024
    - 9.2K bytes
    - Viewed (0)
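
    The practical difference between the two INT8 methods is when the range,
    and hence the scale, is computed: static range fixes it ahead of time
    from calibration statistics, while dynamic range derives it from the live
    tensor on each execution. An illustrative sketch of the dynamic-range
    side (not TensorFlow API):

        #include <algorithm>
        #include <cmath>
        #include <vector>

        // Recompute a symmetric int8 scale from the tensor observed at
        // execution time; static range would instead bake this in offline.
        float DynamicRangeScale(const std::vector<float>& activations) {
          float max_abs = 0.0f;
          for (float v : activations) max_abs = std::max(max_abs, std::fabs(v));
          return max_abs / 127.0f;
        }
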
  9. tensorflow/compiler/mlir/quantization/stablehlo/passes/quantize_composite_functions.cc

    };
    
    void QuantizeCompositeFunctionsPass::runOnOperation() {
      MLIRContext& ctx = getContext();
    
      PassManager pm(&ctx);
      // Intermediate output from QuantizePass will have quantized ops
      // (XlaCallModuleOps) with quantized input and output types, which are not
      // allowed in the TF dialect.
      pm.enableVerifier(false);
    
      PrepareQuantizePassOptions options;
      options.enable_per_channel_quantized_weight_ =
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 03 02:59:01 UTC 2024
    - 4.6K bytes
    - Viewed (0)
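
    The snippet shows MLIR's nested pass-manager idiom: build an inner
    pipeline, disable verification because the intermediate IR is
    deliberately invalid in the TF dialect, then run it. A sketch of the
    surrounding boilerplate (the pass factory name is assumed):

        // Inside a pass's runOnOperation():
        mlir::PassManager pm(&getContext());
        // Intermediate IR carries quantized types the TF dialect verifier
        // would reject, so verification is off for this inner pipeline.
        pm.enableVerifier(false);
        pm.addNestedPass<mlir::func::FuncOp>(
            CreatePrepareQuantizePass(options));  // assumed factory name
        if (failed(pm.run(getOperation()))) signalPassFailure();
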
  10. tensorflow/compiler/mlir/quantization/common/ir/QuantOps.td

    //      quantized representation may be acceptable.
    //
    // Especially early in transformation, it is common to have pairs of
    // qcast/dcast at points where a transition to a quantized type is
    // required. In addition, it is also common to have an identity qcast
    // (where the operand and result type are not quantized) at all points where
    // it is legal to use a quantized representation (but is not known to be
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jan 09 03:10:59 UTC 2024
    - 10.2K bytes
    - Viewed (0)