Results 31 - 40 of 42 for calibrate (0.39 sec)

  1. tensorflow/compiler/mlir/tfr/tests/canonicalize.mlir

    // CHECK: return %[[scale]], %[[zp]]
    }
    
    // CHECK-LABEL: quant_qparam_invalid
    func.func @quant_qparam_invalid(%arg0: tensor<1x3x!quant.calibrated<f32<-1.0:1.0>>>) -> (!tfr.tensor, !tfr.tensor) {
      %0 = "tfr.cast"(%arg0) : (tensor<1x3x!quant.calibrated<f32<-1.0:1.0>>>) -> !tfr.tensor
      %scale, %zp = tfr.quant_qparam(%0) : (!tfr.tensor) -> (!tfr.tensor, !tfr.tensor)
      func.return %scale, %zp: !tfr.tensor, !tfr.tensor
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 11.1K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/quantization/tensorflow/calibrator/integration_test/custom_aggregator_op_test.py

    import tensorflow  # pylint: disable=unused-import
    
    from tensorflow.compiler.mlir.quantization.stablehlo import quantization_config_pb2 as stablehlo_quant_config_pb2
    from tensorflow.compiler.mlir.quantization.tensorflow.calibrator import custom_aggregator_op_wrapper
    from tensorflow.python import pywrap_tensorflow  # pylint: disable=unused-import
    from tensorflow.python.framework import dtypes
    from tensorflow.python.framework import ops
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 01:09:50 UTC 2024
    - 5.6K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/quantization/tensorflow/passes/insert_custom_aggregation_ops.cc

          // Quantize inputs of quantizable composite functions.
          for (OpOperand &input : op->getOpOperands()) {
            Type element_type = getElementTypeOrSelf(input.get().getType());
            // Non-float cases won't be calibrated.
            if (!element_type.isF32()) {
              continue;
            }
    
            // Skip when there is any already existing CustomAggregatorOp found.
            Operation *defining_op = input.get().getDefiningOp();
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 17 17:58:54 UTC 2024
    - 14.8K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/tests/prepare-quantize-post-training-16bits.mlir

        %9, %9,
        %recurrent_stats, %cell_stats,
        %9, %9, %9, %9) {
          asymmetric_quantize_inputs = false,
          cell_clip = 1.000000e+01 : f32,
          effective_hidden_scale_intermediate = tensor<0x!quant.calibrated<f32<0.0:1.0>>>,
          fused_activation_function = "TANH",
          input_to_cell_intermediate = tensor<0xf32>,
          input_to_forget_intermediate = tensor<0xf32>,
          input_to_input_intermediate = tensor<0xf32>,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 26.1K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/lite/utils/const_tensor_utils.cc

                                           elem_type.getIntOrFloatBitWidth());
      }
    
      // Intermediate tensors with calibration values (but without scales and
      // zero points) should return a calibrated quantized type.
      if (is_intermediate && tensor.quantization != nullptr &&
          !IsQuantized(tensor)) {
        TF_ASSIGN_OR_RETURN(elem_type, GetCalibratedQuantizedType(tensor, builder));
      }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 07 23:04:40 UTC 2024
    - 16.6K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/quantization/tensorflow/calibrator/calibration_algorithm.py

    import itertools
    import logging
    
    import numpy as np
    
    from tensorflow.compiler.mlir.quantization.stablehlo import quantization_config_pb2 as stablehlo_quant_config_pb2
    from tensorflow.compiler.mlir.quantization.tensorflow.calibrator import calibration_statistics_pb2 as calib_stats_pb2
    
    
    _CalibrationMethod = (
        stablehlo_quant_config_pb2.CalibrationOptions.CalibrationMethod
    )
    _REGISTRY = {}
    
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Mar 11 19:29:56 UTC 2024
    - 14.7K bytes
    - Viewed (0)
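    The `_REGISTRY` and `_CalibrationMethod` names above suggest a registry that maps each calibration method to the algorithm that turns collected statistics into a (min, max) range. A minimal, hypothetical sketch of that dispatch pattern (the class, key, and helper names below are illustrative, not the module's actual API):

    # Illustrative registry/dispatch pattern; names are hypothetical.
    _REGISTRY = {}

    def _register(method_key):
      """Class decorator that records an algorithm under a method key."""
      def wrap(cls):
        _REGISTRY[method_key] = cls
        return cls
      return wrap

    @_register("MIN_MAX")
    class _MinMaxAlgorithm:
      def get_min_max(self, statistics):
        # `statistics` is assumed to carry the observed global min/max.
        return statistics["global_min"], statistics["global_max"]

    def get_min_max_value(statistics, method_key):
      """Looks up the registered algorithm for the method and runs it."""
      return _REGISTRY[method_key]().get_min_max(statistics)

    # Example: get_min_max_value({"global_min": -1.0, "global_max": 1.0}, "MIN_MAX")
    # returns (-1.0, 1.0).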
  7. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h

        op.erase();
    
        return success();
      }
    
     private:
      int num_bits;
      bool narrow_range;
      bool is_signed;
      bool legacy_float_scale;
    
      // Emits an op warning message if the calibrated range is larger than 10.0 and
      // the storage type is less than or equal to 8 bits.
      void TensorRangeSanityCheck(quantfork::StatisticsOp op, double& min,
                                  double& max) const {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Apr 24 20:30:06 UTC 2024
    - 41.7K bytes
    - Viewed (0)
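    The comment above describes a sanity check: warn when a calibrated range wider than 10.0 is mapped onto a storage type of 8 bits or fewer. A hedged arithmetic sketch of that condition (the threshold and bit width come from the comment; the function name is made up):

    def range_sanity_warning(min_value: float, max_value: float,
                             storage_bits: int) -> bool:
      """True when the calibrated range looks too wide for the storage type."""
      calibrated_range = max_value - min_value
      return calibrated_range > 10.0 and storage_bits <= 8

    # A [-20, 20] range quantized to 8 bits trips the warning; [-1, 1] does not.
    print(range_sanity_warning(-20.0, 20.0, 8))  # True
    print(range_sanity_warning(-1.0, 1.0, 8))    # False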
  8. tensorflow/compiler/mlir/lite/ir/tfl_ops.td

        // Types of the optional intermediate tensors, which exist for the fully
        // quantized LSTM op and hold the ranges of the intermediate tensors.
        // The type of the intermediate tensors is quant.calibrated when imported,
        // storing only the calibrated min/max values. The proper quantization
        // spec is determined while going through the quantization passes.
        OptionalAttr<TypeAttr>:$input_to_input_intermediate,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jun 06 19:09:08 UTC 2024
    - 186K bytes
    - Viewed (0)
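    The comment above notes that a calibrated type stores only min/max; scale and zero point are derived later by the quantization passes. A minimal sketch of the standard asymmetric derivation for an 8-bit signed storage type (a generic formula, not the pass itself):

    def affine_params_from_range(min_value, max_value, qmin=-128, qmax=127):
      """Derives (scale, zero_point) from a calibrated [min, max] range using
      the asymmetric affine mapping real = scale * (quantized - zero_point)."""
      # Extend the range to include 0.0 so that zero is exactly representable.
      min_value = min(min_value, 0.0)
      max_value = max(max_value, 0.0)
      scale = (max_value - min_value) / (qmax - qmin)
      if scale == 0.0:  # Degenerate all-zero range.
        scale = 1.0
      zero_point = round(qmin - min_value / scale)
      return scale, zero_point

    # Example: the calibrated<f32<0.0:1.0>> intermediate type from result 4.
    print(affine_params_from_range(0.0, 1.0))  # (~0.0039216, -128)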
  9. pkg/controller/podautoscaler/replica_calculator_test.go

    	tc.runTest(t)
    }
    
    // TestReplicaCalcComputedToleranceAlgImplementation is a regression test that
    // back-calculates the minimal downscaling percentage from a small increase in
    // pod utilization, calibrated against the tolerance value.
    func TestReplicaCalcComputedToleranceAlgImplementation(t *testing.T) {
    
    	startPods := int32(10)
    	// 150 mCPU per pod.
    	totalUsedCPUOfAllPods := int64(startPods * 150)
    Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Sat Aug 19 03:31:34 UTC 2023
    - 68.4K bytes
    - Viewed (0)
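    The test above back-calculates how small a utilization change can be before the tolerance band suppresses scaling. A rough sketch of that rule (Python for consistency with the other sketches; the real calculator is Go, and the 0.1 default tolerance is an assumption):

    import math

    def desired_replicas(current_replicas, usage_ratio, tolerance=0.1):
      """Skips scaling when the usage ratio is within the tolerance of 1.0;
      otherwise scales the replica count proportionally."""
      if abs(usage_ratio - 1.0) <= tolerance:
        return current_replicas
      return math.ceil(current_replicas * usage_ratio)

    # With 10 pods, a ratio just inside the band keeps 10 replicas;
    # just outside it, one replica is removed.
    print(desired_replicas(10, 0.91))  # 10
    print(desired_replicas(10, 0.89))  # 9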
  10. RELEASE.md

            `is_dynamic_op=False` would be set.
        *   `converter.convert()` no longer returns a `tf.function`. Now the
            function must be accessed from the saved model.
        *   The `converter.calibrate()` method has been removed. To trigger
            calibration, a `calibration_input_fn` should be provided to
            `converter.convert()`.
    
    *   Other:
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jun 11 23:24:08 UTC 2024
    - 730.3K bytes
    - Viewed (0)
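    The release note above says calibration is now triggered by passing a `calibration_input_fn` to `converter.convert()` instead of calling `converter.calibrate()`. A hedged usage sketch, assuming the TF-TRT `TrtGraphConverterV2` API, a TensorRT-enabled TensorFlow build, and placeholder paths and shapes:

    import numpy as np
    from tensorflow.python.compiler.tensorrt import trt_convert as trt

    # Placeholder saved-model path; INT8 requires calibration data.
    converter = trt.TrtGraphConverterV2(
        input_saved_model_dir="/tmp/saved_model",
        precision_mode=trt.TrtPrecisionMode.INT8,
        use_calibration=True)

    def calibration_input_fn():
      # Yield representative input batches; calibration runs inside convert().
      for _ in range(8):
        yield (np.random.random((1, 224, 224, 3)).astype(np.float32),)

    # Calibration happens here; there is no separate calibrate() call anymore.
    converter.convert(calibration_input_fn=calibration_input_fn)
    converter.save("/tmp/trt_saved_model")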