Results 11 - 20 of 283 for Scales (1.66 sec)

  1. tensorflow/compiler/mlir/lite/utils/const_tensor_utils.cc

      // Map zero scales to the minimum fp value, because some flatbuffers
      // contain a zero scale for zero values.
      llvm::SmallVector<double> scales;
      for (float scale : quant_params.scale) {
        if (scale == 0) {
          scales.push_back(std::numeric_limits<float>::min());
          continue;
        }
        scales.push_back(scale);
      }
    
      // Scale size can't be zero, as that was checked earlier.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 07 23:04:40 UTC 2024
    - 16.6K bytes
    - Viewed (0)
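
A small aside on the substitution in this snippet, as a standalone sketch using only the standard library: `std::numeric_limits<float>::min()` is the smallest positive normalized float, not the most negative value, which is what makes it a safe non-zero stand-in for a zero scale.

    #include <cstdio>
    #include <limits>

    // std::numeric_limits<float>::min() is the smallest positive normalized
    // float (about 1.18e-38); the most negative float is lowest(). Substituting
    // min() for a zero scale keeps the scale positive and non-zero.
    int main() {
      std::printf("min()    = %g\n", std::numeric_limits<float>::min());
      std::printf("lowest() = %g\n", std::numeric_limits<float>::lowest());
      return 0;
    }
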
  2. tensorflow/compiler/mlir/quantization/common/uniform_quantized_types.cc

          SmallVector<double>(scales), SmallVector<int64_t>(zero_points),
          quantization_dimension,
          /*storageTypeMin=*/llvm::minIntN(8) + (narrow_range ? 1 : 0),
          /*storageTypeMax=*/llvm::maxIntN(8));
    }
    
    UniformQuantizedPerAxisType CreateI32F32UniformQuantizedPerAxisType(
        const Location loc, MLIRContext& context, const ArrayRef<double> scales,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 8.4K bytes
    - Viewed (0)
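
The storage bounds in this snippet are plain int8 arithmetic: `llvm::minIntN(8)` is -128 and `llvm::maxIntN(8)` is 127, so `narrow_range` shrinks the range from [-128, 127] to the symmetric [-127, 127]. A minimal sketch of that arithmetic without LLVM:

    #include <cstdint>
    #include <cstdio>

    // Reproduces the storageTypeMin/storageTypeMax computation for int8 storage.
    int main() {
      for (bool narrow_range : {false, true}) {
        const int64_t storage_min = INT8_MIN + (narrow_range ? 1 : 0);  // -128 or -127
        const int64_t storage_max = INT8_MAX;                           // 127
        std::printf("narrow_range=%d -> [%lld, %lld]\n",
                    static_cast<int>(narrow_range),
                    static_cast<long long>(storage_min),
                    static_cast<long long>(storage_max));
      }
      return 0;
    }
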
  3. pkg/controller/deployment/recreate.go

    	return false
    }
    
    // scaleUpNewReplicaSetForRecreate scales up new replica set when deployment strategy is "Recreate".
    func (dc *DeploymentController) scaleUpNewReplicaSetForRecreate(ctx context.Context, newRS *apps.ReplicaSet, deployment *apps.Deployment) (bool, error) {
    	scaled, _, err := dc.scaleReplicaSetAndRecordEvent(ctx, newRS, *(deployment.Spec.Replicas), deployment)
    	return scaled, err
    Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Wed Oct 13 20:32:13 UTC 2021
    - 4.2K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/quantization/device_target.h

      OutputInputSameScale,
      OutputInputFreeScale,
      CustomScale,
    };
    
    // Each kernel signature has its own specification for scales.
    struct KernelSpec {
      // Scale constraint
      ScaleConstraintType type;
    
      // Custom function to derive the scales. Only available when the scale
      // constraint is `CustomScale`.
      ScaleFn scale_fn;
    };
    
    class KernelSpecs {
     public:
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Mar 08 10:41:08 UTC 2024
    - 7.1K bytes
    - Viewed (0)
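
A hedged mirror of how a spec like this might be consumed; the names and the scale-function signature below are illustrative, not the actual TFLite types:

    #include <functional>
    #include <vector>

    // Illustrative only: a per-kernel scale constraint that either forces the
    // output scale to match the input, leaves it free, or defers to a custom
    // derivation function (the CustomScale case above).
    enum class ScaleConstraint { OutputInputSameScale, OutputInputFreeScale, CustomScale };

    struct KernelScaleSpec {
      ScaleConstraint type = ScaleConstraint::OutputInputFreeScale;
      // Consulted only when type == ScaleConstraint::CustomScale.
      std::function<double(const std::vector<double>& input_scales)> scale_fn;
    };

    double DeriveOutputScale(const KernelScaleSpec& spec,
                             const std::vector<double>& input_scales,
                             double calibrated_scale) {
      switch (spec.type) {
        case ScaleConstraint::OutputInputSameScale:
          return input_scales.front();         // output reuses the input scale
        case ScaleConstraint::CustomScale:
          return spec.scale_fn(input_scales);  // kernel-specific rule
        case ScaleConstraint::OutputInputFreeScale:
        default:
          return calibrated_scale;             // unconstrained: use calibration
      }
    }
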
  5. cluster/addons/dns-horizontal-autoscaler/README.md

    # DNS Horizontal Autoscaler
    
    DNS Horizontal Autoscaler enables the horizontal autoscaling feature for the DNS
    service in Kubernetes clusters. The autoscaler runs as a Deployment. It collects
    cluster status from the APIServer and horizontally scales the number of DNS
    backends based on demand. Autoscaling parameters can be tuned by modifying the
    `kube-dns-autoscaler` ConfigMap in the `kube-system` namespace.
    
    Learn more about:
    Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Thu Aug 13 20:03:37 UTC 2020
    - 596 bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/convert_tf_quant_ops_to_mhlo.cc

        int64_t storage_type_min, int64_t storage_type_max,
        int64_t quantized_dimension, PatternRewriter &rewriter) {
      // Check whether the scales operand is defined by a constant op.
      DenseFPElementsAttr scales;
      if (!matchPattern(scales_value, m_Constant(&scales))) {
        return rewriter.notifyMatchFailure(op, "scales must be constant");
      }
    
      // Check whether the zero_points operand is defined by a constant op.
      DenseIntElementsAttr zero_points;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 17 17:58:54 UTC 2024
    - 30.9K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization.td

    class FixedResultScale<QuantizedType qt> : NativeOpTrait<!strconcat(
      "quant::FixedResult", qt.name, "Scale<", qt.asTraitArgsStr, ">::Impl")>;
    
    // Specify this trait if the bias-th input of the op is a bias input, which
    // needs a scale based on the scales of op1 and op2.
    class AccumulatorUniformScale<int bias, int op1, int op2> : NativeOpTrait<
      !strconcat("quant::AccumulatorUniformScale<",
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 05 07:39:40 UTC 2024
    - 8.3K bytes
    - Viewed (0)
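
The usual convention behind a bias scale "based on the scales of op1 and op2" in uniform quantization is bias_scale = input_scale * weight_scale, so the int32 bias adds directly onto the int32 accumulator. A short sketch of that rule, assumed here to be what this trait encodes, per-tensor case only:

    #include <cmath>
    #include <cstdint>

    // Per-tensor sketch of the accumulator/bias scale convention: the bias is
    // quantized with scale = input_scale * weight_scale so it shares the
    // accumulator's scale.
    int32_t QuantizeBias(float bias, double input_scale, double weight_scale) {
      const double bias_scale = input_scale * weight_scale;
      return static_cast<int32_t>(std::lround(bias / bias_scale));
    }
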
  8. tensorflow/compiler/mlir/lite/experimental/tac/transforms/transform_patterns.td

                       "-1.0f">), TFL_AF_None), $act)>;
    
    // Squash tfl.dequantize and tfl.quantize pairs.
    // TODO(b/185915462): Compare the scale of input and output. This can also be
    // squashed to a requantize op if the scales are different.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Sep 29 21:02:21 UTC 2022
    - 1.4K bytes
    - Viewed (0)
  9. pkg/controller/replicaset/metrics/metrics.go

    			"The intent of this metric is to measure the rough efficacy of the LogarithmicScaleDown feature gate's effect on " +
    			"the sorting (and deletion) of pods when a replicaset scales down. This only considers Ready pods when calculating and reporting.",
    		Buckets:        metrics.ExponentialBuckets(0.25, 2, 6),
    		StabilityLevel: metrics.ALPHA,
    	},
    )
    
    // Register registers ReplicaSet controller metrics.
    Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Thu Jun 01 15:28:42 UTC 2023
    - 1.5K bytes
    - Viewed (0)
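
For the bucket layout above: `metrics.ExponentialBuckets(0.25, 2, 6)` follows the usual Prometheus semantics (start, factor, count), giving upper bounds 0.25, 0.5, 1, 2, 4, 8. A minimal sketch of that computation:

    #include <vector>

    // `count` upper bounds, starting at `start` and multiplying by `factor`:
    // ExponentialBounds(0.25, 2, 6) -> {0.25, 0.5, 1, 2, 4, 8}.
    std::vector<double> ExponentialBounds(double start, double factor, int count) {
      std::vector<double> bounds;
      bounds.reserve(count);
      for (double bound = start; count-- > 0; bound *= factor) {
        bounds.push_back(bound);
      }
      return bounds;
    }
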
  10. tensorflow/compiler/mlir/lite/transforms/quantize_patterns.td

      CPred<"$_self.cast<ElementsAttr>().getShapedType().getElementType().isF32()">, "float constant tensor">;
    
    // Squash tfl.dequantize and tfl.quantize pairs.
    // TODO(fengliuai): Compare the scale of input and output. This can also be
    // squashed to a requantize op if the scales are different.
    def : Pat<(TFL_QuantizeOp (TFL_DequantizeOp $in), $qt), (replaceWithValue $in)>;
    
    // If the tfl.dequantize op wasn't fused, we shouldn't quantize the floating
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 28 23:10:13 UTC 2024
    - 2.3K bytes
    - Viewed (0)
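
The TODOs in results 8 and 10 point at the same gap: a tfl.dequantize/tfl.quantize pair can only be dropped outright when both sides use the same quantization parameters; with differing scales it should become a requantize instead. A sketch of the standard requantization arithmetic for int8 storage (not the actual TFLite pattern):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Re-express a value quantized with (scale_in, zp_in) in terms of
    // (scale_out, zp_out): dequantize to a real value, quantize again, and
    // clamp to the int8 storage range.
    int8_t Requantize(int8_t q_in, double scale_in, int32_t zp_in,
                      double scale_out, int32_t zp_out) {
      const double real_value = scale_in * (q_in - zp_in);
      const long q_out = std::lround(real_value / scale_out) + zp_out;
      return static_cast<int8_t>(std::clamp<long>(q_out, -128, 127));
    }
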