Results 1 - 10 of 49 for Quantile (0.19 sec)

  1. istioctl/pkg/metrics/metrics.go

    	}
    
    	return sm, nil
    }
    
    func getLatency(promAPI promv1.API, workloadName, workloadNamespace string, duration time.Duration, quantile float64) (time.Duration, error) {
    	latencyQuery := fmt.Sprintf(`histogram_quantile(%f, sum(rate(%s_bucket{%s=~"%s.*", %s=~"%s.*",reporter="destination"}[%s])) by (le))`,
    		quantile, reqDur, destWorkloadLabel, workloadName, destWorkloadNamespaceLabel, workloadNamespace, duration)
    
    Registered: Fri Jun 14 15:00:06 UTC 2024
    - Last Modified: Sat Apr 13 05:23:38 UTC 2024
    - 8.4K bytes
    - Viewed (0)
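
    The istioctl snippet above builds a PromQL histogram_quantile query from a format
    string. As a rough, standalone sketch (not the actual istioctl code), the Go program
    below fills the same kind of template and prints the resulting query; the metric and
    label names are assumptions standing in for reqDur, destWorkloadLabel, and
    destWorkloadNamespaceLabel.

    	package main

    	import (
    		"fmt"
    		"time"
    	)

    	func main() {
    		// Hypothetical values standing in for the variables used in metrics.go.
    		const (
    			reqDur                     = "istio_request_duration_milliseconds"
    			destWorkloadLabel          = "destination_workload"
    			destWorkloadNamespaceLabel = "destination_workload_namespace"
    		)
    		quantile := 0.99
    		workloadName := "productpage"
    		workloadNamespace := "default"
    		duration := time.Minute

    		// Same template shape as the snippet: a histogram_quantile over the
    		// per-le rate of the request-duration histogram buckets.
    		latencyQuery := fmt.Sprintf(
    			`histogram_quantile(%f, sum(rate(%s_bucket{%s=~"%s.*", %s=~"%s.*",reporter="destination"}[%s])) by (le))`,
    			quantile, reqDur, destWorkloadLabel, workloadName,
    			destWorkloadNamespaceLabel, workloadNamespace, duration)

    		fmt.Println(latencyQuery)
    	}
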
  2. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize/quantize.mlir

    // RUN: stablehlo-quant-opt %s -split-input-file -stablehlo-quantize -verify-each=false | FileCheck %s
    
    // Tests for PopulateFusedGemmStylePatterns are handled in
    // quantize_composite_functions for module-level evaluation of functions.
    
    module attributes {tf_saved_model.semantics} {
    // CHECK: quantize_simple_xla_call_module(%[[ARG_0:.+]]: tensor<1x4xf32>)
      func.func private @quantize_simple_xla_call_module(%arg0: tensor<1x4xf32>) -> tensor<1x3xf32> {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 18 01:38:40 UTC 2024
    - 6.3K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize.mlir

    // RUN: tf-quant-opt %s -split-input-file -quant-lift-quantizable-spots-as-functions -quant-quantize -verify-each=false | FileCheck %s
    
    func.func private @conv(%input: tensor<1x3x4x3xf32> {tf._user_specified_name = "input_tensor"}) -> tensor<*xf32> attributes {tf._construction_context = "kEagerRuntime", tf._input_shapes = [#tf_type.shape<1x3x4x3>]} {
      %weight = arith.constant dense_resource<__elided__> : tensor<2x3x3x2xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 08 19:32:28 UTC 2024
    - 6.4K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/quantization/stablehlo/passes/quantize.cc

      patterns.add<StableHloQuantization, StableHloQuantizationReverse>(&ctx);
    
      PopulateCommonQuantizationPatterns(ctx, patterns,
                                         enable_per_channel_quantized_weight_);
    
      // Quantize all quantizable ops, including ops that are not compute-heavy.
      PopulateAllQuantizablePatterns(ctx, patterns);
    
      if (failed(applyPatternsAndFoldGreedily(module_op, std::move(patterns)))) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Apr 22 07:08:19 UTC 2024
    - 5K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/lite/tests/quantize-dynamic-range-float16.mlir

    // RUN: tf-opt %s -tfl-prepare-quantize-dynamic-range="enable-float16-quantization" -tfl-quantize="enable-dynamic-range-quantization=true" | FileCheck --check-prefix=CHECK %s
    
    // CHECK-LABEL: QuantizeUnidirectionalLstm
    func.func @QuantizeUnidirectionalLstm(%arg0: tensor<1x2x3xf32>) -> (tensor<1x2x3xf32>) {
      %1 = "tfl.pseudo_const"() {value = dense<[[0.1]]> : tensor<1x1xf32>} : () -> tensor<1x1xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 4.6K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/lite/utils/fake_quant_utils.h

        // dequantize ops, and insert them between the tf.FakeQuantWithMinMaxVarsOp
        // and its users.
        Value value = tf_op.getOutputs();
        auto quantize = rewriter.create<TFL::QuantizeOp>(
            tf_op.getLoc(), qtype.getValue(), value, qtype);
        auto dequantize = rewriter.create<TFL::DequantizeOp>(
            tf_op.getLoc(), res_type, quantize.getOutput());
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 6.6K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/lite/tests/default_quant_params.mlir

    // RUN: tf-opt %s --tfl-default-quant --tfl-quantize | FileCheck %s
    
    // CHECK-LABEL: hardcode_all
    func.func @hardcode_all(%arg0: tensor<2x2xf32>, %arg1: tensor<2x1xf32>) -> tensor<2x2xf32> {
      %0 = "tfl.add"(%arg0, %arg1) {fused_activation_function="NONE"}: (tensor<2x2xf32>, tensor<2x1xf32>) -> tensor<2x2xf32>
      func.return %0 : tensor<2x2xf32>
    
    // CHECK: %[[q0:.*]] = "tfl.quantize"(%arg1) <{qtype = tensor<2x1x!quant.uniform<u8:f32, 0.0078431372549019607:128>>}>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 8.8K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/lite/quantization/tensorflow/tf_to_quant.cc

        return "Legalize TF to quant ops dialect";
      }
    };
    
    // Inserts a "tfl.quantize" and "tfl.dequantize" op pair (QDQs) after the
    // "tf.FakeQuantWithMinMaxVarsOp" to be constant folded. Since the constant
    // folding logic will use a "arith.constant" op to replace the
    // "tf.FakeQuantWithMinMaxVarsOp", the "tfl.quantize" op is used to preserve
    // the quantization parameters as a TypeAttr and "tfl.dequantize" op used to
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 8.1K bytes
    - Viewed (0)
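
    The comments in fake_quant_utils.h (result 6) and tf_to_quant.cc above describe
    inserting a "tfl.quantize" / "tfl.dequantize" pair (QDQ) to carry quantization
    parameters. As a purely numeric illustration of what such a pair does, and not
    TensorFlow code, the Go sketch below quantizes and dequantizes a few floats using
    the scale and zero point from the !quant.uniform<u8:f32, 0.0078431372549019607:128>
    type shown in result 7; the helper names are made up.

    	package main

    	import (
    		"fmt"
    		"math"
    	)

    	// quantizeU8 maps a float to an unsigned 8-bit value using the affine scheme
    	// q = round(x/scale) + zeroPoint, clamped to [0, 255].
    	func quantizeU8(x, scale float64, zeroPoint int) uint8 {
    		q := int(math.Round(x/scale)) + zeroPoint
    		if q < 0 {
    			q = 0
    		}
    		if q > 255 {
    			q = 255
    		}
    		return uint8(q)
    	}

    	// dequantizeU8 maps the quantized value back: x ~ scale * (q - zeroPoint).
    	func dequantizeU8(q uint8, scale float64, zeroPoint int) float64 {
    		return scale * float64(int(q)-zeroPoint)
    	}

    	func main() {
    		// Parameters from the !quant.uniform<u8:f32, ...:128> type above:
    		// scale of roughly 2/255 with zero point 128, i.e. about a [-1, 1] range.
    		scale := 0.0078431372549019607
    		zeroPoint := 128

    		for _, x := range []float64{-1.0, -0.25, 0.0, 0.1, 1.0} {
    			q := quantizeU8(x, scale, zeroPoint)
    			fmt.Printf("x=%+.4f  ->  q=%3d  ->  dequantized=%+.4f\n",
    				x, q, dequantizeU8(q, scale, zeroPoint))
    		}
    	}
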
  9. tensorflow/compiler/mlir/lite/transforms/quantize_patterns.td

    include "tensorflow/compiler/mlir/lite/ir/tfl_ops.td"
    
    // Quantize attribute $0 by using quantization parameter from %1.
    def QuantizeByQuantizedType : NativeCodeCall<"quant::Quantize($0, $1.getValue())">;
    def F32ElementsAttr : ElementsAttrBase<
      CPred<"$_self.cast<ElementsAttr>().getShapedType().getElementType().isF32()">, "float constant tensor">;
    
    // Squash tfl.dequantize and tfl.quantize pairs.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 28 23:10:13 UTC 2024
    - 2.3K bytes
    - Viewed (0)
  10. src/internal/trace/gc_test.go

    		mmuCurve2 := trace.NewMMUCurve(mu)
    		quantiles := []float64{0, 1 - .999, 1 - .99}
    		for window := time.Microsecond; window < time.Second; window *= 10 {
    			mud1 := mmuCurve.MUD(window, quantiles)
    			mud2 := mmuCurve2.MUD(window, quantiles)
    			for i := range mud1 {
    				if !aeq(mud1[i], mud2[i]) {
    					t.Errorf("for quantiles %v at window %v, want %v, got %v", quantiles, window, mud2, mud1)
    					break
    				}
    			}
    		}
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri May 17 18:48:18 UTC 2024
    - 5.3K bytes
    - Viewed (0)
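
    The gc_test.go excerpt evaluates the mutator utilization distribution at quantiles
    {0, 1 - .999, 1 - .99}. As a generic illustration unrelated to the internals of the
    trace package, the Go sketch below computes those quantiles from a sorted slice of
    hypothetical utilization samples by linear interpolation between order statistics.

    	package main

    	import (
    		"fmt"
    		"math"
    		"sort"
    	)

    	// quantile returns the q-th quantile (0 <= q <= 1) of sorted data using
    	// linear interpolation between adjacent order statistics.
    	func quantile(sorted []float64, q float64) float64 {
    		if len(sorted) == 0 {
    			return math.NaN()
    		}
    		pos := q * float64(len(sorted)-1)
    		lo := int(math.Floor(pos))
    		hi := int(math.Ceil(pos))
    		frac := pos - float64(lo)
    		return sorted[lo]*(1-frac) + sorted[hi]*frac
    	}

    	func main() {
    		// Hypothetical utilization samples; the real test derives its values
    		// from an execution trace via trace.NewMMUCurve.
    		samples := []float64{0.12, 0.55, 0.97, 0.99, 0.995, 0.999, 1.0, 1.0}
    		sort.Float64s(samples)

    		// The same quantiles used in the test: 0, 1-.999, 1-.99.
    		quantiles := []float64{0, 1 - .999, 1 - .99}
    		for _, q := range quantiles {
    			fmt.Printf("quantile %-6.3f -> %.4f\n", q, quantile(samples, q))
    		}
    	}
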