Results 151 - 160 of 193 for Quantile (0.22 sec)

  1. tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_xla.mlir

    // RUN: tf-quant-opt %s -split-input-file -quant-lift-quantizable-spots-as-functions -quant-quantize='target-opset=XLA' -verify-each=false | FileCheck %s
    
    func.func private @conv(%input: tensor<1x3x4x3xf32> {tf._user_specified_name = "input_tensor"}) -> tensor<*xf32> attributes {tf._construction_context = "kEagerRuntime", tf._input_shapes = [#tf_type.shape<1x3x4x3>]} {
      %weight = arith.constant dense_resource<__elided__> : tensor<2x3x3x2xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 08 19:32:28 UTC 2024
    - 11.4K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/lite/quantization/ir/QuantOps.td

      let regions = (region SizedRegion<1>:$body);
      let hasVerifier = 1;
    }
    
    def quantfork_ReturnOp : quantfork_Op<"return", [Terminator]> {
      let summary = [{
        The `return` operation terminates a quantize region and returns values.
      }];
    
      let arguments = (ins Variadic<AnyTensor>:$results);
    }
    
    //===----------------------------------------------------------------------===//
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Oct 13 12:46:08 UTC 2022
    - 10.2K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/lite/quantization/lite/quantize_model_test.cc

        readonly_model_ = input_model_->GetModel();
        model_ = UnPackFlatBufferModel(*readonly_model_);
      }
    };
    
    TEST_F(QuantizeLSTM2Test, VerifyLSTM) {
      // Quantize model.
      auto status = QuantizeModelAllOperators(
          &model_, TensorType_FLOAT32, TensorType_FLOAT32,
          /*allow_float=*/false, TensorType_INT8, output_buffer_);
      ASSERT_THAT(status, Eq(kTfLiteOk));
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 73.9K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/stablehlo/tests/uniform-quantized-stablehlo-to-tfl.mlir

    }
    // CHECK-LABEL: uniform_quantize_op_quantized_input
    // CHECK: stablehlo.uniform_quantize
    // CHECK-NOT: tfl.quantize
    
    // -----
    
    // Tests that the pattern doesn't match when the output tensor's storage type
    // is ui16. ui16 storage type for quantized type is not compatible with
    // `tfl.quantize`.
    
    func.func @uniform_quantize_op_uint16_output(%arg: tensor<2x2xf32>) -> tensor<2x2x!quant.uniform<ui16:f32, 3.000000e+0:127>> {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 14 17:10:32 UTC 2024
    - 106.2K bytes
    - Viewed (0)
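The type in this excerpt, `!quant.uniform<ui16:f32, 3.000000e+0:127>`, pairs a ui16 storage type with scale 3.0 and zero point 127. A minimal Go sketch of the affine mapping such a type describes (illustrative only, not TFLite's kernel logic; the function name and clamping bounds are assumptions):

    package main

    import (
    	"fmt"
    	"math"
    )

    // quantizeUniform maps x onto an integer grid using the standard affine
    // scheme q = round(x/scale) + zeroPoint, clamped to the storage range.
    // The [qmin, qmax] range stands in for the storage type of the quantized
    // tensor element type (e.g. i8 vs. ui16 in the MLIR test above).
    func quantizeUniform(x, scale float64, zeroPoint, qmin, qmax int64) int64 {
    	q := int64(math.Round(x/scale)) + zeroPoint
    	if q < qmin {
    		return qmin
    	}
    	if q > qmax {
    		return qmax
    	}
    	return q
    }

    func main() {
    	// scale = 3.0, zero point = 127, as in the ui16 type above.
    	fmt.Println(quantizeUniform(6.0, 3.0, 127, 0, 65535))  // ui16 range: prints 129
    	fmt.Println(quantizeUniform(6.0, 3.0, 127, -128, 127)) // i8 range: clamps to 127
    }

Whether `tfl.quantize` accepts a given storage type is a separate question; the test above only asserts that ui16 is rejected.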
  5. tensorflow/compiler/mlir/lite/tests/decompose-hybrid-quantization.mlir

      // CHECK-DAG: %[[VAL5:.+]] = "tfl.quantize"(%4) <{qtype = tensor<1x32x32x16x!quant.uniform<i8:f32, 1.000000e+00>>}>
      // CHECK: return %[[VAL5]]
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 13.1K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/quantization/tensorflow/BUILD

            "passes/prepare_quantize.inc",
            "passes/prepare_quantize_drq.cc",
            "passes/preprocess_op.cc",
            "passes/preprocess_op.inc",
            "passes/propagate_quantize_type.cc",
            "passes/quantize.cc",
            "passes/quantize_composite_functions.cc",
            "passes/quantize_composite_functions.inc",
            "passes/quantize_weights.cc",
            "passes/quantized_function_library.h",
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 21 22:58:42 UTC 2024
    - 21.7K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/quantization/tensorflow/passes/passes.h

        bool enable_legacy_weight_only = false,
        std::optional<const absl::string_view> mlir_dump_file_prefix =
            std::nullopt);
    
    // Converts dequantize-(quantizable) call-quantize pattern to a single call op
    // that has quantized input and output types. It is expected for this pass to
    // emit illegal IR with unsupported quantized input and output types. The
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 12.3K bytes
    - Viewed (0)
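A rough, non-MLIR illustration of the pattern this comment describes: below, `unfused` models the dequantize -> call -> quantize round trip at a call site, while `fusedQuantizedOp` models the single call with quantized input and output types that the pass is said to produce. All names and the scale/zero-point values are invented for the sketch; the real pass rewrites IR rather than running functions.

    package main

    import "fmt"

    // Arbitrary quantization parameters for the sketch.
    const (
    	scale     = 0.5
    	zeroPoint = 10
    )

    func dequantize(q int32) float32 { return float32(q-zeroPoint) * scale }
    func quantize(x float32) int32   { return int32(x/scale) + zeroPoint }

    // floatOp is the "quantizable" callee operating on float values.
    func floatOp(x float32) float32 { return x + 1.0 }

    // Before the rewrite: a dequantize/quantize pair surrounds every call.
    func unfused(q int32) int32 { return quantize(floatOp(dequantize(q))) }

    // After the rewrite: a single call whose input and output are already
    // quantized, so no dequantize/quantize pair is materialized at the call site.
    func fusedQuantizedOp(q int32) int32 {
    	// For this toy op, adding 1.0 in float space is adding 1/scale = 2
    	// in quantized space.
    	return q + int32(1.0/scale)
    }

    func main() {
    	fmt.Println(unfused(14), fusedQuantizedOp(14)) // both print 16
    }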
  8. tensorflow/compiler/mlir/lite/stablehlo/transforms/uniform_quantized_stablehlo_to_tfl_pass.cc

    }
    
    // stablehlo.uniform_quantize -> tfl.quantize
    // TODO: b/322428814 - Add StableHLO quantizer integration tests for ODML.
    class RewriteUniformQuantizeOp
        : public OpRewritePattern<stablehlo::UniformQuantizeOp> {
      using OpRewritePattern<stablehlo::UniformQuantizeOp>::OpRewritePattern;
    
      // Determines whether the input and output types are compatible with
      // `tfl.quantize`. See the definition for the `QUANTIZE` kernel for the
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Apr 22 09:00:19 UTC 2024
    - 99.8K bytes
    - Viewed (0)
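The compatibility check this comment refers to lives in the TFLite `QUANTIZE` kernel definition, which the excerpt cuts off before. As a purely hypothetical sketch of such a predicate in Go (only the rejected-ui16 and accepted-i8 cases are taken from the excerpts on this page; everything else is assumption):

    package main

    import "fmt"

    // storageType is a toy stand-in for the storage type of a quantized
    // tensor element type: bit width plus signedness.
    type storageType struct {
    	bits   int
    	signed bool
    }

    // compatibleWithTFLQuantize is a hypothetical predicate mirroring the kind
    // of check the pattern above performs. The real rules live in the TFLite
    // QUANTIZE kernel; only the i8 and ui16 cases below come from this page.
    func compatibleWithTFLQuantize(t storageType) bool {
    	switch {
    	case t.bits == 8 && t.signed:
    		return true
    	case t.bits == 16 && !t.signed:
    		return false
    	default:
    		// Unknown here; a real check would consult the kernel's supported types.
    		return false
    	}
    }

    func main() {
    	fmt.Println(compatibleWithTFLQuantize(storageType{8, true}))   // true
    	fmt.Println(compatibleWithTFLQuantize(storageType{16, false})) // false, as in the ui16 test above
    }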
  9. src/runtime/mgcpacer_test.go

    	return func() float64 {
    		sum := f()
    		for _, s := range fs {
    			sum += s()
    		}
    		return sum
    	}
    }
    
    // quantize returns a new stream that rounds f to a multiple
    // of mult at each step.
    func (f float64Stream) quantize(mult float64) float64Stream {
    	return func() float64 {
    		r := f() / mult
    		if r < 0 {
    			return math.Ceil(r) * mult
    		}
    		return math.Floor(r) * mult
    	}
    }
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri May 19 13:53:21 UTC 2023
    - 39.3K bytes
    - Viewed (0)
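A self-contained usage sketch of the `quantize` helper from this excerpt; `float64Stream` is re-declared locally here (its definition lies outside the excerpt) and the input stream is invented:

    package main

    import (
    	"fmt"
    	"math"
    )

    // float64Stream matches the receiver type used in the excerpt above:
    // a stream is a function that yields the next value each time it is called.
    type float64Stream func() float64

    // quantize, as in the excerpt: round each value toward zero to a multiple of mult.
    func (f float64Stream) quantize(mult float64) float64Stream {
    	return func() float64 {
    		r := f() / mult
    		if r < 0 {
    			return math.Ceil(r) * mult
    		}
    		return math.Floor(r) * mult
    	}
    }

    func main() {
    	i := 0.0
    	ramp := float64Stream(func() float64 { i += 0.7; return i })
    	q := ramp.quantize(0.5)
    	fmt.Println(q(), q(), q()) // 0.5 1 2 : each value rounded toward zero to a multiple of 0.5
    }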
  10. docs/fr/docs/async.md

    It would take just as long to finish with or without sections (concurrency), and you would have done the same amount of work.
    
    Registered: Mon Jun 17 08:32:26 UTC 2024
    - Last Modified: Sun Mar 31 23:52:53 UTC 2024
    - 24K bytes
    - Viewed (0)