Results 131 - 140 of 193 for Quantile (0.14 sec)

  1. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/prepare_quantize/prepare_quantize_per_channel.mlir

    // RUN: stablehlo-quant-opt %s -split-input-file -stablehlo-prepare-quantize=enable-per-channel-quantized-weight=true -verify-diagnostics | FileCheck %s
    
    // -----
    
    module {
      // CHECK-LABEL: conv_with_bias_and_relu
      func.func private @conv_with_bias_and_relu(%arg0: tensor<1x3x2x3xf32>) -> tensor<1x2x2x2xf32> {
        %cst = "tf.Const"() {device = "", value = dense<[7.11401462, 7.05456924]> : tensor<2xf32>} : () -> tensor<2xf32>

    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 26 07:48:15 UTC 2024
    - 8.6K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights.cc

          mlir::TFL::PassConfig(quant_specs), pm);
    
      if (failed(pm.run(module.get()))) {
        absl::string_view err = statusHandler.ConsumeStatus().message();
        LOG(ERROR) << "Failed to quantize: " << err;
        return kTfLiteError;
      }
    
      // Export the results to the builder
      std::string result;
      tflite::FlatbufferExportOptions options;
      options.toco_flags.set_force_select_tf_ops(false);

    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 9.5K bytes
    - Viewed (0)
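
    The excerpt above follows the usual MLIR driver pattern: assemble a PassManager, run it over the module, and stop before export if any pass fails. Below is a minimal sketch of that pattern using only the upstream mlir::PassManager API; the helper name RunQuantizePipeline and the omitted pass registrations are placeholders, not the actual quantize_weights.cc code.

        #include "mlir/IR/BuiltinOps.h"
        #include "mlir/IR/MLIRContext.h"
        #include "mlir/Pass/PassManager.h"
        #include "mlir/Support/LogicalResult.h"
        #include "llvm/Support/raw_ostream.h"

        // Hypothetical helper: run a quantization pipeline and report failure
        // the way the excerpt does, before any FlatBuffer export is attempted.
        mlir::LogicalResult RunQuantizePipeline(mlir::ModuleOp module) {
          mlir::PassManager pm(module.getContext());
          // Passes would be appended here from the pass config, e.g. pm.addPass(...).
          if (mlir::failed(pm.run(module))) {
            llvm::errs() << "Failed to quantize module\n";
            return mlir::failure();
          }
          return mlir::success();
        }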
  3. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize_composite_functions_weight_only.mlir

    // RUN: stablehlo-quant-opt %s -split-input-file -verify-diagnostics \
    // RUN:     -stablehlo-quantize-composite-functions | FileCheck --check-prefix=CHECK %s
    
    // Test that per-tensor weight-only quantized dot_general op is produced when
    // empty `weight_only_ptq` is provided.
    
    module attributes {tf_saved_model.semantics} {
      func.func private @quantize_dot_general_per_tensor(%arg0: tensor<1x2xf32>) -> tensor<1x3xf32> attributes {tf._original_func_name = "main_0"} {

    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 09 05:56:10 UTC 2024
    - 9.4K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/prepare_quantize/prepare_quantize.mlir

    // RUN: stablehlo-quant-opt %s -split-input-file -stablehlo-prepare-quantize=enable-per-channel-quantized-weight=false -verify-diagnostics | FileCheck %s
    
    // -----
    
    // CHECK-LABEL: func @dot
    // CHECK-SAME: (%[[ARG_0:.*]]: tensor<?x3xf32>) -> tensor<?x2xf32>
    func.func @dot(%arg0: tensor<?x3xf32>) -> tensor<?x2xf32> {
      // CHECK: %[[cst:.*]] = stablehlo.constant
      // CHECK: %[[q1:.*]] = "quantfork.qcast"(%[[cst]])

    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Feb 22 19:52:06 UTC 2024
    - 8.7K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.cc

        // asymmetric range. For a state tensor, assigning correct quantization
        // parameters is sufficient, and for constants with asymmetric range it's
        // not correctly quantized by legacy quantizer so call the new Quantize.
        return Quantize(real_value, tensor_type);
      } else if (width == 16) {
        if (const auto uniform_type = dyn_cast<UniformQuantizedType>(q_type)) {
          const auto quantized_values =

    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 08 02:10:16 UTC 2024
    - 43.2K bytes
    - Viewed (0)
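
    For context on the UniformQuantizedType branch above: a uniform (affine) quantized type is parameterized by a scale, a zero point, and a storage range, and real values map to storage values as q = round(x / scale) + zero_point, clamped to that range. The sketch below is a standalone illustration of that formula, not the code in quantization_utils.cc.

        #include <algorithm>
        #include <cmath>
        #include <cstdint>
        #include <vector>

        // Uniform affine quantization: q = clamp(round(x / scale) + zero_point,
        // qmin, qmax). 8-bit signed storage typically uses [-128, 127]; 16-bit
        // activations are usually quantized symmetrically (zero_point == 0).
        std::vector<int32_t> QuantizeUniform(const std::vector<float>& real_values,
                                             double scale, int64_t zero_point,
                                             int64_t qmin, int64_t qmax) {
          std::vector<int32_t> quantized;
          quantized.reserve(real_values.size());
          for (float x : real_values) {
            int64_t q = static_cast<int64_t>(std::llround(x / scale)) + zero_point;
            quantized.push_back(static_cast<int32_t>(std::clamp(q, qmin, qmax)));
          }
          return quantized;
        }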
  6. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization.td

          quantization.}],
          "bool", "GetDynamicRangeQuantKernelSupport",
          (ins), [{}], [{return false;}]>,
        InterfaceMethod<
          [{Returns whether the op requires asymmetric quantize input attribute
          setting.}],
          "bool", "RequireAsymmetricQuantizeInputsAttr",
          (ins), [{}], [{return false;}]>,
      ];
    }
    
    // Specify this trait if the op has a fixed output value range.

    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 05 07:39:40 UTC 2024
    - 8.3K bytes
    - Viewed (0)
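
    The TableGen above declares two optional interface hooks that default to `return false;`. Conceptually, each behaves like a per-op method with a false default, as in the sketch below; this is an illustration only, not the ODS-generated interface code, and the class name is made up.

        // Hypothetical stand-in for the generated interface: ops override these
        // hooks only when they support dynamic-range quantized kernels or need
        // the asymmetric-quantize-inputs attribute; everything else inherits false.
        class QuantizableOpConceptSketch {
         public:
          virtual ~QuantizableOpConceptSketch() = default;
          virtual bool GetDynamicRangeQuantKernelSupport() const { return false; }
          virtual bool RequireAsymmetricQuantizeInputsAttr() const { return false; }
        };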
  7. tensorflow/compiler/mlir/lite/transforms/modify_io_nodes.cc

            returned_type = quant::ConvertSignedQuantizedToUnsigned(
                dequantize_input.getType(), dequantize_op.getLoc());
            // replace the dequantize op by a quantize op
            TypeAttr type_attr = TypeAttr::get(returned_type);
            auto quantize_op = builder.create<QuantizeOp>(
                dequantize_op.getLoc(), returned_type, dequantize_input, type_attr);

    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 8.9K bytes
    - Viewed (0)
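
    The ConvertSignedQuantizedToUnsigned call above rewrites an i8 quantized element type into its u8 counterpart. For 8-bit uniform quantization that conversion keeps the scale and shifts the zero point, and every stored value, by 128; the names in the sketch below are placeholders, not the helper in modify_io_nodes.cc.

        #include <cstdint>

        // Illustrative only: signed -> unsigned 8-bit requantization. The
        // real-valued mapping is preserved because the scale stays the same
        // while both the zero point and the stored values move by +128.
        struct UniformParams {
          double scale;
          int64_t zero_point;
        };

        UniformParams ToUnsigned8(const UniformParams& signed_params) {
          return {signed_params.scale, signed_params.zero_point + 128};
        }

        uint8_t ToUnsigned8Value(int8_t v) {
          return static_cast<uint8_t>(static_cast<int32_t>(v) + 128);
        }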
  8. tensorflow/compiler/mlir/lite/transforms/prepare_patterns.td

    class UsedBy<string op> : Constraint<
      CPred<"llvm::isa<mlir::TFL::" # op # "Op>(*$0.getUsers().begin())">>;
    
    // When the op is passing-through, the output types of the quantized ops need
    // to be updated as well. Since the quantize op manages its own type by the
    // "qtype" attribute, we should update the type shape in this attribute.
    def ReorderTransposeDequantQuant :
          Pat<(TF_TransposeOp:$old_value

    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Apr 30 00:40:15 UTC 2024
    - 10.5K bytes
    - Viewed (0)
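
    The UsedBy<op> constraint above is a CPred that inspects the first user of a matched value. Its C++ equivalent for a single op is straightforward; the sketch below spells it out for the Transpose case with a hypothetical helper name, mirroring the predicate text.

        #include "mlir/IR/Value.h"
        #include "llvm/Support/Casting.h"
        #include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"

        // Same check the CPred expands to for UsedBy<"Transpose">. The predicate
        // assumes the value has at least one user, so guard that here.
        bool UsedByTranspose(mlir::Value value) {
          if (value.use_empty()) return false;
          return llvm::isa<mlir::TFL::TransposeOp>(*value.getUsers().begin());
        }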
  9. tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composite_functions_drq.mlir

    // RUN: tf-quant-opt %s -split-input-file -quant-insert-quantized-functions='quantization-method=drq target-opset=UNIFORM_QUANTIZED' -quant-quantize-composite-functions='quantization-method=drq target-opset=UNIFORM_QUANTIZED' -symbol-dce | FileCheck %s
    
    module {
      // TODO(b/260020937): Support transpose_a, transpose_b for matmul.
      func.func @matmul(%arg0: tensor<2x12xf32>) -> (tensor<*xf32>) {

    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Jan 05 18:35:42 UTC 2024
    - 9.8K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/quantization/stablehlo/passes/quantization_patterns.cc

        // `stablehlo.convolution` assumes the following format:
        // [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f]
        // `stablehlo.dot_general` can take various formats. We only per-channel
        // quantize non-batch ops.
        // `stablehlo.dot_general` legalizable to `tfl.fully_connected` has a
        // filter rank of 2 with the last dimension as the channel dimension.
        const int64_t quantization_dimension =

    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 03 06:04:36 UTC 2024
    - 41.7K bytes
    - Viewed (0)
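
    The comment above picks the quantization dimension per op: the output-feature dimension for convolution filters and, for a dot_general that lowers to tfl.fully_connected, the last dimension of a rank-2 filter. The sketch below shows symmetric per-channel scale selection over that last dimension; PerChannelScales is a hypothetical helper, not the code in quantization_patterns.cc.

        #include <algorithm>
        #include <cmath>
        #include <cstdint>
        #include <vector>

        // Symmetric int8 per-channel scales for a row-major [rows, channels]
        // filter: each channel's scale maps its largest absolute weight onto 127.
        std::vector<double> PerChannelScales(const std::vector<float>& filter,
                                             int64_t rows, int64_t channels) {
          std::vector<double> max_abs(channels, 0.0);
          for (int64_t r = 0; r < rows; ++r) {
            for (int64_t c = 0; c < channels; ++c) {
              max_abs[c] = std::max<double>(max_abs[c],
                                            std::fabs(filter[r * channels + c]));
            }
          }
          std::vector<double> scales(channels);
          for (int64_t c = 0; c < channels; ++c) {
            scales[c] = max_abs[c] / 127.0;  // all-zero channels get scale 0 here
          }
          return scales;
        }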