Results 171 - 180 of 203 for dequantize (0.22 sec)

  1. tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights.cc

          mlir::TFL::PassConfig(quant_specs), pm);
    
      if (failed(pm.run(module.get()))) {
        absl::string_view err = statusHandler.ConsumeStatus().message();
        LOG(ERROR) << "Failed to quantize: " << err;
        return kTfLiteError;
      }
    
      // Export the results to the builder
      std::string result;
      tflite::FlatbufferExportOptions options;
      options.toco_flags.set_force_select_tf_ops(false);
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 9.5K bytes
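
    A minimal sketch of the mlir::PassManager failure-handling pattern this
    snippet relies on. The canonicalizer pass is a stand-in for the TFLite
    quantization pipeline that PassConfig configures; only the run-and-check
    idiom is the point here.

    #include "mlir/IR/BuiltinOps.h"
    #include "mlir/Pass/PassManager.h"
    #include "mlir/Transforms/Passes.h"

    mlir::LogicalResult RunPipeline(mlir::ModuleOp module) {
      mlir::PassManager pm(module.getContext());
      pm.addPass(mlir::createCanonicalizerPass());  // stand-in pass
      if (mlir::failed(pm.run(module))) {
        // Mirrors the LOG(ERROR)-and-return-error pattern above.
        return mlir::failure();
      }
      return mlir::success();
    }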
  2. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize_composite_functions_weight_only.mlir

    // RUN: stablehlo-quant-opt %s -split-input-file -verify-diagnostics \
    // RUN:     -stablehlo-quantize-composite-functions | FileCheck --check-prefix=CHECK %s
    
    // Test that per-tensor weight-only quantized dot_general op is produced when
    // empty `weight_only_ptq` is provided.
    
    module attributes {tf_saved_model.semantics} {
      func.func private @quantize_dot_general_per_tensor(%arg0: tensor<1x2xf32>) -> tensor<1x3xf32> attributes {tf._original_func_name = "main_0"} {
    - Last Modified: Thu May 09 05:56:10 UTC 2024
    - 9.4K bytes
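
    For intuition, a standalone sketch (not the StableHLO implementation) of
    the per-tensor symmetric int8 scheme that weight-only PTQ applies to the
    dot_general weights: one scale for the whole tensor, zero point fixed at 0.

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <vector>

    std::vector<int8_t> QuantizeWeightsPerTensor(const std::vector<float>& w,
                                                 float* scale) {
      float max_abs = 0.0f;
      for (float v : w) max_abs = std::max(max_abs, std::fabs(v));
      *scale = max_abs > 0.0f ? max_abs / 127.0f : 1.0f;  // avoid zero scale
      std::vector<int8_t> q(w.size());
      for (size_t i = 0; i < w.size(); ++i)
        q[i] = static_cast<int8_t>(std::lround(w[i] / *scale));
      return q;
    }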
  3. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/prepare_quantize/prepare_quantize.mlir

    // RUN: stablehlo-quant-opt %s -split-input-file -stablehlo-prepare-quantize=enable-per-channel-quantized-weight=false -verify-diagnostics | FileCheck %s
    
    // -----
    
    // CHECK-LABEL: func @dot
    // CHECK-SAME: (%[[ARG_0:.*]]: tensor<?x3xf32>) -> tensor<?x2xf32>
    func.func @dot(%arg0: tensor<?x3xf32>) -> tensor<?x2xf32> {
      // CHECK: %[[cst:.*]] = stablehlo.constant
      // CHECK: %[[q1:.*]] = "quantfork.qcast"(%[[cst]])
    - Last Modified: Thu Feb 22 19:52:06 UTC 2024
    - 8.7K bytes
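
    The quantfork.qcast the test checks for wraps the constant in a uniform
    quantized element type. Roughly how such a type is built with MLIR's Quant
    dialect (a sketch; the header path varies across MLIR versions):

    #include "mlir/Dialect/Quant/QuantTypes.h"
    #include "mlir/IR/Builders.h"

    // Builds !quant.uniform<i8:f32, scale:zero_point> programmatically.
    mlir::quant::UniformQuantizedType MakeQI8(mlir::MLIRContext* ctx,
                                              double scale, int64_t zero_point) {
      mlir::Builder b(ctx);
      return mlir::quant::UniformQuantizedType::get(
          mlir::quant::QuantizationFlags::Signed,
          /*storageType=*/b.getIntegerType(8),
          /*expressedType=*/b.getF32Type(),
          scale, zero_point,
          /*storageTypeMin=*/-128, /*storageTypeMax=*/127);
    }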
  4. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.cc

        // asymmetric range. For a state tensor, assigning correct quantization
        // parameters is sufficient, and for constants with asymmetric range it's
        // not correctly quantized by legacy quantizer so call the new Quantize.
        return Quantize(real_value, tensor_type);
      } else if (width == 16) {
        if (const auto uniform_type = dyn_cast<UniformQuantizedType>(q_type)) {
          const auto quantized_values =
    - Last Modified: Wed May 08 02:10:16 UTC 2024
    - 43.2K bytes
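
    A worked illustration of the asymmetric case the comment describes (not
    the legacy-quantizer code path): int8 parameters derived from a tensor's
    real-valued [min, max] range, nudged so the range contains zero.

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    struct QuantParams {
      float scale;
      int32_t zero_point;
    };

    QuantParams AsymmetricInt8Params(float rmin, float rmax) {
      rmin = std::min(rmin, 0.0f);  // representable range must include 0
      rmax = std::max(rmax, 0.0f);
      QuantParams p;
      p.scale = (rmax - rmin) / 255.0f;
      if (p.scale == 0.0f) p.scale = 1.0f;  // all-zero tensor
      // With q = round(r / scale) + zero_point, this maps rmin to -128.
      p.zero_point = static_cast<int32_t>(std::lround(-128.0f - rmin / p.scale));
      return p;
    }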
  5. tensorflow/compiler/mlir/lite/transforms/prepare_patterns.td

    class UsedBy<string op> : Constraint<
      CPred<"llvm::isa<mlir::TFL::" # op # "Op>(*$0.getUsers().begin())">>;
    
    // When the op is passing-through, the output types of the quantized ops need
    // to be updated as well. Since the quantize op manages its own type by the
    // "qtype" attribute, we should update the type shape in this attribute.
    def ReorderTransposeDequantQuant :
          Pat<(TF_TransposeOp:$old_value
    - Last Modified: Tue Apr 30 00:40:15 UTC 2024
    - 10.5K bytes
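
    The rewrite is sound because dequantization is elementwise, so it commutes
    with transpose; the pattern only has to move the transpose and rewrite the
    shape stored in the qtype attribute. A tiny self-contained check of that
    invariant, with hypothetical scale and zero point:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    int main() {
      const float scale = 0.5f;
      const int zero_point = 1;
      const int R = 2, C = 3;
      std::vector<int8_t> q = {2, 4, 6, 8, 10, 12};  // R x C, row-major
      auto dq = [&](int8_t v) { return scale * (v - zero_point); };

      // Path 1: dequantize, then transpose the float tensor.
      std::vector<float> f(R * C), f_t(C * R);
      for (int i = 0; i < R * C; ++i) f[i] = dq(q[i]);
      for (int r = 0; r < R; ++r)
        for (int c = 0; c < C; ++c) f_t[c * R + r] = f[r * C + c];

      // Path 2: transpose the quantized tensor, then dequantize.
      std::vector<int8_t> q_t(C * R);
      std::vector<float> g(C * R);
      for (int r = 0; r < R; ++r)
        for (int c = 0; c < C; ++c) q_t[c * R + r] = q[r * C + c];
      for (int i = 0; i < C * R; ++i) g[i] = dq(q_t[i]);

      for (int i = 0; i < C * R; ++i) assert(f_t[i] == g[i]);
      return 0;
    }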
  6. tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composite_functions_drq.mlir

    // RUN: tf-quant-opt %s -split-input-file -quant-insert-quantized-functions='quantization-method=drq target-opset=UNIFORM_QUANTIZED' -quant-quantize-composite-functions='quantization-method=drq target-opset=UNIFORM_QUANTIZED' -symbol-dce | FileCheck %s
    
    module {
      // TODO(b/260020937): Support transpose_a, transpose_b for matmul.
      func.func @matmul(%arg0: tensor<2x12xf32>) -> (tensor<*xf32>) {
    - Last Modified: Fri Jan 05 18:35:42 UTC 2024
    - 9.8K bytes
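
    Dynamic-range quantization ("drq") quantizes the weights ahead of time and
    keeps activations in float. A sketch of the resulting hybrid inner product
    (illustrative names; symmetric int8 weights assumed):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // y = sum_i x[i] * (w_scale * w_q[i]): weights dequantized on the fly.
    float HybridDot(const std::vector<float>& x,
                    const std::vector<int8_t>& w_q, float w_scale) {
      float acc = 0.0f;
      for (size_t i = 0; i < x.size() && i < w_q.size(); ++i)
        acc += x[i] * w_scale * static_cast<float>(w_q[i]);
      return acc;
    }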
  7. tensorflow/compiler/mlir/lite/tf_to_tfl_flatbuffer.cc

              &q_builder, input_model, quantized_type, use_updated_hybrid_scheme,
              ::tflite::optimize::QuantizerType::OLD_QUANTIZER) != kTfLiteOk) {
        return absl::InvalidArgumentError(
            "Quantize weights transformation failed.");
      }
      const uint8_t* q_buffer = q_builder.GetBufferPointer();
      *result =
          std::string(reinterpret_cast<const char*>(q_buffer), q_builder.GetSize());
    
    - Last Modified: Fri May 03 18:01:23 UTC 2024
    - 23.8K bytes
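
    In isolation, the copy-out idiom this snippet ends with: take the bytes a
    FlatBufferBuilder produced and return them as a std::string. Assumes the
    flatbuffers library; the helper name is made up.

    #include <string>

    #include "flatbuffers/flatbuffers.h"

    std::string BuilderToString(const flatbuffers::FlatBufferBuilder& builder) {
      const uint8_t* buf = builder.GetBufferPointer();
      return std::string(reinterpret_cast<const char*>(buf), builder.GetSize());
    }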
  8. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/lift_quantizable_spots_as_functions_with_quantization_specs.mlir

    func.func @main(%arg0: tensor<1x2xf32>) -> tensor<1x2xf32> {
      %0 = stablehlo.add %arg0, %arg0 : tensor<1x2xf32>
      return %0 : tensor<1x2xf32>
    }
    // Tests that `composite_add_fn_1` does not quantize when quantizing
    // only compute-heavy ops.
    
    // STATIC-RANGE-PTQ-TO-COMPUTE-HEAVY: %[[CONST:.+]] = stablehlo.constant dense<2.000000e+00>
    - Last Modified: Tue Apr 02 18:09:38 UTC 2024
    - 8.1K bytes
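
    The selection rule the test exercises, as a hypothetical predicate: the op
    names are real StableHLO ops, but the helper itself is made up for
    illustration. Matmul- and convolution-like ops get quantized; cheap
    elementwise ops such as stablehlo.add stay in float.

    #include <set>
    #include <string>

    bool ShouldQuantizeComputeHeavyOnly(const std::string& op_name) {
      static const std::set<std::string> kComputeHeavy = {
          "stablehlo.dot_general", "stablehlo.convolution"};
      return kComputeHeavy.count(op_name) > 0;  // "stablehlo.add" -> false
    }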
  9. tensorflow/compiler/mlir/lite/transforms/passes.h

        bool enable_dynamic_update_slice);
    
    std::unique_ptr<OperationPass<ModuleOp>> CreateLowerStaticTensorListPass();
    
    // Creates an instance of the TensorFlow Lite dialect Quantize pass.
    // Use quant_specs.ops_blocklist and quant_specs.nodes_blocklist if possible
    // as they are now structure variables of QuantizationSpecs.
    std::unique_ptr<OperationPass<func::FuncOp>> CreateQuantizePass(
    - Last Modified: Thu Mar 07 21:29:34 UTC 2024
    - 10.9K bytes
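
    Func-level passes like the one CreateQuantizePass returns are scheduled
    nested under the module-level pass manager. A sketch with a generic
    stand-in pass; the real call would forward the QuantizationSpecs carrying
    ops_blocklist and nodes_blocklist.

    #include "mlir/Dialect/Func/IR/FuncOps.h"
    #include "mlir/Pass/PassManager.h"
    #include "mlir/Transforms/Passes.h"

    void ScheduleFuncLevelPasses(mlir::PassManager& pm) {
      // Stand-in for:
      //   pm.addNestedPass<mlir::func::FuncOp>(CreateQuantizePass(quant_specs));
      pm.addNestedPass<mlir::func::FuncOp>(mlir::createCSEPass());
    }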
  10. tensorflow/compiler/mlir/quantization/tensorflow/passes/add_dump_tensor_op.cc

        // quantized_tensor_data.pb.
        // Since this process doesn't happen for per layer, we need to set file_name
        // as quantized_tensor_data.pb here.
        // TODO: b/296933893 - Refactor the debugger code when no quantize option
        // is added
        std::string file_name =
            debugger_type_ == DebuggerConfig::DEBUGGER_TYPE_WHOLE_MODEL
                ? "unquantized_tensor_data.pb"
                : "quantized_tensor_data.pb";
    
    - Last Modified: Fri Mar 22 22:55:22 UTC 2024
    - 13K bytes