Results 1 - 10 of 294 for quantized_ (0.41 sec)

  1. tensorflow/compiler/mlir/quantization/tensorflow/passes/insert_quantized_functions.cc

        // For consistency, we require all quantized composite functions to have
        // the "tf_quant.quantized_ops" attribute.
        if (!new_func.getSymName().starts_with("quantized_")) continue;
        if (!new_func->hasAttrOfType<ArrayAttr>("tf_quant.quantized_ops")) {
          new_func->emitError() << "Missing \"tf_quant.quantized_ops\" "
                                   "attribute in the quantized composite function.";
          signalPassFailure();
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Mar 22 05:52:39 UTC 2024
    - 8.7K bytes
    - Viewed (0)
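
    As a rough illustration of the consistency check shown above, the toy Python sketch below walks a map of function names to attribute dictionaries and flags any `quantized_`-prefixed entry that is missing the "tf_quant.quantized_ops" attribute. The helper name and the sample data are hypothetical; the actual pass operates on MLIR `func::FuncOp`s.

        # Toy model of the check above: every "quantized_*" function must carry
        # the "tf_quant.quantized_ops" attribute. Names and attrs are invented.
        def find_functions_missing_attr(functions: dict) -> list:
            missing = []
            for name, attrs in functions.items():
                if not name.startswith("quantized_"):
                    continue  # only quantized composite functions are checked
                if "tf_quant.quantized_ops" not in attrs:
                    missing.append(name)  # would trigger emitError in the pass
            return missing

        funcs = {
            "quantized_conv2d_fn": {"tf_quant.quantized_ops": ["Conv2D"]},
            "quantized_matmul_fn": {},      # missing the attribute
            "composite_matmul_fn": {},      # ignored: no "quantized_" prefix
        }
        print(find_functions_missing_attr(funcs))  # ['quantized_matmul_fn']
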
  2. tensorflow/compiler/mlir/quantization/common/lift_as_function_call.h

    // The keyword to detect if this is a `NullAttribute`.
    inline constexpr StringRef kNullAttributeValue = "N/A";
    
    // Prefixes attached to lifted functions.
    constexpr StringRef kQuantizedFuncPrefix = "quantized_";
    constexpr StringRef kCompositeFuncPrefix = "composite_";
    
    // The attribute will be used for TF::XlaCallModuleOp to restore the original
    // function name when loading it back.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 5.5K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/quantization/stablehlo/passes/merge_fusion_with_dequantize.cc

        for (auto user : users) {
          if (!llvm::isa<mlir::stablehlo::UniformDequantizeOp>(user)) {
            return failure();
          }
        }
        auto func_name = call_op.getCallee();
        if (!func_name.starts_with("quantized_")) return failure();
        if (call_op->getNumResults() != 1) return failure();
        if (!mlir::isa<UniformQuantizedType>(
                getElementTypeOrSelf(call_op->getResult(0).getType())))
          return failure();
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 5.9K bytes
    - Viewed (0)
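
    The early-exit checks above amount to a single predicate: every user of the call is a `stablehlo.uniform_dequantize`, the callee name starts with `quantized_`, and the call produces exactly one uniformly quantized result. A minimal Python restatement of that predicate follows; the `CallSite` class and its fields are hypothetical stand-ins for the MLIR ops.

        from dataclasses import dataclass, field

        @dataclass
        class CallSite:  # hypothetical stand-in for the call op being matched
            callee: str
            result_types: list
            user_kinds: list = field(default_factory=list)

        def can_merge_with_dequantize(call: CallSite) -> bool:
            """Mirrors the early-exit checks in the pattern above."""
            if any(k != "stablehlo.uniform_dequantize" for k in call.user_kinds):
                return False  # every user must be a uniform_dequantize op
            if not call.callee.startswith("quantized_"):
                return False  # only quantized callees qualify
            if len(call.result_types) != 1:
                return False  # exactly one result
            return call.result_types[0] == "uniform_quantized"

        print(can_merge_with_dequantize(CallSite(
            "quantized_dot_general_fn", ["uniform_quantized"],
            ["stablehlo.uniform_dequantize"])))  # True
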
  4. tensorflow/compiler/mlir/quantization/stablehlo/cc/report.cc

        return result;
      } else {
        return std::nullopt;
      }
    }
    
    // Populates quantized ops from `module_op` into `results`. After going through
    // the quantization passes, quantized ops are represented as `func::CallOp`s
    // whose callee names are prefixed with `quantized_`.
    void PopulateQuantizedResults(ModuleOp module_op,
                                  QuantizationResults& results) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 6.8K bytes
    - Viewed (0)
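
    A minimal sketch of the convention described in the comment above: after the quantization passes, a quantized op shows up as a call whose callee name begins with `quantized_`, so a report can be assembled simply by filtering calls on that prefix. The callee list below is invented for illustration.

        def populate_quantized_results(call_callees):
            """Keeps only calls that represent quantized ops (callee prefix 'quantized_')."""
            return [c for c in call_callees if c.startswith("quantized_")]

        calls = ["quantized_conv2d_fn", "composite_matmul_fn", "quantized_dot_general_fn"]
        print(populate_quantized_results(calls))
        # ['quantized_conv2d_fn', 'quantized_dot_general_fn']
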
  5. tensorflow/compiler/mlir/quantization/tensorflow/gen_quantized_function_library.py

        # Add op names to the function name.
        function_name = 'quantized_{}'.format(
            _format_snake_case_op_name(quantized_ops[0]))
        if len(quantized_ops) > 1:
          function_name += '_with_{}'.format(
              _format_snake_case_op_name(quantized_ops[1]))
        if len(quantized_ops) > 1:
          for quantized_op in quantized_ops[2:]:
            function_name += '_and_{}'.format(
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Dec 20 01:38:06 UTC 2022
    - 8.4K bytes
    - Viewed (0)
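
    The snippet above composes the quantized function name from the list of fused ops: the first op follows `quantized_`, the second is joined with `_with_`, and any remaining ops with `_and_`. The sketch below reproduces that scheme with a simplified snake-case converter; `to_snake_case` is only a stand-in for `_format_snake_case_op_name`, so the exact spelling of real generated names may differ.

        import re

        def to_snake_case(op_name):
            # Simplified stand-in for _format_snake_case_op_name.
            return re.sub(r'(?<!^)(?=[A-Z])', '_', op_name).lower()

        def quantized_function_name(quantized_ops):
            name = 'quantized_{}'.format(to_snake_case(quantized_ops[0]))
            if len(quantized_ops) > 1:
                name += '_with_{}'.format(to_snake_case(quantized_ops[1]))
                for op in quantized_ops[2:]:
                    name += '_and_{}'.format(to_snake_case(op))
            return name

        print(quantized_function_name(['MatMul', 'BiasAdd', 'Relu']))
        # quantized_mat_mul_with_bias_add_and_relu
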
  6. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver.cc

        // This op has been quantized, so we should not consider it again.
        if (quantized_.contains(op)) continue;
        quantized_.insert(op);
    
        if (auto constant_op = dyn_cast<arith::ConstantOp>(op); constant_op) {
          // If the workflow requires inferring ranges from the content
          // (post-training quantization) and it is a weight (filter) and hasn't
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 38.1K bytes
    - Viewed (0)
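
    The `quantized_` member consulted above is a visited set: an op the driver has already handled is skipped so it is never processed twice. The same guard on a plain Python worklist looks like this; the op names are arbitrary.

        def process_once(worklist):
            """Processes each op at most once, mirroring the `quantized_` set above."""
            quantized = set()
            processed = []
            for op in worklist:
                if op in quantized:
                    continue  # already handled, do not consider it again
                quantized.add(op)
                processed.append(op)  # stand-in for the actual quantization work
            return processed

        print(process_once(["conv", "matmul", "conv"]))  # ['conv', 'matmul']
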
  7. tensorflow/compiler/mlir/quantization/stablehlo/cc/report_test.cc

      // The quantized call op without the _quantization_method attribute is not
      // captured as a `QuantizationResult`.
      ASSERT_THAT(results.results(), IsEmpty());
    }
    
    TEST_F(QuantizationReportTest, InitializeWithModuleOpWithInvalidCalleeName) {
      // A quantized dot_general op whose callee function has an invalid name. It
      // is expected to start with `quantized_`.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 10:10:34 UTC 2024
    - 18.5K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/quantization/stablehlo/passes/quantization_patterns.cc

    //
    //   1. Replaces quantized `TF::XlaCallModuleOp` with a `func::CallOp`.
    //   2. Quantizes the callee function.
    //
    // The input to this pattern is assumed to be invalid IR, where even if a
    // `TF::XlaCallModuleOp` is quantized the callee remains unquantized. Step (2)
    // not only replaces the input and output tensor types with quantized ones, but
    // also rewrites the body with a quantized equivalent.
    //
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 03 06:04:36 UTC 2024
    - 41.7K bytes
    - Viewed (0)
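
    The two steps described above, first replacing the quantized `TF::XlaCallModuleOp` with a plain call and then rewriting the callee so its body is actually quantized, can be pictured with the toy rewrite below. The dictionaries standing in for the module and the body-rewriting step are purely illustrative.

        def quantize_call(module, call):
            # Step 1: swap the op kind so the call becomes a plain func.call.
            call["kind"] = "func.call"
            # Step 2: rewrite the callee body with quantized equivalents (toy version).
            callee = module["functions"][call["callee"]]
            callee["body"] = ["quantized." + op for op in callee["body"]]

        module = {"functions": {
            "composite_dot_general_fn": {"body": ["stablehlo.dot_general"]}}}
        call = {"kind": "tf.XlaCallModule", "callee": "composite_dot_general_fn"}
        quantize_call(module, call)
        print(call["kind"])  # func.call
        print(module["functions"]["composite_dot_general_fn"]["body"])
        # ['quantized.stablehlo.dot_general']
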
  9. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver.h

      bool SetConstantResultParams(Operation* op);
    
      // Inserts the Quantize and Dequantize ops after `op`'s `index`-th result. The
      // quantized element type for the result is `quantized_type`.
      void QuantizeOpResult(Operation* op, int result_index,
                            QuantizedType quantized_type);
    
      // Inserts the Quantize and Dequantize ops after `arg`. The quantized element
      // type for `arg` is `quantized_type`.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Mar 20 11:42:17 UTC 2024
    - 16.8K bytes
    - Viewed (0)
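
    `QuantizeOpResult` wraps an op result in a quantize/dequantize pair. A minimal numeric sketch of that idea follows, using a hypothetical 8-bit affine scheme; the scale and zero-point values are made up for illustration and are not taken from the library.

        def quantize(x, scale, zero_point):
            """Affine quantization to int8, clamped to [-128, 127]."""
            q = round(x / scale) + zero_point
            return max(-128, min(127, q))

        def dequantize(q, scale, zero_point):
            return (q - zero_point) * scale

        # Inserting a quantize/dequantize pair after a result keeps the graph in
        # float while recording the quantized representation of the value.
        scale, zero_point = 0.05, 0
        print(dequantize(quantize(0.42, scale, zero_point), scale, zero_point))  # 0.4
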
  10. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize_composite_functions.cc

        }
        lines.push_back("");
        lines.push_back(absl::StrFormat(
            "Number of quantized layers with quantized outputs: %d/%d",
            total_quantized_func_count - float_output_func_count,
            total_quantized_func_count));
        lines.push_back(absl::StrFormat("Number of quantize layers added: %d",
                                        quantize_func_count));
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 54.5K bytes
    - Viewed (0)