Results 11 - 20 of 23 for isF32 (0.04 sec)

  1. tensorflow/compiler/mlir/quantization/stablehlo/passes/insert_weight_param.cc

        if (op->getNumResults() != 1) {
          return failure();
        }
        auto type = mlir::cast<TensorType>(op->getResult(0).getType());
        if (!type || !type.getElementType().isF32()) {
          return failure();
        }
        return success(
            op->hasOneUse() &&
            IsWeightQuantizableFunction(*op->getUses().begin(), type.getRank()));
      }
    
    - Last Modified: Thu May 09 05:56:10 UTC 2024
    - 10.2K bytes
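
    Each result in this listing follows the same guard shape: inspect a value's
    element type and bail out of the rewrite unless it is f32. Below is a minimal
    sketch of the check in result 1, assuming only the core MLIR C++ API (the
    helper name is ours, not from insert_weight_param.cc). It uses dyn_cast where
    the excerpt uses cast, since cast asserts on a non-tensor type and would
    leave the subsequent null check unreachable.

        #include "mlir/IR/BuiltinTypes.h"
        #include "mlir/IR/Operation.h"
        #include "mlir/Support/LogicalResult.h"

        // Hypothetical helper: succeed only for single-result ops whose
        // result is a tensor with f32 elements.
        static mlir::LogicalResult isSingleF32TensorResult(mlir::Operation *op) {
          if (op->getNumResults() != 1) return mlir::failure();
          auto type = mlir::dyn_cast<mlir::TensorType>(op->getResult(0).getType());
          if (!type || !type.getElementType().isF32()) return mlir::failure();
          return mlir::success();
        }
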
  2. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h

                      dyn_cast_or_null<DequantizeOpT>(operand.getDefiningOp())) {
                is_operand_or_result_modified = true;
                inputs.push_back(dq_op.getOperand());
              } else if (!ele_type.isF32()) {
                // If the operand is an integer tensor, then it doesn't require the
                // DequantizeOp in the pattern.
                inputs.push_back(operand);
              } else {
                return failure();
    - Last Modified: Wed Apr 24 20:30:06 UTC 2024
    - 41.7K bytes
  3. tensorflow/compiler/mlir/quantization/tensorflow/passes/insert_custom_aggregation_ops.cc

          for (OpOperand &input : op->getOpOperands()) {
            Type element_type = getElementTypeOrSelf(input.get().getType());
            // Non-float cases won't be calibrated.
            if (!element_type.isF32()) {
              continue;
            }
    
            // Skip when there is any already existing CustomAggregatorOp found.
            Operation *defining_op = input.get().getDefiningOp();
    - Last Modified: Fri May 17 17:58:54 UTC 2024
    - 14.8K bytes
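
    Result 3 relies on getElementTypeOrSelf, which unwraps shaped types to their
    element type and returns scalar types unchanged, so a single isF32() test
    covers both tensors and plain floats. A small sketch of that operand filter,
    again assuming only the core MLIR API (the helper and its callback are
    illustrative, not from insert_custom_aggregation_ops.cc):

        #include "llvm/ADT/STLExtras.h"
        #include "mlir/IR/Operation.h"
        #include "mlir/IR/TypeUtilities.h"

        // Invoke fn on every operand whose (element) type is f32; non-float
        // operands are skipped, mirroring the calibration filter above.
        static void forEachF32Operand(
            mlir::Operation *op, llvm::function_ref<void(mlir::OpOperand &)> fn) {
          for (mlir::OpOperand &operand : op->getOpOperands()) {
            if (!mlir::getElementTypeOrSelf(operand.get().getType()).isF32())
              continue;
            fn(operand);
          }
        }
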
  4. tensorflow/compiler/mlir/lite/flatbuffer_operator.h

      return std::vector<uint64_t>();
    }
    
    template <>
    inline std::vector<float> GetVector(DenseElementsAttr elements) {
      auto type = elements.getType();
      auto elemType = type.getElementType();
      if (elemType.isF32()) {
        auto vec = llvm::to_vector(llvm::map_range(
            elements.getValues<APFloat>(),
            [&](APFloat value) -> float { return value.convertToFloat(); }));
        return std::vector<float>(vec.begin(), vec.end());
    - Last Modified: Thu May 16 21:00:09 UTC 2024
    - 11.2K bytes
  5. tensorflow/compiler/mlir/tensorflow/translate/tf_mlir_translate.cc

              if (mlir::isa<mlir::IntegerType>(element_type)) {
                rand_val = mlir::IntegerAttr::get(element_type, std::rand());
              } else if (element_type.isF16() || element_type.isF32() ||
                         element_type.isF64()) {
                rand_val = mlir::FloatAttr::get(element_type,
                                                std::rand() * 1.0 / RAND_MAX);
    
              } else {
    - Last Modified: Tue May 07 11:51:44 UTC 2024
    - 14.1K bytes
  6. tensorflow/compiler/mlir/tf2xla/internal/passes/xla_broadcast.cc

      // XLA's all_reduce legalizer bitcasts to 32 bits, so only
      // element types of size <= 4 bytes are supported.
      if (elem_type.isBF16() || elem_type.isF16() || elem_type.isTF32() ||
          elem_type.isF32()) {
        zero = builder.getFloatAttr(elem_type, 0);
      } else {
        return false;
      }
      if (auto ranked_type = dyn_cast<RankedTensorType>(type)) {
        llvm::ArrayRef<int64_t> type_shape = ranked_type.getShape();
    - Last Modified: Thu Jun 13 18:52:07 UTC 2024
    - 13.9K bytes
  7. tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_quantizable_spots_as_functions.cc

        // Only the composite functions with f32 inputs are quantizable.
        if (call_op.getResults().size() == 1 &&
            !mlir::cast<ShapedType>(call_op->getResult(0).getType())
                 .getElementType()
                 .isF32()) {
          check_status.Update(absl::InternalError(
              "Composite functions for quantization should be f32 type."));
        }
    
        // The OK status means this op is quantizable. Return failure since the
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 16.4K bytes
  8. tensorflow/compiler/mlir/lite/transforms/prepare_quantize_dynamic_range.cc

                             QuantizationUnits& quantizable_ops) const {
        // Non-float tensors do not need quantization.
        auto type = mlir::dyn_cast<ShapedType>(op.getType());
        if (!type || !type.getElementType().isF32()) return false;
    
        Value value = op.getResult();
    
        // Check whether dynamic range quantization can be applied.
        for (auto& use : value.getUses()) {
          Operation* user = use.getOwner();
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 20.8K bytes
  9. tensorflow/compiler/mlir/lite/transforms/legalize_patterns.td

    def DenseElementsAttr : ElementsAttrBase<
      CPred<"$_self.isa<DenseElementsAttr>()">,
      "non-opaque constant tensor">;
    
    def F32ElementsAttr : ElementsAttrBase<
      CPred<"$_self.cast<ElementsAttr>().getShapedType().getElementType().isF32()">, "float constant tensor">;
    
    def Int64ElementsAttr : ElementsAttrBase<
      CPred<"$_self.cast<ElementsAttr>().getShapedType().getElementType().isInteger(64)">, "Int 64 constant tensor">;
    
    - Last Modified: Tue Jun 04 13:30:42 UTC 2024
    - 28.5K bytes
  10. tensorflow/compiler/mlir/lite/experimental/tac/transforms/device_transform_patterns.cc

      auto bias_type =
          mlir::RankedTensorType::get({num_units}, output_type.getElementType());
    
      mlir::DenseElementsAttr bias_attr;
      if (output_type.getElementType().isF32()) {
        float val = 0.0;
        bias_attr = mlir::DenseFPElementsAttr::get(bias_type, val);
      } else {
        // TODO(renjieliu): Refactor this and share the logic with
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 25.4K bytes
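
    Results 6 and 10 show the constructive side of the same check: once the
    element type is known to be f32, the pass materializes a zero-valued
    attribute of that type. A minimal sketch of the splat bias built in result
    10, with num_units standing in for the real unit count:

        #include "mlir/IR/Builders.h"
        #include "mlir/IR/BuiltinAttributes.h"
        #include "mlir/IR/BuiltinTypes.h"

        // Build a rank-1, zero-filled f32 bias of the requested length, as
        // device_transform_patterns.cc does once isF32() holds.
        static mlir::DenseElementsAttr makeZeroF32Bias(mlir::Builder &builder,
                                                       int64_t num_units) {
          auto bias_type =
              mlir::RankedTensorType::get({num_units}, builder.getF32Type());
          float val = 0.0f;
          return mlir::DenseFPElementsAttr::get(bias_type, val);
        }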