Results 31 - 40 of 47 for isF32 (0.07 sec)

  1. tensorflow/compiler/mlir/lite/ir/tfl_ops.cc

      const auto output_type = getType(0).cast<ShapedType>();
    
      // Folding only implemented for float tensors.
      if (!input_type.getElementType().isF32() ||
          !weights_type.getElementType().isF32() ||
          !output_type.getElementType().isF32() ||
          (has_bias && !bias_type.getElementType().isF32())) {
        return failure();
      }
    
      // Folding only implemented for static shapes
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 169.2K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/lite/transforms/optimize_patterns.td

    // Checks if the param passed is a F32 ElementsAttr.
    def F32ElementsAttr : ElementsAttrBase<
      CPred<"$_self.isa<ElementsAttr>() && $_self.cast<ElementsAttr>().getShapedType().getElementType().isF32()">,
            "32 bit float constant tensor">;
    
    // Checks if the param passed is a float ElementsAttr.
    def FloatElementsAttr : ElementsAttrBase<
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 16 20:31:41 UTC 2024
    - 66.4K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/lite/transforms/prepare_quantize_dynamic_range.cc

                             QuantizationUnits& quantizable_ops) const {
        // Non-float tensors do not need quantization.
        auto type = mlir::dyn_cast<ShapedType>(op.getType());
        if (!type || !type.getElementType().isF32()) return false;
    
        Value value = op.getResult();
    
        // Check whether dynamic range quantization can be applied.
        for (auto& use : value.getUses()) {
          Operation* user = use.getOwner();
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 20.8K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/transforms/legalize_patterns.td

    def DenseElementsAttr : ElementsAttrBase<
      CPred<"$_self.isa<DenseElementsAttr>()">,
      "non-opaque constant tensor">;
    
    def F32ElementsAttr : ElementsAttrBase<
      CPred<"$_self.cast<ElementsAttr>().getShapedType().getElementType().isF32()">, "float constant tensor">;
    
    def Int64ElementsAttr : ElementsAttrBase<
      CPred<"$_self.cast<ElementsAttr>().getShapedType().getElementType().isInteger(64)">, "Int 64 constant tensor">;
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jun 04 13:30:42 UTC 2024
    - 28.5K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/lite/stablehlo/transforms/compose_uniform_quantized_type_pass.cc

            GetFilterConstantOp(filter_value);
        auto filter_value_attr =
            mlir::cast<DenseElementsAttr>(filter_constant_op.getValue());
        if (filter_value_attr.getElementType().isF32()) {
          // This is i8 values disguised as f32 (due to the upcast trick). Simply
          // cast them to i8.
          filter_value_attr =
              mlir::cast<DenseFPElementsAttr>(filter_value_attr)
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 64.6K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/lite/experimental/tac/transforms/device_transform_patterns.cc

      auto bias_type =
          mlir::RankedTensorType::get({num_units}, output_type.getElementType());
    
      mlir::DenseElementsAttr bias_attr;
      if (output_type.getElementType().isF32()) {
        float val = 0.0;
        bias_attr = mlir::DenseFPElementsAttr::get(bias_type, val);
      } else {
        // TODO(renjieliu): Refactor this and share the logic with
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 25.4K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/lite/transforms/optimize.cc

      }
    
      return false;
    }
    
    // Returns true if the value's element type is F32.
    bool IsF32Value(Value value) {
      return mlir::cast<ShapedType>(value.getType()).getElementType().isF32();
    }
    
    // Returns the number of elements in attr if it is a static shape, 1 otherwise,
    // as an unranked int32 Attribute.
    TypedAttr GetNumElementsOrOne(Type type) {
      auto shaped_type = mlir::cast<ShapedType>(type);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Apr 30 00:40:15 UTC 2024
    - 102.3K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/tensorflow/transforms/lower_tf.cc

                 .isF32()) {
          return failure();
        }
        MLIRContext *context = rewriter.getContext();
        llvm::SmallVector<Value, 2> operands{op.getA(), op.getB()};
        for (Value &operand : operands) {
          TensorType tensor_type = mlir::cast<TensorType>(operand.getType());
          Type element_type = tensor_type.getElementType();
          if (element_type.isF32()) continue;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 74.9K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize_composite_functions.cc

      return base_function_name.concat("_fn").str();
    }
    
    bool ContainsFloatResultType(ArrayRef<Type> result_types) {
      for (auto current_type : result_types) {
        if (mlir::dyn_cast<TensorType>(current_type).getElementType().isF32())
          return true;
      }
      return false;
    }
    
    // Unwraps quantization parameters of PartitionedCall ops with quantized
    // input/outputs that are created from QuantizePass.
    class QuantizeFunctionPattern
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 54.5K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/lite/transforms/prepare_tf.cc

          if (!epsilon)
            epsilon = rewriter.getFloatAttr(rewriter.getF32Type(), 0.0001f);
    
          if (!(((mlir::isa<::mlir::FloatAttr>(epsilon))) &&
                ((mlir::cast<::mlir::FloatAttr>(epsilon).getType().isF32())))) {
            return rewriter.notifyMatchFailure(
                fused_batch_norm_op, [&](::mlir::Diagnostic &diag) {
                  diag << "op 'tf.FusedBatchNormV3' attribute 'epsilon' failed to "
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 28 21:49:50 UTC 2024
    - 64.6K bytes
    - Viewed (0)
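Nearly every hit above applies the same guard: cast a value's type to ShapedType and test whether its element type is f32 before running a float-only fold, legalization, or quantization step. The following is a minimal sketch of that recurring check, written for illustration and not taken from any of the files listed above; the helper name HasF32ElementType and the exact set of includes are assumptions, while the calls themselves (mlir::dyn_cast, ShapedType::getElementType, Type::isF32) are the same ones that appear in the results.

    // Sketch only: a free-standing predicate mirroring the checks seen in
    // tfl_ops.cc, optimize.cc, and prepare_quantize_dynamic_range.cc above.
    #include "mlir/IR/BuiltinTypes.h"  // ShapedType
    #include "mlir/IR/Value.h"         // Value
    #include "mlir/Support/LLVM.h"     // mlir::dyn_cast

    // Returns true only when `value` has a shaped type (e.g. a tensor) whose
    // element type is f32; unranked scalars and non-float tensors are rejected.
    static bool HasF32ElementType(mlir::Value value) {
      auto shaped = mlir::dyn_cast<mlir::ShapedType>(value.getType());
      return shaped && shaped.getElementType().isF32();
    }

In a rewrite or fold, the typical usage is to bail out with return failure() when this predicate is false, as the FullyConnected folding guard in result 1 does for its input, weights, output, and bias operands.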