Results 11 - 20 of 53 for ShapedType (0.12 sec)

  1. tensorflow/compiler/mlir/lite/stablehlo/transforms/fold_broadcast_pass.cc

      auto lhs = dyn_cast_or_null<DenseElementsAttr>(lhs_op.getValue());
      auto rhs = dyn_cast_or_null<DenseElementsAttr>(rhs_op.getValue());
      if (!lhs || !rhs) return {};
    
      ShapedType type = mlir::cast<ShapedType>(op->getType());
      if (!type.hasStaticShape()) {
        return {};
      }
    
      Type etype = type.getElementType();
    
      // Evaluate for element types.
      if (!mlir::isa<ElementType>(etype)) {
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 10.5K bytes
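    The excerpt above guards a constant-folding path: both operands must already be DenseElementsAttr constants, and the result type must be a ShapedType with a fully static shape. A minimal standalone sketch of that guard (FoldIfStatic and its parameters are hypothetical names, not from the file):

      #include "mlir/IR/BuiltinAttributes.h"
      #include "mlir/IR/BuiltinTypes.h"

      static mlir::Attribute FoldIfStatic(mlir::Type result_type,
                                          mlir::Attribute lhs_attr,
                                          mlir::Attribute rhs_attr) {
        auto lhs = mlir::dyn_cast_or_null<mlir::DenseElementsAttr>(lhs_attr);
        auto rhs = mlir::dyn_cast_or_null<mlir::DenseElementsAttr>(rhs_attr);
        if (!lhs || !rhs) return {};  // Operands are not constants; nothing to fold.

        auto type = mlir::dyn_cast<mlir::ShapedType>(result_type);
        if (!type || !type.hasStaticShape()) return {};  // Dynamic dims: skip folding.

        // Element-wise evaluation of the folded value would go here.
        return {};
      }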
  2. tensorflow/compiler/mlir/quantization/stablehlo/passes/quantize_weight.cc

      QuantizationUnits GetQuantizableOps(ConstantOp op) const {
        // Non-float tensors do not need quantization.
        QuantizationUnits quantizable_ops;
        const ShapedType type = mlir::dyn_cast<ShapedType>(op.getType());
        if (!type || !type.getElementType().isF32()) return quantizable_ops;
    
        const Value value = op.getResult();
    
        for (OpOperand& use : value.getUses()) {
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 9.9K bytes
  3. tensorflow/compiler/mlir/tensorflow/ir/tf_traits.h

            element_type) {
          return op->emitOpError(
              "requires compatible element types for all operands and results");
        }
      }
      return success();
    }
    
    inline ShapedType MergeType(ShapedType a, ShapedType b) {
      if (!a.hasRank()) {
        return b;
      }
      if (!b.hasRank()) {
        return a;
      }
      int64_t rank = a.getRank();
      SmallVector<int64_t, 4> dims;
      dims.resize(rank);
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 12.7K bytes
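    The MergeType excerpt stops right after dims.resize(rank). A plausible continuation of such a merge, shown here only as a hedged sketch (not necessarily the file's actual body), keeps the static size from whichever side provides one and rebuilds a ranked type:

      #include "llvm/ADT/SmallVector.h"
      #include "mlir/IR/BuiltinTypes.h"

      inline mlir::ShapedType MergeRankedDims(mlir::ShapedType a, mlir::ShapedType b) {
        // Assumes both types are ranked, of equal rank, with compatible element types.
        llvm::SmallVector<int64_t, 4> dims(a.getRank());
        for (int64_t i = 0, e = a.getRank(); i != e; ++i) {
          int64_t da = a.getDimSize(i);
          // Prefer a static size; fall back to b's size when a's is dynamic.
          dims[i] = mlir::ShapedType::isDynamic(da) ? b.getDimSize(i) : da;
        }
        return mlir::RankedTensorType::get(dims, a.getElementType());
      }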
  4. tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/util.cc

      for (auto s : arr) {
        if (s != iota) return false;
        ++iota;
      }
      return true;
    }
    
    PermutationAndShape GetPermutationAndTransposedShape(
        llvm::ArrayRef<int64_t> permutation_array, ShapedType input_type,
        ConversionPatternRewriter& rewriter) {
      assert(permutation_array.size() == input_type.getRank());
      llvm::SmallVector<int64_t> transposed_shape(permutation_array.size());
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 10.1K bytes
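    The excerpt asserts that the permutation has one entry per input dimension and allocates transposed_shape, but is cut off before filling it. The usual relationship, sketched with a hypothetical helper name, is that result dimension i takes the size of input dimension permutation_array[i]:

      #include "llvm/ADT/ArrayRef.h"
      #include "llvm/ADT/SmallVector.h"
      #include "mlir/IR/BuiltinTypes.h"

      llvm::SmallVector<int64_t> TransposedShape(llvm::ArrayRef<int64_t> permutation_array,
                                                 mlir::ShapedType input_type) {
        llvm::SmallVector<int64_t> transposed_shape(permutation_array.size());
        for (size_t i = 0; i < permutation_array.size(); ++i)
          transposed_shape[i] = input_type.getDimSize(permutation_array[i]);
        return transposed_shape;
      }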
  5. tensorflow/compiler/mlir/tensorflow/translate/export_tf_dialect_op.cc

      }
    
      // Here we only add the shapes for the leading values with ShapedType,
      // assuming values with non-ShapedType are put at the end of the result.
      if (!ignore_unregistered_attrs && inst->getNumResults() > 0) {
        auto values = inst->getResults();
        auto begin = values.begin();
        auto end = values.begin();
        while (end != values.end() && mlir::isa<mlir::ShapedType>((*end).getType()))
          end++;
        if (begin != end) {
    - Last Modified: Wed May 01 11:17:36 UTC 2024
    - 11.1K bytes
  6. tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_quantizable_spots_as_functions.cc

          }
        } else if (function_name.contains("batch_matmul")) {
          // For BatchMatMul, the input must be ranked to determine the batch
          // dimensions.
          ShapedType shaped_type =
              mlir::dyn_cast<ShapedType>(call_op->getOperand(0).getType());
          if (!shaped_type || !shaped_type.hasRank()) {
            return absl::InternalError("The input of BatchMatMul must have rank.");
          }
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 16.4K bytes
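    The comment in the excerpt explains why the rank check matters: without a rank, the batch dimensions cannot be separated from the two contracting dimensions. Illustrative only (the helper below is not from the file), the batch dimensions of a matmul-like operand are everything except the last two:

      #include <cassert>
      #include "llvm/ADT/ArrayRef.h"
      #include "mlir/IR/BuiltinTypes.h"

      llvm::ArrayRef<int64_t> BatchDims(mlir::ShapedType shaped_type) {
        // Precondition: the type is ranked and has at least the two
        // row/column dimensions of a matrix multiply.
        assert(shaped_type.hasRank() && shaped_type.getRank() >= 2);
        return shaped_type.getShape().drop_back(2);
      }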
  7. tensorflow/compiler/mlir/tensorflow/transforms/layout_optimization.cc

        auto transpose_op = *transpose_ops.begin();
        auto result_type =
            mlir::dyn_cast_or_null<ShapedType>(transpose_op.getResult().getType());
        auto is_valid_move =
            llvm::all_of(op->getOperands(), [result_type](Value operand) -> bool {
              auto operand_type =
                  mlir::dyn_cast_or_null<ShapedType>(operand.getType());
              return result_type && operand_type && result_type.hasRank() &&
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 19.3K bytes
  8. tensorflow/compiler/mlir/tensorflow/utils/convert_tensor.cc

    namespace tensorflow {
    
    using llvm::ArrayRef;
    using llvm::SmallVector;
    using mlir::Builder;
    using mlir::DenseStringElementsAttr;
    using mlir::ElementsAttr;
    using mlir::RankedTensorType;
    using mlir::ShapedType;
    using mlir::Type;
    using tensorflow::errors::InvalidArgument;
    
    static TensorProto ConvertToProto(const Tensor& input_tensor,
                                      bool use_tensor_content = true) {
    - Last Modified: Fri Apr 26 09:37:10 UTC 2024
    - 20.5K bytes
  9. tensorflow/compiler/mlir/lite/transforms/prepare_quantize_dynamic_range.cc

          if (!cast_op || cast_op.getResult().use_empty()) continue;
    
          // Get types
          Type old_result_type = op.getResult().getType();
          ShapedType new_result_type =
              mlir::dyn_cast<ShapedType>(cast_op.getType());
    
          // Proceeds only if the casting is to float16
          if (!new_result_type.getElementType().isF16()) continue;
    
          // Cast values
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 20.8K bytes
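    In the excerpt, the result of mlir::dyn_cast<ShapedType> is used directly; a null-safe form of the float16 check, written as a hypothetical helper rather than the file's code, would confirm the cast succeeded before reading the element type:

      #include "mlir/IR/BuiltinTypes.h"

      static bool IsF16Shaped(mlir::Type type) {
        auto shaped = mlir::dyn_cast<mlir::ShapedType>(type);
        return shaped && shaped.getElementType().isF16();
      }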
  10. tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_to_uniform_attribute_utils.cc

          attrs.push_back(rewriter.getNamedAttr(attr_name, attr_val));
        }
      }
    
      auto feature_group_cnt_attr = llvm::StringRef("feature_group_count");
      int feature_group_cnt = 1;
      ShapedType input_shape =
          mlir::dyn_cast<ShapedType>(op->getOperand(0).getType());
      if (!input_shape) {
        return op->emitError(
            "Only input with known shape is supported for Uniform Quantized "
            "opset.");
      }
    
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 18.7K bytes