
Results 41 - 50 of 113 for ShapedType (0.12 sec)

  1. tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/reduce.h

        // we have separate ops for them. If only one of them is used then the other
        // one will be garbage collected later.
        if (!mlir::isa<ShapedType>(operand.getType())) return failure();
        auto operand_type = mlir::cast<ShapedType>(operand.getType());
        if (operand_type.getElementType().isInteger(1)) {
          // TF does not support min or max on boolean (int1) arguments.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 5.7K bytes
    - Viewed (0)
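    A minimal standalone sketch of the same guard, assuming a recent MLIR where isa/cast are free functions; the helper name IsNonBooleanShaped, the main() driver, and the tensor types are illustrative only, not part of the file above.

        #include "mlir/IR/BuiltinTypes.h"
        #include "mlir/IR/MLIRContext.h"

        // Returns true when `type` is a shaped type whose elements are not i1,
        // mirroring the isa/cast guard in the reduce.h snippet above.
        static bool IsNonBooleanShaped(mlir::Type type) {
          if (!mlir::isa<mlir::ShapedType>(type)) return false;
          auto shaped = mlir::cast<mlir::ShapedType>(type);
          return !shaped.getElementType().isInteger(1);
        }

        int main() {
          mlir::MLIRContext ctx;
          auto i1 = mlir::IntegerType::get(&ctx, 1);
          auto f32 = mlir::Float32Type::get(&ctx);
          bool i1_ok = IsNonBooleanShaped(mlir::RankedTensorType::get({4}, i1));    // false
          bool f32_ok = IsNonBooleanShaped(mlir::RankedTensorType::get({4}, f32));  // true
          return (!i1_ok && f32_ok) ? 0 : 1;
        }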
  2. tensorflow/compiler/mlir/quantization/stablehlo/utils/bfloat16_type.cc

    bool IsLargeFloatType(Type type) {
      type = getElementTypeOrSelf(type);
      return isa<FloatType>(type) && type.getIntOrFloatBitWidth() > 16;
    }
    
    Type ToBfloat16Type(Type type) {
      if (auto shaped = mlir::dyn_cast<ShapedType>(type)) {
        const Type elem = shaped.getElementType();
        if (IsLargeFloatType(elem)) {
          return shaped.clone(BFloat16Type::get(type.getContext()));
        }
      } else if (IsLargeFloatType(type)) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 1.6K bytes
    - Viewed (0)
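    The clone-to-bfloat16 pattern above can be exercised in isolation. This sketch assumes the MLIR builtin types and getElementTypeOrSelf; the main() driver and the tensor shape are ours for illustration.

        #include "mlir/IR/BuiltinTypes.h"
        #include "mlir/IR/MLIRContext.h"
        #include "mlir/IR/TypeUtilities.h"

        // Float wider than 16 bits, looking through shaped types, as in the snippet.
        static bool IsLargeFloatType(mlir::Type type) {
          type = mlir::getElementTypeOrSelf(type);
          return mlir::isa<mlir::FloatType>(type) && type.getIntOrFloatBitWidth() > 16;
        }

        int main() {
          mlir::MLIRContext ctx;
          mlir::Type src =
              mlir::RankedTensorType::get({2, 3}, mlir::Float32Type::get(&ctx));
          mlir::Type dst = src;
          if (auto shaped = mlir::dyn_cast<mlir::ShapedType>(src);
              shaped && IsLargeFloatType(shaped)) {
            // clone() keeps the 2x3 shape and swaps the element type to bf16.
            dst = shaped.clone(mlir::BFloat16Type::get(&ctx));
          }
          return mlir::cast<mlir::ShapedType>(dst).getElementType().isBF16() ? 0 : 1;
        }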
  3. tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_quantizable_spots_as_functions.cc

          }
        } else if (function_name.contains("batch_matmul")) {
          // For BatchMatMul, the input must be ranked to determine the batch
          // dimensions.
          ShapedType shaped_type =
              mlir::dyn_cast<ShapedType>(call_op->getOperand(0).getType());
          if (!shaped_type || !shaped_type.hasRank()) {
            return absl::InternalError("The input of BatchMatMul must have rank.");
          }
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 16.4K bytes
    - Viewed (0)
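    A hedged sketch of the ranked-input guard the batch_matmul branch relies on; HasKnownRank and the main() driver are illustrative, not helpers from the file.

        #include "mlir/IR/BuiltinTypes.h"
        #include "mlir/IR/MLIRContext.h"

        // A null dyn_cast result (not a shaped type) or an unranked tensor both fail.
        static bool HasKnownRank(mlir::Type type) {
          auto shaped = mlir::dyn_cast<mlir::ShapedType>(type);
          return shaped && shaped.hasRank();
        }

        int main() {
          mlir::MLIRContext ctx;
          auto f32 = mlir::Float32Type::get(&ctx);
          bool ranked = HasKnownRank(mlir::RankedTensorType::get({8, 16, 32}, f32));  // true
          bool unranked = HasKnownRank(mlir::UnrankedTensorType::get(f32));           // false
          bool scalar = HasKnownRank(f32);                                            // false
          return (ranked && !unranked && !scalar) ? 0 : 1;
        }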
  4. tensorflow/compiler/mlir/tf2xla/transforms/legalize_tf_with_tf2xla.cc

      std::string device_type_;
      bool prefer_tf2xla_;
      bool use_tf2xla_hlo_importer_;
    };
    
    bool ShouldRefineTypeTo(Type original_ty, Type updated_ty) {
      auto updated = mlir::dyn_cast<ShapedType>(updated_ty);
      auto original = mlir::dyn_cast<ShapedType>(original_ty);
    
      // Both types must be shaped types.
      if (!original || !updated) return false;
    
      // Element types must match.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 28 21:49:50 UTC 2024
    - 9.7K bytes
    - Viewed (0)
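    An illustrative refinement check in the spirit of the excerpt, which is cut off after the element-type comment: both types must be shaped and share an element type. Treating "gains a rank" as the refinement condition is our assumption, not necessarily what the real ShouldRefineTypeTo does.

        #include "mlir/IR/BuiltinTypes.h"
        #include "mlir/IR/MLIRContext.h"

        static bool ShouldRefine(mlir::Type original_ty, mlir::Type updated_ty) {
          auto updated = mlir::dyn_cast<mlir::ShapedType>(updated_ty);
          auto original = mlir::dyn_cast<mlir::ShapedType>(original_ty);

          // Both types must be shaped types.
          if (!original || !updated) return false;

          // Element types must match.
          if (original.getElementType() != updated.getElementType()) return false;

          // Assumed condition: refine only when rank information is gained.
          return !original.hasRank() && updated.hasRank();
        }

        int main() {
          mlir::MLIRContext ctx;
          auto f32 = mlir::Float32Type::get(&ctx);
          auto unranked = mlir::UnrankedTensorType::get(f32);
          auto ranked = mlir::RankedTensorType::get({1, 4}, f32);
          return (ShouldRefine(unranked, ranked) && !ShouldRefine(ranked, unranked)) ? 0 : 1;
        }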
  5. tensorflow/compiler/mlir/tensorflow/transforms/layout_optimization.cc

        auto transpose_op = *transpose_ops.begin();
        auto result_type =
            mlir::dyn_cast_or_null<ShapedType>(transpose_op.getResult().getType());
        auto is_valid_move =
            llvm::all_of(op->getOperands(), [result_type](Value operand) -> bool {
              auto operand_type =
                  mlir::dyn_cast_or_null<ShapedType>(operand.getType());
              return result_type && operand_type && result_type.hasRank() &&
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 19.3K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/tensorflow/utils/convert_tensor.cc

    namespace tensorflow {
    
    using llvm::ArrayRef;
    using llvm::SmallVector;
    using mlir::Builder;
    using mlir::DenseStringElementsAttr;
    using mlir::ElementsAttr;
    using mlir::RankedTensorType;
    using mlir::ShapedType;
    using mlir::Type;
    using tensorflow::errors::InvalidArgument;
    
    static TensorProto ConvertToProto(const Tensor& input_tensor,
                                      bool use_tensor_content = true) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Apr 26 09:37:10 UTC 2024
    - 20.5K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/lite/transforms/prepare_quantize_dynamic_range.cc

          if (!cast_op || cast_op.getResult().use_empty()) continue;
    
          // Get types
          Type old_result_type = op.getResult().getType();
          ShapedType new_result_type =
              mlir::dyn_cast<ShapedType>(cast_op.getType());
    
          // Proceed only if the cast result type is float16
          if (!new_result_type.getElementType().isF16()) continue;
    
          // Cast values
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 20.8K bytes
    - Viewed (0)
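    The excerpt above calls getElementType() on the dyn_cast result without a null check; a standalone version of the float16 test with an explicit guard could look like this (IsF16Shaped is our name, not part of the pass).

        #include "mlir/IR/BuiltinTypes.h"
        #include "mlir/IR/MLIRContext.h"

        // True only when `type` is a shaped type whose elements are f16; a failed
        // dyn_cast (e.g. on a scalar type) returns false instead of dereferencing null.
        static bool IsF16Shaped(mlir::Type type) {
          auto shaped = mlir::dyn_cast<mlir::ShapedType>(type);
          return shaped && shaped.getElementType().isF16();
        }

        int main() {
          mlir::MLIRContext ctx;
          auto f16 = mlir::Float16Type::get(&ctx);
          auto f32 = mlir::Float32Type::get(&ctx);
          bool yes = IsF16Shaped(mlir::RankedTensorType::get({2}, f16));  // true
          bool no = IsF16Shaped(mlir::RankedTensorType::get({2}, f32));   // false
          bool scalar = IsF16Shaped(f16);                                 // false
          return (yes && !no && !scalar) ? 0 : 1;
        }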
  8. tensorflow/compiler/mlir/lite/experimental/tac/transforms/fold_constants_to_subgraph.cc

        // arith ConstOp path.
        auto type =
            mlir::cast<ShapedType>(arith_const_op.getType()).getElementType();
        if (!type.isInteger(32) && !type.isInteger(64)) return false;
      } else if (auto const_op = dyn_cast_or_null<TFL::ConstOp>(op)) {
        // ConstOp path.
        auto type = mlir::cast<ShapedType>(const_op.getType()).getElementType();
        if (!type.isInteger(32) && !type.isInteger(64)) return false;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 6.1K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/tensorflow/transforms/readonly_references_to_resources.cc

      if (walk_res.wasInterrupted()) return signalPassFailure();
    
      for (VariableV2Op variable_v2_op : variable_v2s_to_replace) {
        builder.setInsertionPoint(variable_v2_op);
        ShapedType shaped_type =
            mlir::cast<ShapedType>(variable_v2_op.getResult().getType());
        TensorType tensor_type = mlir::cast<TensorType>(DropRefType(shaped_type));
        StringAttr device_attr =
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 7.8K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_to_uniform_attribute_utils.cc

          attrs.push_back(rewriter.getNamedAttr(attr_name, attr_val));
        }
      }
    
      auto feature_group_cnt_attr = llvm::StringRef("feature_group_count");
      int feature_group_cnt = 1;
      ShapedType input_shape =
          mlir::dyn_cast<ShapedType>(op->getOperand(0).getType());
      if (!input_shape) {
        return op->emitError(
            "Only input with known shape is supported for Uniform Quantized "
            "opset.");
      }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 18.7K bytes
    - Viewed (0)