- Sort by: Score
- Results per page: 10
- Languages All
Results 11 - 20 of 119 for ShapedType (0.13 sec)
-
tensorflow/compiler/mlir/quantization/common/attrs_and_constraints.cc
using ::mlir::stablehlo::DotGeneralOp; bool HasStaticShape(Value value) { auto shaped_type = mlir::dyn_cast<ShapedType>(value.getType()); if (!shaped_type) return false; return shaped_type.hasStaticShape(); } bool HasStaticShapeAtDims(Value value, const ArrayRef<int> dims) { auto shaped_type = mlir::dyn_cast<ShapedType>(value.getType()); if (!shaped_type || !shaped_type.hasRank()) return false; for (auto dim : dims) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 6.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/scatter.h
if (operands.size() != 1 || updates.size() != 1) return failure(); ShapedType operand_type = mlir::cast<ShapedType>(operands[0].getType()); ShapedType indices_type = mlir::cast<ShapedType>(indices.getType()); ShapedType updates_type = mlir::cast<ShapedType>(updates[0].getType()); Value new_updates = updates[0]; // Can only convert with static shaped scatter.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 10.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/dot_general.cc
Value ConvertDot(PatternRewriter& rewriter, Value lhs, Value rhs, mhlo::DotDimensionNumbersAttr dot_dimension_numbers, ShapedType result_type, mlir::Location loc) { auto lhs_type = mlir::cast<ShapedType>(lhs.getType()); auto rhs_type = mlir::cast<ShapedType>(rhs.getType()); const int lhs_rank = lhs_type.getRank(); const int rhs_rank = rhs_type.getRank(); ImplicitLocOpBuilder builder(loc, rewriter);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 19.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/optimize.cc
bool IsLastDimEqualToNumElements(Type type1, Type type2) { return (mlir::cast<ShapedType>(type1).getRank() >= 1 && mlir::cast<ShapedType>(type1).getDimSize( mlir::cast<ShapedType>(type1).getRank() - 1) == mlir::cast<ShapedType>(type2).getNumElements()); } bool CanFuseConvOrDepthwiseConvShapes(const ArrayRef<int64_t> filter_shape,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 00:40:15 UTC 2024 - 102.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/composite_utils.cc
if (auto bool_attr = mlir::dyn_cast_or_null<BoolAttr>(attr)) { return bool_attr.getValue(); } return std::nullopt; } ShapedType GetNhwcReturnTypeFromNchw(Operation* old_op) { auto composite_result_shape = mlir::cast<ShapedType>(old_op->getResults().front().getType()).getShape(); std::array<int64_t, 4> output_shape; // NHWC <- NCHW output_shape[0] = composite_result_shape[0];
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 29 18:33:05 UTC 2024 - 3.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/replace_cast_hacks_with_tf_xla_ops.cc
void BroadcastBatchDimensionsForBatchMatMul(OpBuilder &builder, Location loc, Value &input, Value &weight) { ShapedType input_type = mlir::cast<ShapedType>(input.getType()); ShapedType weight_type = mlir::cast<ShapedType>(weight.getType()); const int32_t input_rank = input_type.getRank(); const int32_t weight_rank = weight_type.getRank();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 47.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_to_xla_attribute_utils.cc
return builder.create<TF::ReshapeOp>( loc, RankedTensorType::get(shape, builder.getI32Type()), value, CreateConstValue<int64_t>(builder, loc, {rank}, shape)); }; ShapedType filter_shape = mlir::cast<ShapedType>(filter.getType()); Value input_shape_value = builder.create<TF::ShapeOp>( loc, RankedTensorType::get({num_dims}, builder.getI32Type()), input);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 17:58:54 UTC 2024 - 13.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/ir/tfl_ops.cc
if (dim == axis) { if (ShapedType::isDynamic(operand_dim_size) || ShapedType::isDynamic(result_dim_size)) { result_dim_sizes[axis] = ShapedType::kDynamic; } else { result_dim_sizes[axis] += operand_dim_size; } continue; } if (ShapedType::isDynamic(operand_dim_size)) continue; if (ShapedType::isDynamic(result_dim_size)) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 169.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/decompose_hybrid_quantization.cc
Operation *op = srcop.getOperation(); bool allTypesFp = true; bool allTypesQuantizedOrInt = true; for (auto operand : op->getOperands()) { ShapedType type = mlir::dyn_cast<ShapedType>(operand.getType()); if (!type) continue; allTypesFp &= !mlir::isa<quant::QuantizedType>(type.getElementType()); allTypesQuantizedOrInt &=
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/utils/constant_utils.cc
} } // Returns a Constant op with a splat vector value. absl::StatusOr<arith::ConstantOp> CreateConstOpWithVectorValue( PatternRewriter* rewriter, Location loc, ShapedType shaped_type, int value) { ShapedType dense_type = RankedTensorType::get(shaped_type.getShape(), shaped_type.getElementType()); auto attr = CreateTypedAttr(dense_type, value);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 6.5K bytes - Viewed (0)