- Sort Score
- Results per page: 10
- Languages All
Results 61 - 70 of 130 for ShapedType (0.32 sec)
-
tensorflow/compiler/mlir/lite/stablehlo/transforms/composite_avg_pool.h
// Given a Composite op that wraps a core.aten.avg_pool2d, and assuming that // the padding part is extracted into a tfl.pad op prior to a // tfl.average_pool_2d, this function finds the return type of the needed // tfl.pad. ShapedType GetPadOpType(mhlo::CompositeOp op); // Given a Composite op that wraps a core.aten.avg_pool2d, finds the padding // attribute to be passed to the tfl.average_pool_2d that can fully replace
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 28 23:16:05 UTC 2024 - 2.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/experimental/tac/transforms/device_transform.cc
if (!IsQI32Type(input_dequant.getType())) return failure(); auto output_type = mlir::dyn_cast_or_null<ShapedType>(dequant_op.getOutput().getType()); if (!output_type || !output_type.getElementType().isF32()) return failure(); auto input_type = mlir::dyn_cast<ShapedType>(input_dequant.getType()); // TODO(renjieliu): support UniformQuantizedPerAxisType.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 8.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/optimize.cc
// Convert mhlo.dot to mhlo.dot_general. LogicalResult ConvertDotToDotGeneral(mhlo::DotOp op, PatternRewriter &rewriter) { auto lhs_type = mlir::cast<ShapedType>(op.getLhs().getType()); auto rhs_type = mlir::cast<ShapedType>(op.getRhs().getType()); if (!lhs_type.hasRank() || !rhs_type.hasRank()) { return rewriter.notifyMatchFailure(op, "unsupported unranked input type"); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 26.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/ir/tf_op_base.td
CPred<"$_op.getOperand(" # n # ").getType().cast<ShapedType>().getRank() == " # m>]>>; // Returns true if the n-th result has unknown rank or has rank m. class TF_ResultHasRank<int n, int m> : PredOpTrait<"result " # n # " is " # m # "-D", Or<[TF_ResultIsUnrankedPred<n>, CPred<"$_op.getResult(" # n # ").getType().cast<ShapedType>().getRank() == " # m>]>>;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 30.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/lower_tf.cc
auto loc = op.getLoc(); auto result_ty = mlir::cast<ShapedType>(op.getType()); auto input = op.getImages(); auto input_ty = mlir::cast<ShapedType>(input.getType()); auto input_element_ty = input_ty.getElementType(); auto out_size = op.getSize(); auto out_size_ty = mlir::cast<ShapedType>(out_size.getType()); auto out_size_element_ty = out_size_ty.getElementType();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 74.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/ir/tfl_ops.td
CPred<"$_op.getOperand(0).getType().cast<ShapedType>().getRank() <= 1">, CPred<"$_op.getOperand(0).getType().cast<ShapedType>().getRank() == 2 && !$_op.getOperand(0).getType().cast<ShapedType>().hasStaticShape()">, CPred<"$_op.getOperand(0).getType().cast<ShapedType>().getRank() == 2 && $_op.getOperand(0).getType().cast<ShapedType>().getShape()[1] <= 4">]>>]> {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 06 19:09:08 UTC 2024 - 186K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/utils/lstm_utils_test.cc
.isExactlyValue(0.0f)); EXPECT_EQ(fused_lstm_func_.getFunctionType().getNumResults(), 1); auto output_types = fused_lstm_func_.getFunctionType().getResults(); SmallVector<int64_t, 2> output_shape{1, mlir::ShapedType::kDynamic}; EXPECT_EQ(mlir::cast<RankedTensorType>(output_types[0]).getShape().size(), output_shape.size()); for (int i = 0; i < output_shape.size(); i++) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 10K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/post_quantize.cc
return failure(); } // Remove identity reshape with both static result and input shape. auto result_type = mlir::cast<ShapedType>(op.getType()); auto input_type = mlir::cast<ShapedType>(op.getInput().getType()); // Constant folding // If the result type isn't static, tries to derive the result type from // the #2 operand.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 17.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/legalize_variables.cc
// not. const char kLegalizeTflVariables[] = "tfl._legalize_tfl_variables"; bool HasSupportedElementType(Operation* op) { return utils::IsSupportedVariableType(op); } bool IsSupportedElementType(ShapedType type) { return utils::IsSupportedVariableType(type); } #include "tensorflow/compiler/mlir/lite/transforms/generated_legalize_variables.inc" // Pass which legalizes TF variables which are already passed as bounded
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 3.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/convert_tf_quant_ops_to_mhlo.cc
const xla::ConvolutionDimensionNumbers &dnums, PatternRewriter &rewriter) { StringAttr conv_padding = op.getPaddingAttr(); SmallVector<int64_t> padding_nums; ShapedType lhs_shape = mlir::cast<ShapedType>(op.getLhs().getType()); ShapedType rhs_shape = mlir::cast<ShapedType>(op.getRhs().getType()); // Handle only static shape cases. // TODO(b/260284866): Handle dynamic shape cases. if (!lhs_shape.hasStaticShape()) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 17:58:54 UTC 2024 - 30.9K bytes - Viewed (0)