- Sort Score
- Results per page: 10
- Languages All
Results 11 - 20 of 57 for getRank (0.15 sec)
-
tensorflow/compiler/mlir/lite/stablehlo/transforms/hlo_matchers.cc
auto iota_shape = iota_type.getShape(); auto reduce_dim = (*dimensions.value_begin<APInt>()).getSExtValue(); if (reduce_dim < 0) reduce_dim += iota_type.getRank(); auto index = std::optional<SmallVector<int64_t>>(std::in_place, iota_type.getRank()); while (index.has_value()) { StridedArrayView<DenseIntElementsAttr> array_view( iota_const_attr, iota_shape, *index, reduce_dim);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 11.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/insert_weight_param.cc
if (!type || !type.getElementType().isF32()) { return failure(); } return success( op->hasOneUse() && IsWeightQuantizableFunction(*op->getUses().begin(), type.getRank())); } // Checks if the operand is second operand of `tf.XlaCallModule` op for // `stablehlo.convolution` or `stablehlo.dot_general` with fully_quantizable // trait.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 09 05:56:10 UTC 2024 - 10.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/dense_to_sparse.cc
} // Currently we only support compressing weights of ops: // Conv, DepthwiseConv, TransposeConv, whose filter has rank 4, and // FullyConnected, whose filter has rank 2. if (type.getRank() != 2 && type.getRank() != 4) { result.can_compress = false; return result; } float random_sparsity = CalculateRandomSparsity(attr, type); if (random_sparsity < kMinSparsityLevel) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 16.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/utils/utils.h
inline Type TransposeLastTwoDims(Type type) { auto shaped_type = type.dyn_cast<ShapedType>(); if (!shaped_type.hasStaticShape() || shaped_type.getRank() < 2) { return nullptr; } int rank = shaped_type.getRank(); if (rank < 2) { return nullptr; } SmallVector<int64_t> new_shape(shaped_type.getShape().begin(), shaped_type.getShape().end());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 00:40:15 UTC 2024 - 11.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/utils/lstm_utils.cc
auto input_type = mlir::cast<RankedTensorType>(input.getType()); SmallVector<int64_t, 4> output_shape; int size_of_splits; if (input_type.getRank() < axis || axis < 0) return failure(); for (int i = 0; i < input_type.getRank(); ++i) { int64_t dim = input_type.getDimSize(i); if (i == axis) { if (dim % splits != 0) { return failure(); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 36.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/fold_broadcast_pass.cc
// Index for the broadcasted matrix. llvm::SmallVector<int64_t, 16> current_index(result_type.getRank(), 0); // Computes the new operand shape using the original shape and the broadcast // dimensions to match result shape. llvm::SmallVector<int64_t, 16> operand_new_shape(result_type.getRank(), 1); for (int i = 0; i < dimensions.size(); ++i) { operand_new_shape[dimensions[i]] = operand.getType().getDimSize(i);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 10.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/layout_optimization.cc
mlir::dyn_cast_or_null<ShapedType>(operand.getType()); return result_type && operand_type && result_type.hasRank() && operand_type.hasRank() && result_type.getRank() == operand_type.getRank(); }); if (!is_valid_move) return; } // At this point we checked that we can safely move Transpose node before // `op`, and bypass all result transposes.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 19.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_quantize_helper.h
for (const auto& axis_int : perm.getValues<APInt>()) { int64_t axis = axis_int.getSExtValue(); if (axis < 0) { axis += input_type.getRank(); } if (axis < 0 || (input_type.hasRank() && axis >= input_type.getRank())) { return transpose_op.emitOpError("perm must be in [-rank, rank)"); } if (std::count(axes.begin(), axes.end(), axis) > 0) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 18:01:23 UTC 2024 - 28K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/transforms/legalize_tf_collective.cc
hlo::convertElementsAttr(group_assignment, builder.getIntegerType(64))); if (replica_groups.getType().getRank() != 2) { return op->emitOpError() << "group_assignment should have rank 2, got " << replica_groups.getType().getRank(); } return success(); } ChannelHandleAttr ConvertChannel(OpBuilder& builder, int64_t channel_id,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 16K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/lower_tf.td
//===----------------------------------------------------------------------===// def GetBiasAddGradReductionIndices : NativeCodeCall< "GetBiasAddGradReductionIndices(" "$0.getType().cast<RankedTensorType>().getRank(), $1, &$_builder)">; def LowerBiasAddGradOp : Pat<(TF_BiasAddGradOp AnyRankedTensor:$out_backprop, $data_format), (TF_SumOp $out_backprop, (TF_ConstOp (GetBiasAddGradReductionIndices $out_backprop,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 04 13:30:42 UTC 2024 - 24.7K bytes - Viewed (0)