Results 61 - 70 of 129 for getRank (0.68 sec)

  1. tensorflow/compiler/mlir/lite/transforms/legalize_patterns.td

                             (TFL_TopKV2Op $input, $k)>;
    
    def ReductionDimensionIsLastDim : Constraint<CPred<"($0.cast<IntegerAttr>().getInt() == "
      "$1.getType().cast<ShapedType>().getRank() - 1 || $0.cast<IntegerAttr>().getInt() == -1)">>;
    
    // Legalizes TF_ApproxTopKOp to TFL_TopKV2Op with the following constraints:
    //    1. It computes max k
    //    2. The reduction dimension is the last dim of the input.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jun 04 13:30:42 UTC 2024
    - 28.5K bytes
    - Viewed (0)
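    The CPred above reduces to a simple integer check on the reduction axis. A
    minimal standalone sketch of that predicate, assuming the axis and the
    input rank have already been extracted as plain integers (the helper name
    is illustrative):

        #include <cstdint>

        // Sketch of the ReductionDimensionIsLastDim constraint: the axis must
        // name the last dimension, either as rank - 1 or via the -1 shorthand.
        bool ReducesAlongLastDim(int64_t axis, int64_t rank) {
          return axis == rank - 1 || axis == -1;
        }

    For a rank-4 input, both axis 3 and axis -1 satisfy the constraint.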
  2. tensorflow/compiler/mlir/quantization/common/attrs_and_constraints.h

    // Returns true if `value` has the given `rank`. Returns false if the
    // value's type is not a `ShapedType` or its rank is unknown.
    inline bool HasRankOf(Value value, const int64_t rank) {
      auto shaped_type = mlir::dyn_cast_or_null<ShapedType>(value.getType());
      return shaped_type && shaped_type.hasRank() && shaped_type.getRank() == rank;
    }
    
    // Creates a new type that has the shape from the `old_type` and the element
    // type from the `element_type`.
    Type CloneTypeWithNewElementType(Type old_type, Type element_type);
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 9.9K bytes
    - Viewed (0)
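    For reference, the same rank check can be written without the MLIR types.
    A minimal standalone analogue, assuming a shape is modeled as an optional
    dimension vector where an empty optional stands for an unranked type
    (names are illustrative):

        #include <cstdint>
        #include <optional>
        #include <vector>

        // Standalone analogue of HasRankOf: unranked shapes never match a rank.
        bool HasRankOfSketch(const std::optional<std::vector<int64_t>>& shape,
                             int64_t rank) {
          return shape.has_value() && static_cast<int64_t>(shape->size()) == rank;
        }

    The MLIR version above adds one more guard: dyn_cast_or_null yields null
    for values whose type is not a ShapedType, so those fail the check as well.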
  3. tensorflow/compiler/mlir/lite/transforms/legalize_tf.cc

            return (constant.getType().getRank() == 2);
          };
    
      auto op = cast<BatchMatMulOpType>(bmm_op);
    
      // Create a tfl.transpose op that performs ZX transpose on `input`.
      auto create_z_x_transpose_op = [&](Value input) -> Value {
        RankedTensorType input_type = mlir::cast<RankedTensorType>(input.getType());
        const int input_rank = input_type.getRank();
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon May 20 20:06:54 UTC 2024
    - 45.2K bytes
    - Viewed (0)
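    A sketch of the permutation such a transpose would use, assuming a ZX
    transpose here means swapping the two innermost dimensions of `input`
    (the helper name and the plain-vector interface are illustrative, not
    taken from legalize_tf.cc):

        #include <cstdint>
        #include <numeric>
        #include <utility>
        #include <vector>

        // Identity permutation with the last two axes swapped,
        // e.g. rank 4 -> {0, 1, 3, 2}.
        std::vector<int64_t> LastTwoAxesPermutation(int64_t input_rank) {
          std::vector<int64_t> perm(input_rank);
          std::iota(perm.begin(), perm.end(), 0);
          if (input_rank >= 2) std::swap(perm[input_rank - 1], perm[input_rank - 2]);
          return perm;
        }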
  4. tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/convert_tf_quant_ops_to_mhlo.cc

        return op.emitError("lhs must have static shape.");
      }
      if (!rhs_shape.hasStaticShape()) {
        return op.emitError("rhs must have static shape.");
      }
    
      const int64_t padding_nums_size = 2 * (rhs_shape.getRank() - 2);
      padding_nums.reserve(padding_nums_size);
      if (conv_padding.strref() == "EXPLICIT") {
        for (auto padding_elem :
             op.getExplicitPaddingAttr().template getAsRange<IntegerAttr>()) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 17 17:58:54 UTC 2024
    - 30.9K bytes
    - Viewed (0)
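    The sizing arithmetic above is easy to check by hand: a convolution RHS of
    rank R has R - 2 spatial dimensions, and explicit padding stores a low and
    a high value for each of them. A minimal sketch (the function name is
    illustrative):

        #include <cstdint>

        // Two padding entries (low, high) per spatial dimension of the RHS.
        constexpr int64_t PaddingNumsSize(int64_t rhs_rank) {
          return 2 * (rhs_rank - 2);
        }

        static_assert(PaddingNumsSize(4) == 4,
                      "2-D conv filter: 2 spatial dims x {low, high}");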
  5. tensorflow/compiler/mlir/tensorflow/ir/tf_traits.h

      }
      return success();
    }
    
    inline ShapedType MergeType(ShapedType a, ShapedType b) {
      if (!a.hasRank()) {
        return b;
      }
      if (!b.hasRank()) {
        return a;
      }
      int64_t rank = a.getRank();
      SmallVector<int64_t, 4> dims;
      dims.resize(rank);
      for (int i = 0, e = rank; i != e; i++) {
        int64_t dim0 = a.getDimSize(i);
        int64_t dim1 = b.getDimSize(i);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 12.7K bytes
    - Viewed (0)
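    The loop that the snippet cuts off presumably fills `dims` one dimension
    at a time; the usual merge rule is to keep whichever side has a static
    size. A standalone sketch of that per-dimension rule, assuming MLIR's
    convention that a dynamic size is a distinguished sentinel (the helper
    name is illustrative):

        #include <cstdint>

        // Per-dimension merge: prefer the static size if one side is dynamic.
        // `kDynamic` stands in for ShapedType::kDynamic.
        int64_t MergeDim(int64_t dim0, int64_t dim1, int64_t kDynamic) {
          return dim0 == kDynamic ? dim1 : dim0;
        }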
  6. tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_to_xla_attribute_utils.cc

    // packed_value = bitwise_or(packed_low, packed_high)
    Value PackOperand(OpBuilder &builder, Location loc, Value value, int pack_dim) {
      ShapedType value_type = mlir::cast<ShapedType>(value.getType());
      const int rank = value_type.getRank();
    
      SmallVector<int64_t> packed_shape(value_type.getShape().begin(),
                                        value_type.getShape().end());
      RankedTensorType shape_type =
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 17 17:58:54 UTC 2024
    - 13.3K bytes
    - Viewed (0)
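    Judging from the comment and the shape copy above, packing pairs two
    4-bit values into one byte along `pack_dim`, which halves that dimension.
    A standalone sketch of the shape arithmetic under that assumption (names
    are illustrative):

        #include <cstdint>
        #include <vector>

        // Halve the packed dimension, rounding up so an odd size still fits
        // once padded to an even length.
        std::vector<int64_t> PackedShape(std::vector<int64_t> shape, int pack_dim) {
          shape[pack_dim] = (shape[pack_dim] + 1) / 2;
          return shape;
        }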
  7. tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_to_uniform_attribute_utils.cc

        auto output_scale_type =
            mlir::dyn_cast<ShapedType>(op->getOperand(3).getType());
        if (!output_scale_type) {
          return failure();
        }
        if (output_scale_type.hasRank() && 0 < output_scale_type.getRank()) {
          output_quantization_axis = activation_quantization_axis;
        }
      }
      // For per-axis -> per-axis requantization, input and output quantization
      // axis must be equal.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 18.7K bytes
    - Viewed (0)
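    The rank test above encodes a common quantization convention: a scalar
    (rank-0) scale means per-tensor quantization, while a scale with rank
    greater than zero is treated as per-axis. A standalone sketch of that
    test, using the same optional-shape model as earlier (names are
    illustrative):

        #include <cstdint>
        #include <optional>
        #include <vector>

        // Per-axis iff the scale has a known, non-zero rank.
        bool IsPerAxisScale(const std::optional<std::vector<int64_t>>& scale_shape) {
          return scale_shape.has_value() && !scale_shape->empty();
        }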
  8. tensorflow/compiler/mlir/lite/quantization/tensorflow/fallback_to_flex_ops.cc

    }
    
    // Returns true if the rank of the value equals to the given rank.
    bool RankEquals(Value value, int rank) {
      auto rank_type = mlir::dyn_cast<RankedTensorType>(value.getType());
      return (rank_type && rank_type.getRank() == rank);
    }
    
    #include "tensorflow/compiler/mlir/lite/quantization/tensorflow/fallback_to_flex_patterns.inc"
    
    void FallbackToFlexOps::runOnOperation() {
      if (mode_.empty()) return;
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 12.2K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/lite/transforms/lower_static_tensor_list.cc

        // the first `TensorListSetItemOp`.
        if (auto shaped_type = element_shape.getType().dyn_cast<ShapedType>()) {
          if (shaped_type.hasRank() && shaped_type.getRank() == 0) {
            bool element_shape_acquired = false;
            auto uses = op.getResult().getUses();
            for (auto &use : llvm::make_early_inc_range(uses)) {
              if (TF::TensorListSetItemOp set_op =
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jun 11 20:00:43 UTC 2024
    - 70.7K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/tensorflow/transforms/collection_ops_util.cc

      auto per_slice_shape = llvm::to_vector<8>(buffer_type.getShape());
      per_slice_shape[0] = 1;
      auto slice_sizes = GetR1Const(per_slice_shape, builder, loc);
      llvm::SmallVector<int64_t, 8> starts_in_update(buffer_type.getRank(), 0);
      for (int64_t i = 0; i < updates_type.getDimSize(0); ++i) {
        auto index = builder.create<TF::SliceOp>(
            loc, ArrayRef<Type>{GetSizeType(builder)},
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 14.5K bytes
    - Viewed (0)
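    The snippet sets up per-slice indexing over the leading dimension: each
    iteration reads a slice of size [1, d1, d2, ...], and a natural way to
    compute the start indices is to advance only dimension 0. A standalone
    sketch under that reading (names are illustrative):

        #include <cstdint>
        #include <vector>

        // Start indices for slice i: only the leading dimension advances.
        std::vector<int64_t> SliceStartIndices(int64_t i, int64_t buffer_rank) {
          std::vector<int64_t> starts(buffer_rank, 0);
          if (buffer_rank > 0) starts[0] = i;
          return starts;
        }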