Results 41 - 50 of 106 for getRank (0.23 sec)

  1. tensorflow/compiler/mlir/tensorflow/transforms/tpu_annotate_dynamic_shape_inputs.cc

          BlockArgument arg = func.getArgument(index);
          auto inputType = mlir::dyn_cast<RankedTensorType>(arg.getType());
          // Only rank-1 tensors are supported for now.
          if (!inputType || inputType.getRank() != 1) continue;
          auto shape = llvm::to_vector<4>(inputType.getShape());
          llvm::SmallVector<int64_t, 4> bounds(shape.begin(), shape.end());
          // Mark the dim as dynamic dim.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 6.2K bytes
    - Viewed (0)
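
The excerpt in result 1 accepts only rank-1 inputs, copies the static size into a bounds vector, and then marks the dimension dynamic. Below is a minimal standalone sketch of that bookkeeping over a plain shape vector; MakeBoundsForRank1 and the kDynamicDim sentinel are illustrative stand-ins for the pass's MLIR types and ShapedType::kDynamic, not names from the file.

      // Keep the static size as a bound, then mark the single dimension dynamic.
      #include <cstdint>
      #include <optional>
      #include <vector>

      constexpr int64_t kDynamicDim = -1;  // stand-in for ShapedType::kDynamic

      // Returns the bounds for a rank-1 shape, or std::nullopt if unsupported.
      std::optional<std::vector<int64_t>> MakeBoundsForRank1(
          std::vector<int64_t>& shape) {
        if (shape.size() != 1) return std::nullopt;  // only rank 1 for now
        std::vector<int64_t> bounds(shape.begin(), shape.end());
        shape[0] = kDynamicDim;  // the dim is now dynamic, bounded by bounds[0]
        return bounds;
      }
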
  2. tensorflow/compiler/mlir/tensorflow/ir/tf_arith_ops_folder.h

        // A scalar identity is broadcastable to any operand shape; we only need
        // to check that the operand has the same shape as the result.
        bool scalar_identity = identity_ty.hasRank() && identity_ty.getRank() == 0;
        if (scalar_identity) return operand_ty == result_ty;
    
        // If identity is not a scalar, we must verify that identity shape is
        // statically known to be broadcastable to the operand shape and the operand
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 5.3K bytes
    - Viewed (0)
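
The excerpt in result 2 guards an arithmetic folder: a scalar (rank-0) identity can be folded only when the operand and result already agree, and anything else would need a static broadcast check. A rough standalone sketch of that decision, assuming a hypothetical ShapeInfo record in place of the MLIR types and comparing shapes rather than full types:

      #include <cstdint>
      #include <vector>

      struct ShapeInfo {
        bool has_rank = false;
        std::vector<int64_t> dims;  // meaningful only when has_rank is true
      };

      // Decides whether op(x, identity) may be folded to x.
      bool CanFoldWithIdentity(const ShapeInfo& identity, const ShapeInfo& operand,
                               const ShapeInfo& result) {
        // A scalar identity broadcasts to any operand shape, so the operand only
        // has to match the result exactly.
        bool scalar_identity = identity.has_rank && identity.dims.empty();
        if (scalar_identity)
          return operand.has_rank == result.has_rank && operand.dims == result.dims;
        // Otherwise a static broadcast-compatibility check would be required;
        // conservatively refuse in this sketch.
        return false;
      }
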
  3. tensorflow/compiler/mlir/tf2xla/transforms/legalize_tf_collective.cc

          hlo::convertElementsAttr(group_assignment, builder.getIntegerType(64)));
      if (replica_groups.getType().getRank() != 2) {
        return op->emitOpError() << "group_assignment should have rank 2, got "
                                 << replica_groups.getType().getRank();
      }
      return success();
    }
    
    ChannelHandleAttr ConvertChannel(OpBuilder& builder, int64_t channel_id,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 16K bytes
    - Viewed (0)
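
Result 3 rejects any group_assignment whose rank is not 2, since it is expected to be a [num_groups, group_size] matrix. A small standalone sketch of the same check without MLIR's diagnostic machinery (ValidateGroupAssignment is an illustrative name):

      #include <cstdint>
      #include <string>
      #include <vector>

      // Returns false and fills *error when the group assignment is not a matrix.
      bool ValidateGroupAssignment(const std::vector<int64_t>& shape,
                                   std::string* error) {
        if (shape.size() != 2) {
          *error = "group_assignment should have rank 2, got " +
                   std::to_string(shape.size());
          return false;
        }
        return true;
      }
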
  4. tensorflow/compiler/mlir/lite/utils/utils.td

    // Checks if the value has rank at least 'n'.
    class HasRankAtLeast<int n> : Constraint<
        CPred<"$0.getType().cast<ShapedType>().hasRank() && "
              "$0.getType().cast<ShapedType>().getRank() >= " # n>>;
    
    // Checks that the value is not produced by a TFL_Quant op, or comes
    // from a TFL_Quant op with the same quant type.
    def NotFromQuantOpOrSameQuantType : Constraint<
      CPred<"tflite::NotFromQuantOpOrSameQuantType($0,$1)">>;
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Apr 30 00:40:15 UTC 2024
    - 4.8K bytes
    - Viewed (0)
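
The TableGen constraint in result 4 expands to the C++ predicate "hasRank() && getRank() >= n". An equivalent standalone predicate, modelling an unranked type as an empty std::optional:

      #include <cstdint>
      #include <optional>

      // True when the rank is known and at least n.
      bool HasRankAtLeast(std::optional<int64_t> rank, int64_t n) {
        return rank.has_value() && *rank >= n;
      }

The "# n" concatenation in the .td file splices the integer parameter into that predicate string when the pattern is emitted.
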
  5. tensorflow/compiler/mlir/tensorflow/transforms/lower_tf.cc

        // Input should be rank 4.
        if (!input_ty.hasRank() || input_ty.getRank() != 4) {
          return failure();
        }
    
        // Check that out_size is rank-1, length-2. Otherwise the size is not legal.
        if (!out_size_ty.hasRank() || out_size_ty.getRank() != 1 ||
            out_size_ty.getShape()[0] != 2) {
          return failure();
        }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 74.9K bytes
    - Viewed (0)
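
The excerpt in result 5 combines two guards: the image input must be rank 4, and out_size must be a rank-1 tensor holding exactly two values. A compact standalone sketch of the same checks, with std::nullopt standing in for an unranked type (IsLegalResizeInput is an illustrative name):

      #include <cstdint>
      #include <optional>
      #include <vector>

      using Shape = std::optional<std::vector<int64_t>>;  // nullopt == unranked

      bool IsLegalResizeInput(const Shape& input, const Shape& out_size) {
        if (!input || input->size() != 4) return false;        // input rank 4
        if (!out_size || out_size->size() != 1) return false;  // out_size rank 1
        return (*out_size)[0] == 2;                            // exactly 2 values
      }
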
  6. tensorflow/compiler/mlir/lite/transforms/prepare_tf.cc

        // tensor, for setting depth_multiplier attribute, etc.).
        auto filter = tf_op.getFilter();
        auto filter_type = mlir::dyn_cast<RankedTensorType>(filter.getType());
        if (!filter_type || filter_type.getRank() != 4 ||
            !filter_type.hasStaticShape())
          return failure();
    
        Value input = tf_op.getInput();
        RankedTensorType input_type =
            mlir::dyn_cast<RankedTensorType>(input.getType());
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 28 21:49:50 UTC 2024
    - 64.6K bytes
    - Viewed (0)
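
Result 6 requires the convolution filter to be ranked, rank 4, and fully static before the pattern applies. A standalone sketch of that guard, with kDynamicDim standing in for ShapedType::kDynamic:

      #include <algorithm>
      #include <cstdint>
      #include <optional>
      #include <vector>

      constexpr int64_t kDynamicDim = -1;  // stand-in for ShapedType::kDynamic

      bool IsSupportedFilter(const std::optional<std::vector<int64_t>>& filter) {
        if (!filter || filter->size() != 4) return false;  // ranked and rank 4
        // hasStaticShape(): no dimension may be dynamic.
        return std::none_of(filter->begin(), filter->end(),
                            [](int64_t d) { return d == kDynamicDim; });
      }
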
  7. tensorflow/compiler/mlir/tensorflow/transforms/set_tpu_infeed_layout.cc

           * layout using the TPU API. Running legalize_tf.cc on non-TPU nodes
           * thus is a potential source of bugs.
           */
          minor_to_major.resize(t.getRank());
          std::iota(minor_to_major.begin(), minor_to_major.end(), 0);
          std::sort(minor_to_major.begin(), minor_to_major.end(),
                    [=](int64_t a, int64_t b) {
                      int64_t da = t.getDimSize(a);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 6.1K bytes
    - Viewed (0)
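
Result 7 builds a minor_to_major layout by starting from the identity permutation over the tensor's rank and sorting the dimension indices by dimension size. The excerpt cuts off inside the comparator, so the descending-by-size ordering in the sketch below is an assumption made for illustration:

      #include <algorithm>
      #include <cstdint>
      #include <numeric>
      #include <vector>

      // Orders dimension indices from minor to major for the given sizes.
      std::vector<int64_t> PickMinorToMajor(const std::vector<int64_t>& dim_sizes) {
        std::vector<int64_t> minor_to_major(dim_sizes.size());
        std::iota(minor_to_major.begin(), minor_to_major.end(), 0);
        std::sort(minor_to_major.begin(), minor_to_major.end(),
                  [&](int64_t a, int64_t b) { return dim_sizes[a] > dim_sizes[b]; });
        return minor_to_major;
      }
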
  8. tensorflow/compiler/mlir/tensorflow/transforms/lower_tf.td

    //===----------------------------------------------------------------------===//
    
    def GetBiasAddGradReductionIndices : NativeCodeCall<
      "GetBiasAddGradReductionIndices("
      "$0.getType().cast<RankedTensorType>().getRank(), $1, &$_builder)">;
    
    def LowerBiasAddGradOp :
      Pat<(TF_BiasAddGradOp AnyRankedTensor:$out_backprop, $data_format),
          (TF_SumOp $out_backprop,
                    (TF_ConstOp (GetBiasAddGradReductionIndices $out_backprop,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jun 04 13:30:42 UTC 2024
    - 24.7K bytes
    - Viewed (0)
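
The NativeCodeCall in result 8 computes the reduction axes for BiasAddGrad: every dimension of out_backprop except the feature dimension (the last one for NHWC, dimension 1 for NCHW). The real helper builds an MLIR constant from these indices; the standalone sketch below only returns them:

      #include <cstdint>
      #include <string>
      #include <vector>

      std::vector<int64_t> BiasAddGradReductionIndices(int64_t rank,
                                                       const std::string& format) {
        const int64_t feature_dim = (format == "NCHW") ? 1 : rank - 1;
        std::vector<int64_t> indices;
        for (int64_t d = 0; d < rank; ++d)
          if (d != feature_dim) indices.push_back(d);
        return indices;
      }

For a rank-4 NHWC out_backprop this yields {0, 1, 2}, which is what the TF_SumOp in the pattern reduces over.
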
  9. tensorflow/compiler/mlir/lite/transforms/post_quantize.cc

                (getElementTypeOrSelf(op.getOutput().getType()))))
          return failure();
    
        ElementsAttr input_tensor = qconst_op.getValue();
    
        assert(perm_tensor.getType().getRank() == 1);
        const int num_dimensions = input_tensor.getShapedType().getRank();
        assert(perm_tensor.getType().getNumElements() == num_dimensions);
    
        ArrayRef<int64_t> input_shape = input_tensor.getShapedType().getShape();
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 17.1K bytes
    - Viewed (0)
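
Result 9 asserts that the permutation tensor is rank 1 and has one entry per input dimension before transposing a quantized constant. A standalone sketch of the corresponding shape bookkeeping (PermuteShape is an illustrative name):

      #include <cassert>
      #include <cstdint>
      #include <vector>

      // Reorders an input shape according to a transpose permutation.
      std::vector<int64_t> PermuteShape(const std::vector<int64_t>& input_shape,
                                        const std::vector<int64_t>& perm) {
        assert(perm.size() == input_shape.size());  // one perm entry per dim
        std::vector<int64_t> output_shape(input_shape.size());
        for (size_t i = 0; i < perm.size(); ++i)
          output_shape[i] = input_shape[perm[i]];
        return output_shape;
      }
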
  10. tensorflow/compiler/mlir/lite/utils/fake_quant_utils.h

          return failure();
        }
    
        int quant_dim = -1;
        if (PerAxis) {
          // This is a special case where the quant_dim is the last dimension.
          quant_dim = mlir::cast<ShapedType>(res.getType()).getRank() - 1;
        }
        // Use the min/max from the operands and the num_bits and narrow_range
        // attribute to create the quantization parameter for the new quantize op.
        rewriter.setInsertionPointAfter(tf_op.getOperation());
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 6.6K bytes
    - Viewed (0)
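
Result 10 picks the quantization dimension: -1 for per-tensor quantization, and the last dimension of the result type (rank - 1) in the per-axis special case. A one-line standalone sketch of that choice:

      #include <cstdint>

      // -1 means per-tensor; otherwise the quantized axis is the last dimension.
      int64_t PickQuantDim(bool per_axis, int64_t result_rank) {
        return per_axis ? result_rank - 1 : -1;
      }
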