Results 1 - 10 of 13 for input_rank (0.15 sec)

  1. tensorflow/cc/gradients/grad_helper.cc

      // should be replaced by 1.
      // We use DynamicStitch to do this.
    
      // input_rank = 4
      auto input_rank = Size(scope, input_shape);
    
      // Normalize any negative indices in the reduction_axes to positive
      // values.
      auto axes = Mod(scope, Add(scope, reduction_axes, input_rank), input_rank);
    
      // This [0..input_rank) range of integers is used in DynamicStitch to
      // first copy input_shape to the result.
    - Last Modified: Mon Mar 07 23:11:54 UTC 2022
    - 2.8K bytes
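
    The Mod(Add(reduction_axes, input_rank), input_rank) pattern above is just modular index
    normalization. A minimal plain-C++ sketch of the same arithmetic, with made-up axis values
    and no TensorFlow dependencies:

      #include <cstdio>
      #include <vector>

      int main() {
        const int input_rank = 4;                       // e.g. a rank-4 input
        std::vector<int> reduction_axes = {-1, 0, -3};  // hypothetical axes, some negative
        for (int &axis : reduction_axes) {
          axis = (axis + input_rank) % input_rank;      // normalize negatives to [0, rank)
        }
        for (int axis : reduction_axes) std::printf("%d ", axis);  // prints: 3 0 1
        std::printf("\n");
        return 0;
      }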
  2. tensorflow/compiler/mlir/lite/transforms/optimize_batch_matmul.cc

              mlir::cast<RankedTensorType>(input.getType());
          const int input_rank = input_type.getRank();
    
          // Create a 1D I32 tensor for representing the dimension permutation.
          auto permuation_tensor_type =
              RankedTensorType::get({input_rank}, rewriter.getIntegerType(32));
          llvm::SmallVector<Attribute, 4> permute;
          permute.reserve(input_rank);
          // First create an identity permutation tensor.
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 9.6K bytes
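
    The snippet stops at building the identity permutation; a standalone sketch of that step,
    plus the swap of the last two dimensions that a batch-matmul transpose would presumably
    apply next (no MLIR types, hypothetical rank):

      #include <cstdint>
      #include <cstdio>
      #include <numeric>
      #include <utility>
      #include <vector>

      int main() {
        const int input_rank = 4;
        std::vector<int32_t> permute(input_rank);
        std::iota(permute.begin(), permute.end(), 0);                 // identity: 0 1 2 3
        std::swap(permute[input_rank - 2], permute[input_rank - 1]);  // transpose last two: 0 1 3 2
        for (int32_t d : permute) std::printf("%d ", d);
        std::printf("\n");
        return 0;
      }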
  3. tensorflow/compiler/mlir/quantization/common/attrs_and_constraints.cc

      // https://github.com/openxla/stablehlo/blob/main/docs/spec.md#dot_general
      const bool has_proper_rank =
          (input_rank == 1 || input_rank == 2) && filter_rank == 2;
      const bool has_proper_contracting_dim =
          lhs_contracting_dims.size() == 1 && rhs_contracting_dims.size() == 1 &&
          lhs_contracting_dims[0] == input_rank - 1;
      const bool is_not_batch_op =
          dot_dimension_numbers.getLhsBatchingDimensions().empty();
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 6.8K bytes
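
    Stripped of the MLIR accessors, the constraints above are three boolean checks: input rank
    1 or 2, filter rank 2, and a single contracting dimension that is the input's last
    dimension. A self-contained restatement with hypothetical ranks and dimension numbers:

      #include <cstdio>
      #include <vector>

      int main() {
        const int input_rank = 2, filter_rank = 2;
        std::vector<long> lhs_contracting_dims = {1};   // contract the input's last dim
        std::vector<long> rhs_contracting_dims = {0};

        const bool has_proper_rank =
            (input_rank == 1 || input_rank == 2) && filter_rank == 2;
        const bool has_proper_contracting_dim =
            lhs_contracting_dims.size() == 1 && rhs_contracting_dims.size() == 1 &&
            lhs_contracting_dims[0] == input_rank - 1;
        std::printf("%d %d\n", has_proper_rank, has_proper_contracting_dim);  // prints: 1 1
        return 0;
      }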
  4. tensorflow/compiler/mlir/tensorflow/transforms/lower_tf.cc

        auto input_shape = input_ty.getShape();
        int input_rank = input_shape.size();
        SmallVector<int32_t, 4> shift_map(input_rank, 0);
        for (int i = 0; i < axis_attr.getNumElements(); ++i) {
          int32_t axis_i = axis_attr.getValues<int32_t>()[i];
          if (axis_i < 0) axis_i += input_rank;
          int32_t shift_i = shift_attr.getValues<int32_t>()[i];
          shift_map[axis_i] += shift_i;
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 74.9K bytes
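
    The loop above accumulates per-axis shifts while wrapping negative axes to positive ones.
    A standalone sketch of that bookkeeping with invented axes and shifts:

      #include <cstddef>
      #include <cstdint>
      #include <cstdio>
      #include <vector>

      int main() {
        const int input_rank = 3;
        std::vector<int32_t> axes = {-1, 1, 2};    // -1 refers to the last axis
        std::vector<int32_t> shifts = {2, 5, 1};
        std::vector<int32_t> shift_map(input_rank, 0);
        for (std::size_t i = 0; i < axes.size(); ++i) {
          int32_t axis_i = axes[i];
          if (axis_i < 0) axis_i += input_rank;    // -1 -> 2
          shift_map[axis_i] += shifts[i];          // axis 2 accumulates 2 + 1
        }
        for (int32_t s : shift_map) std::printf("%d ", s);  // prints: 0 5 3
        std::printf("\n");
        return 0;
      }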
  5. tensorflow/compiler/mlir/tf2xla/transforms/legalize_tf.cc

            dims_to_reverse;
        int64_t input_rank = input_ty.getRank();
        ArrayRef<int64_t> input_shape = input_ty.getShape();
        hlo_begin_indices.reserve(input_rank);
        hlo_end_indices.reserve(input_rank);
        hlo_strides.reserve(input_rank);
    
        int64_t indices_elements = begin_indices.size();
        if (input_rank < indices_elements) return failure();
    
    - Last Modified: Tue Jun 11 20:00:43 UTC 2024
    - 291.8K bytes
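
    The only logic visible in this excerpt is the guard that there are no more begin indices
    than the input has dimensions. A tiny sketch of that check; the helper name and example
    values are invented:

      #include <cstdint>
      #include <cstdio>
      #include <vector>

      bool IndicesFitRank(int64_t input_rank, const std::vector<int64_t>& begin_indices) {
        // Mirrors `if (input_rank < indices_elements) return failure();`.
        return static_cast<int64_t>(begin_indices.size()) <= input_rank;
      }

      int main() {
        std::printf("%d\n", IndicesFitRank(2, {0, 0, 0}));  // prints: 0 (more indices than dims)
        std::printf("%d\n", IndicesFitRank(4, {0, 1}));     // prints: 1 (ok)
        return 0;
      }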
  6. tensorflow/compiler/mlir/lite/transforms/legalize_tf.cc

        const int input_rank = input_type.getRank();
    
        // Create a 1D I32 tensor for representing the dimension permutation.
        auto permuation_tensor_type =
            RankedTensorType::get({input_rank}, rewriter.getIntegerType(32));
        llvm::SmallVector<Attribute, 4> permute;
        permute.reserve(input_rank);
        // First create an identity permutation tensor.
    - Last Modified: Mon May 20 20:06:54 UTC 2024
    - 45.2K bytes
  7. tensorflow/compiler/mlir/tensorflow/ir/tf_ops_n_z.cc

      if (!input_type) return success();
    
      int64_t input_rank = input_type.getRank();
      if (input_rank == 0)
        return op.emitOpError("cannot split scalar input tensor");
    
      DenseIntElementsAttr split_dim_attr;
      if (!matchPattern(split_dim, m_Constant(&split_dim_attr))) return success();
    
      int64_t index = (*split_dim_attr.begin()).getSExtValue();
    
      if (index + input_rank < 0 || index >= input_rank) {
    - Last Modified: Thu May 09 22:07:10 UTC 2024
    - 170.8K bytes
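
    The verifier accepts any split dimension in [-input_rank, input_rank) and rejects scalar
    inputs. A hedged standalone version of the same test (the helper name is invented):

      #include <cstdint>
      #include <cstdio>

      bool SplitDimIsValid(int64_t input_rank, int64_t index) {
        if (input_rank == 0) return false;                        // cannot split a scalar
        return !(index + input_rank < 0 || index >= input_rank);  // same test as the snippet
      }

      int main() {
        std::printf("%d %d %d\n",
                    SplitDimIsValid(3, -1),   // 1: -1 means the last dimension
                    SplitDimIsValid(3, 3),    // 0: out of range
                    SplitDimIsValid(0, 0));   // 0: scalar input
        return 0;
      }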
  8. tensorflow/cc/gradients/array_grad.cc

      // begin = [1, 2, 1], size = [1, 3, 2]
      Input input = op.input(0);
      Input begin = op.input(1);
      // input_rank = 3
      auto input_rank = Rank(scope, input);
      // slice_size = [1, 3, 2]
      auto slice_size = Shape(scope, op.output(0));
      // padding_shape = [3, 1]
      auto padding_shape = Stack(scope, {input_rank, 1});
      // before_padding = [[1]
      //                   [2]
      //                   [1]]
    - Last Modified: Tue Oct 10 23:33:32 UTC 2023
    - 31.7K bytes
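
    The comments already trace the shapes; a plain C++ illustration of the same example
    (input_rank = 3, begin = [1, 2, 1]) showing why reshaping begin to padding_shape = [3, 1]
    gives the column printed in before_padding:

      #include <cstdio>
      #include <vector>

      int main() {
        const int input_rank = 3;
        std::vector<int> begin = {1, 2, 1};
        const int padding_shape[2] = {input_rank, 1};  // [3, 1]
        // Reshaping begin to [3, 1] yields one element per row, i.e. a column vector.
        for (int i = 0; i < padding_shape[0]; ++i) std::printf("[%d]\n", begin[i]);
        return 0;
      }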
  9. tensorflow/compiler/mlir/quantization/tensorflow/passes/replace_cast_hacks_with_tf_xla_ops.cc

      ShapedType weight_type = mlir::cast<ShapedType>(weight.getType());
      const int32_t input_rank = input_type.getRank();
      const int32_t weight_rank = weight_type.getRank();
      const int32_t broadcasted_rank = std::max(input_rank, weight_rank);
    
      const int32_t num_matmul_dim = 2;
      const int32_t num_input_batch_dim = input_rank - num_matmul_dim;
      const int32_t num_weight_batch_dim = weight_rank - num_matmul_dim;
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 47.1K bytes
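
    The rank arithmetic here treats the last two dimensions of each operand as the matmul
    dimensions and everything in front of them as batch, with both operands broadcasting up to
    the larger rank. A standalone sketch with hypothetical ranks:

      #include <algorithm>
      #include <cstdint>
      #include <cstdio>

      int main() {
        const int32_t input_rank = 4;   // e.g. [B1, B2, M, K]
        const int32_t weight_rank = 3;  // e.g. [B2, K, N]
        const int32_t broadcasted_rank = std::max(input_rank, weight_rank);  // 4

        const int32_t num_matmul_dim = 2;
        const int32_t num_input_batch_dim = input_rank - num_matmul_dim;    // 2
        const int32_t num_weight_batch_dim = weight_rank - num_matmul_dim;  // 1
        std::printf("%d %d %d\n", broadcasted_rank, num_input_batch_dim,
                    num_weight_batch_dim);  // prints: 4 2 1
        return 0;
      }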
  10. tensorflow/compiler/mlir/lite/transforms/lower_static_tensor_list.cc

        // Subtract `input_rank` by 1 to get the item's rank, which is used as
        // `partial_position_shape`.
        auto input_rank = rewriter->create<TF::RankOp>(
            loc, tensorflow::GetTypeFromTFTensorShape({}, shape_dtype), input);
        auto partial_position_shape = rewriter->create<TF::SubOp>(
            loc, tensorflow::GetTypeFromTFTensorShape({1}, shape_dtype), input_rank,
            vector_one);
        auto slice_op =
    - Last Modified: Tue Jun 11 20:00:43 UTC 2024
    - 70.7K bytes
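
    The arithmetic being assembled here is simply input_rank - 1: an item of a tensor list has
    one dimension fewer than the stacked list tensor. A trivial sketch with a made-up rank:

      #include <cstdio>

      int main() {
        const int input_rank = 3;                           // rank of the stacked list tensor
        const int partial_position_shape = input_rank - 1;  // rank of a single item
        std::printf("%d\n", partial_position_shape);        // prints: 2
        return 0;
      }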