Results 1 - 5 of 5 for reduce_sum (0.11 sec)

  1. tensorflow/cc/gradients/linalg_grad.cc

          scope, Slice1dHelper(scope, x_shape, bx_start, bx_end),
          Slice1dHelper(scope, y_shape, by_start, by_end));
      grad_x = Reshape(
          scope, ReduceSum(scope, grad_x, Add(scope, bx_start, args.r0)), x_shape);
      grad_y = Reshape(
          scope, ReduceSum(scope, grad_y, Add(scope, by_start, args.r1)), y_shape);
      grad_outputs->push_back(grad_x);
      grad_outputs->push_back(grad_y);
      return scope.status();
    }
    
    - Last Modified: Mon Mar 07 23:11:54 UTC 2022
    - 20.4K bytes
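
    The snippet reduce-sums the incoming gradient over the broadcast batch
    dimensions, then reshapes it back to the operand's original shape. A
    minimal Python sketch of the same principle, assuming only standard
    TensorFlow eager APIs (tf.GradientTape, tape.gradient):

      import tensorflow as tf

      # x is broadcast along axis 0 when combined with y, so the gradient
      # flowing back to x must be summed over that broadcast dimension and
      # end up with x's original shape, mirroring ReduceSum + Reshape above.
      x = tf.ones([1, 3])
      y = tf.ones([4, 3])
      with tf.GradientTape() as tape:
          tape.watch(x)
          z = x + y                    # x implicitly broadcast to [4, 3]
      grad_x = tape.gradient(z, x)     # shape [1, 3], each entry 4.0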
  2. tensorflow/c/eager/parallel_device/parallel_device.cc

            // Non-parallel tensors from _EagerConst/tf.constant are implicitly
            // broadcast, i.e. set as the input to each parallel operation. This
            // allows code like "tf.constant(1.)" or "tf.reduce_sum(..., axis=1)"
            // (where the value starts on the host), without allowing other implicit
            // copies/broadcasts. Other implicit copies may be supported eventually,
    - Last Modified: Wed Mar 29 22:05:31 UTC 2023
    - 18.3K bytes
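
    The comment documents a policy rather than a computation: only
    non-parallel host tensors (e.g. from tf.constant) are implicitly
    broadcast, becoming the input to each per-device operation. A
    hypothetical Python sketch of that rule; maybe_broadcast and
    num_devices are illustrative names, not the parallel device's real API:

      def maybe_broadcast(value, num_devices):
          # Illustrative only: a value that is already per-device (a plain
          # list stands in for a parallel tensor) passes through unchanged;
          # a single host value is replicated to every device.
          if isinstance(value, list) and len(value) == num_devices:
              return value                 # already a parallel tensor
          return [value] * num_devices     # implicit broadcast to each device

      print(maybe_broadcast(1.0, 2))       # [1.0, 1.0]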
  3. tensorflow/cc/gradients/array_grad.cc

      // all the gradients from the shape it fills.
      // We use ReduceSum to implement this, which needs an argument providing
      // the indices of all the dimensions of the incoming gradient.
      // grad(x) = reduce_sum(grad(y), [0..rank(grad(y))])
      auto all_dims = Range(scope, Const(scope, 0), Rank(scope, grad_inputs[0]),
                            Const(scope, 1));
      grad_outputs->push_back(ReduceSum(scope, grad_inputs[0], all_dims));
      return scope.status();
    - Last Modified: Tue Oct 10 23:33:32 UTC 2023
    - 31.7K bytes
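
    Here the Fill gradient is literally a reduce_sum over every axis of the
    incoming gradient. The same computation in Python, mirroring
    Range(0, Rank(grad), 1) with tf.range and tf.rank (both standard
    TensorFlow ops):

      import tensorflow as tf

      grad_y = tf.ones([2, 3])                 # incoming gradient for Fill
      all_dims = tf.range(0, tf.rank(grad_y))  # [0, 1]: every dimension index
      grad_x = tf.reduce_sum(grad_y, axis=all_dims)
      print(grad_x)                            # scalar 6.0, grad of the fill value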
  4. tensorflow/compiler/mlir/tensorflow/transforms/einsum.cc

          mlir::dyn_cast_or_null<RankedTensorType>(op.getOperand(0).getType());
      if (!lhs) {
        return failure();
      }
      // A unary einsum op is supported only when the operation can be
      // replaced using reduce_sum and/or transpose.
      if (const auto dnums_or =
              GetEinsumDimensionNumbersUnary(op.getEquation(), lhs)) {
        return rewriteToReduceSumAndTranspose(op, dnums_or.value(), rewriter);
      }
    
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 33.3K bytes
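
    The rewrite relies on the fact that a unary einsum that only drops or
    permutes labels is expressible as a reduce_sum and/or a transpose. A
    quick Python check of that equivalence using tf.einsum:

      import tensorflow as tf

      x = tf.reshape(tf.range(6.0), [2, 3])

      # 'ij->i' drops label j, which is a sum over axis 1 ...
      a = tf.einsum('ij->i', x)
      b = tf.reduce_sum(x, axis=1)

      # ... and 'ij->ji' permutes labels, which is a transpose.
      c = tf.einsum('ij->ji', x)
      d = tf.transpose(x)

      print(bool(tf.reduce_all(a == b)))  # True
      print(bool(tf.reduce_all(c == d)))  # True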
  5. tensorflow/compiler/mlir/lite/stablehlo/transforms/hlo_matchers.cc

      auto iota_shape = iota_type.getShape();
      auto reduce_dim = (*dimensions.value_begin<APInt>()).getSExtValue();
      if (reduce_dim < 0) reduce_dim += iota_type.getRank();
    
      auto index =
          std::optional<SmallVector<int64_t>>(std::in_place, iota_type.getRank());
      while (index.has_value()) {
        StridedArrayView<DenseIntElementsAttr> array_view(
            iota_const_attr, iota_shape, *index, reduce_dim);
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 11.6K bytes
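
    Before scanning the iota, the matcher normalizes a negative reduction
    dimension into the [0, rank) range. The same convention in Python;
    normalize_dim is an illustrative helper, not part of the matcher:

      def normalize_dim(reduce_dim, rank):
          # Negative indices count from the end, exactly as in the snippet:
          # adding rank maps, e.g., -1 to rank - 1.
          if reduce_dim < 0:
              reduce_dim += rank
          return reduce_dim

      print(normalize_dim(-1, 4))  # 3
      print(normalize_dim(2, 4))   # 2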