Results 1 - 10 of 13 for reduce_sum (0.24 sec)

  1. tensorflow/compiler/mlir/tfr/examples/mnist/ops_defs.py

      broadcast_shape = tf.shape(y)
      input_value_shape = tf.shape(op.inputs[2])
      _, reduction_axes = tf.raw_ops.BroadcastGradientArgs(
          s0=broadcast_shape, s1=input_value_shape)
      updates_grad_reshaped = tf.reduce_sum(
          grad, axis=reduction_axes, keepdims=True)
      bias_grad = tf.reshape(updates_grad_reshaped, input_value_shape)
    
      dilations = [1, op.get_attr('dilation_w'), op.get_attr('dilation_h'), 1]
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Aug 31 20:23:51 UTC 2023
    - 6.8K bytes
    - Viewed (0)
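
    A minimal sketch of the reduction pattern in this snippet, assuming
    TensorFlow 2.x; the shapes and names below are illustrative, not taken
    from ops_defs.py:

      import tensorflow as tf

      grad = tf.ones([2, 3, 4])  # hypothetical upstream gradient (broadcast shape)
      bias = tf.zeros([4])       # hypothetical bias that was broadcast against it

      # BroadcastGradientArgs returns, per operand, the axes that operand was
      # broadcast over, which are exactly the axes the gradient must sum across.
      _, reduction_axes = tf.raw_ops.BroadcastGradientArgs(
          s0=tf.shape(grad), s1=tf.shape(bias))
      bias_grad = tf.reshape(
          tf.reduce_sum(grad, axis=reduction_axes, keepdims=True), tf.shape(bias))
      print(bias_grad.numpy())  # [6. 6. 6. 6.]: 2 * 3 summed terms per element
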
  2. tensorflow/compiler/mlir/lite/transforms/optimize_batch_matmul.cc

            /*asymmetric_quantize_inputs=*/mlir::BoolAttr());
        rewriter.replaceOp(bmm_op, {fc_op.getResult(0)});
    
        return success();
      };
    };
    
    // Converts batch_matmul operation with a ones tensor to a reduce_sum.
    struct ConvertBatchMatMulOpToReduceSum
        : public OpRewritePattern<TFL::BatchMatMulOp> {
      using OpRewritePattern<TFL::BatchMatMulOp>::OpRewritePattern;
      LogicalResult matchAndRewrite(TFL::BatchMatMulOp bmm_op,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 9.6K bytes
    - Viewed (0)
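
    The identity this pattern exploits is easy to check from Python; a sketch
    of the equivalence (not the pass itself), assuming TensorFlow 2.x:

      import tensorflow as tf

      x = tf.random.uniform([2, 3, 4])
      ones = tf.ones([4, 1])

      # Multiplying by a ones vector sums out the contraction axis, so the
      # batch_matmul can be rewritten as a cheaper reduce_sum.
      via_matmul = tf.matmul(x, ones)                        # shape [2, 3, 1]
      via_reduce = tf.reduce_sum(x, axis=-1, keepdims=True)  # shape [2, 3, 1]
      print(float(tf.reduce_max(tf.abs(via_matmul - via_reduce))))  # ~0.0
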
  3. tensorflow/cc/gradients/array_grad.cc

      // all the gradients from the shape it fills.
      // We use ReduceSum to implement this, which needs an argument providing
      // the indices of all the dimensions of the incoming gradient.
      // grad(x) = reduce_sum(grad(y), [0..rank(grad(y))])
      auto all_dims = Range(scope, Const(scope, 0), Rank(scope, grad_inputs[0]),
                            Const(scope, 1));
      grad_outputs->push_back(ReduceSum(scope, grad_inputs[0], all_dims));
      return scope.status();
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Oct 10 23:33:32 UTC 2023
    - 31.7K bytes
    - Viewed (0)
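
    The comment's formula, grad(x) = reduce_sum(grad(y), [0..rank(grad(y))]),
    is the entire gradient of Fill; the same computation in Python, assuming
    TensorFlow 2.x:

      import tensorflow as tf

      grad_y = tf.random.uniform([2, 3, 4])  # incoming gradient of the filled tensor

      # Fill broadcasts a single scalar everywhere, so its gradient is the sum
      # of the incoming gradient over every dimension.
      all_dims = tf.range(tf.rank(grad_y))   # [0, 1, 2], mirroring Range(...) above
      grad_x = tf.reduce_sum(grad_y, axis=all_dims)
      print(grad_x.shape)  # (): a scalar, matching the fill value
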
  4. tensorflow/compiler/mlir/tensorflow/transforms/einsum.cc

          mlir::dyn_cast_or_null<RankedTensorType>(op.getOperand(0).getType());
      if (!lhs) {
        return failure();
      }
      // unary einsum op is only supported to the case where the operation can be
      // replaced using reduce_sum and/or transpose
      if (const auto dnums_or =
              GetEinsumDimensionNumbersUnary(op.getEquation(), lhs)) {
        return rewriteToReduceSumAndTranspose(op, dnums_or.value(), rewriter);
      }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 33.3K bytes
    - Viewed (0)
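
    What "replaced using reduce_sum and/or transpose" means can be seen from
    two small unary equations; a sketch assuming TensorFlow 2.x:

      import tensorflow as tf

      x = tf.random.uniform([2, 3, 4])

      # Dropping a label from the output sums out that axis...
      a = tf.einsum('ijk->ik', x)
      b = tf.reduce_sum(x, axis=1)
      # ...while a pure relabeling is just a transpose.
      c = tf.einsum('ijk->kji', x)
      d = tf.transpose(x, perm=[2, 1, 0])
      print(float(tf.reduce_max(tf.abs(a - b))))  # ~0.0
      print(float(tf.reduce_max(tf.abs(c - d))))  # 0.0: same elements, reordered
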
  5. tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test_base.py

            self.w = np.random.uniform(low=-2, high=2, size=w_shape).astype('f4')
    
          @def_function.function
          def condition(self, x, w):
            return math_ops.reduce_sum(x, keepdims=False) < 100
    
          @def_function.function
          def body(self, x, w):
            z = nn_ops.conv2d(x, w, padding='SAME')
            return z, w
    
          @def_function.function(
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Mar 21 08:51:46 UTC 2024
    - 51.2K bytes
    - Viewed (0)
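
    The condition/body pair above drives a while loop whose predicate is a
    reduce_sum; a stripped-down stand-in (doubling instead of convolving),
    assuming TensorFlow 2.x:

      import tensorflow as tf

      @tf.function
      def run_until_large(x):
        # Keep iterating while the total mass of x stays under 100.
        cond = lambda t: tf.reduce_sum(t) < 100.0
        body = lambda t: t * 2.0
        return tf.while_loop(cond, body, [x])[0]

      result = run_until_large(tf.ones([2, 2]))  # sums: 4, 8, 16, 32, 64, 128
      print(float(tf.reduce_sum(result)))        # 128.0
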
  6. tensorflow/compiler/mlir/lite/ir/tfl_ops.td

      let summary = "Log softmax operator";
    
      let description = [{
        Computes element-wise log softmax activations with the following formula
    
          input - log(reduce_sum(exp(input), dim))
      }];
    
      let arguments = (ins TFL_TensorOf<[F32, QUI8, QI8, TFL_Quint8]>:$input);
    
      let results = (outs TFL_TensorOf<[F32, QUI8, QI8, TFL_Quint8]>:$output);
    
      let hasOptions = 1;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jun 06 19:09:08 UTC 2024
    - 186K bytes
    - Viewed (0)
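
    The formula in the description can be checked against the stock op; a
    sketch assuming TensorFlow 2.x (note the naive form is not numerically
    stable for large inputs, which is one reason a fused op exists):

      import tensorflow as tf

      x = tf.constant([[1.0, 2.0, 3.0]])

      # input - log(reduce_sum(exp(input), dim)), per the description above.
      manual = x - tf.math.log(tf.reduce_sum(tf.exp(x), axis=-1, keepdims=True))
      builtin = tf.nn.log_softmax(x, axis=-1)
      print(float(tf.reduce_max(tf.abs(manual - builtin))))  # ~0.0
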
  7. tensorflow/cc/gradients/math_grad.cc

      auto reduce =
          internal::BroadcastGradientArgs(scope, x_batch_shape, y_batch_shape);
      (*grad_outputs)[0] =
          Reshape(scope, ReduceSum(scope, (*grad_outputs)[0], reduce.r0), sx);
      (*grad_outputs)[1] =
          Reshape(scope, ReduceSum(scope, (*grad_outputs)[1], reduce.r1), sy);
      return scope.status();
    }
    REGISTER_GRADIENT_OP("BatchMatMulV2", BatchMatMulV2Grad);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Aug 25 18:20:20 UTC 2023
    - 50.7K bytes
    - Viewed (0)
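
    The same ReduceSum-over-broadcast-axes pattern is visible from Python when
    batch dimensions broadcast; a sketch assuming TensorFlow 2.x:

      import tensorflow as tf

      x = tf.random.uniform([1, 3, 4])  # batch dim 1 broadcasts...
      y = tf.random.uniform([5, 4, 2])  # ...against batch dim 5
      with tf.GradientTape() as tape:
        tape.watch([x, y])
        z = tf.matmul(x, y)             # shape [5, 3, 2]
      dx, dy = tape.gradient(z, [x, y])
      # dx is summed back over the broadcast batch axis: the ReduceSum/Reshape
      # pair in the snippet above.
      print(dx.shape, dy.shape)  # (1, 3, 4) (5, 4, 2)
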
  8. tensorflow/compiler/mlir/lite/stablehlo/transforms/hlo_matchers.cc

      auto iota_shape = iota_type.getShape();
      auto reduce_dim = (*dimensions.value_begin<APInt>()).getSExtValue();
      if (reduce_dim < 0) reduce_dim += iota_type.getRank();
    
      auto index =
          std::optional<SmallVector<int64_t>>(std::in_place, iota_type.getRank());
      while (index.has_value()) {
        StridedArrayView<DenseIntElementsAttr> array_view(
            iota_const_attr, iota_shape, *index, reduce_dim);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 11.6K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test.py

            is greater than 10.
    
            Args:
              x: Input tensor.
    
            Returns:
              A map of: output key -> output result.
            """
            if math_ops.reduce_sum(x) > 10.0:
              out = math_ops.matmul(x, self.filters_0)
              out = nn_ops.bias_add(out, self.bias_0)
              return {'output': out}
    
            out = math_ops.matmul(x, self.filters_1)
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 17 03:36:50 UTC 2024
    - 235.6K bytes
    - Viewed (0)
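
    The predicate here is a tensor, so under tf.function AutoGraph lowers the
    data-dependent if into a cond; a minimal sketch, assuming TensorFlow 2.x
    (threshold and branches are illustrative):

      import tensorflow as tf

      @tf.function
      def branch(x):
        # Traced into a tf.cond: both branches must yield compatible outputs.
        if tf.reduce_sum(x) > 10.0:
          return x * 2.0
        return x

      print(float(tf.reduce_sum(branch(tf.ones([2, 3])))))  # 6.0: sum <= 10, unchanged
      print(float(tf.reduce_sum(branch(tf.ones([4, 4])))))  # 32.0: sum > 10, doubled
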
  10. tensorflow/c/experimental/gradients/nn_grad.cc

                     absl::Span<AbstractTensorHandle*> grad_inputs) override {
        /* Given upstream grad U and a BiasAdd: A + bias, the gradients are:
         *
         *    dA = U
         *    dbias = reduceSum(U, dims = channel_dim)
         */
    
        AbstractTensorHandle* upstream_grad = grad_outputs[0];
        DCHECK(upstream_grad);
    
        // Recover data format from forward pass for gradient.
        std::string data_format;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Apr 09 06:38:45 UTC 2024
    - 5.7K bytes
    - Viewed (0)
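
    The dbias formula in the comment reduces the upstream gradient over every
    non-channel axis; a sketch assuming TensorFlow 2.x and NHWC layout:

      import tensorflow as tf

      u = tf.ones([2, 5, 5, 3])  # hypothetical upstream gradient U, NHWC

      # dbias = reduce_sum(U) over all dims except the channel dim (the last).
      dbias = tf.reduce_sum(u, axis=[0, 1, 2])
      print(dbias.numpy())  # [50. 50. 50.]: 2*5*5 contributions per channel
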