- Sort Score
- Results per page: 10
- Languages All
Results 1 - 3 of 3 for reduce_sum (0.19 sec)
-
tensorflow/compiler/mlir/tfr/examples/mnist/ops_defs.py
broadcast_shape = tf.shape(y) input_value_shape = tf.shape(op.inputs[2]) _, reduction_axes = tf.raw_ops.BroadcastGradientArgs( s0=broadcast_shape, s1=input_value_shape) updates_grad_reshaped = tf.reduce_sum( grad, axis=reduction_axes, keepdims=True) bias_grad = tf.reshape(updates_grad_reshaped, input_value_shape) dilations = [1, op.get_attr('dilation_w'), op.get_attr('dilation_h'), 1]
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Aug 31 20:23:51 UTC 2023 - 6.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/optimize_batch_matmul.cc
/*asymmetric_quantize_inputs=*/mlir::BoolAttr()); rewriter.replaceOp(bmm_op, {fc_op.getResult(0)}); return success(); }; }; // Converts batch_matmul operation with a ones tensor to a reduce_sum. struct ConvertBatchMatMulOpToReduceSum : public OpRewritePattern<TFL::BatchMatMulOp> { using OpRewritePattern<TFL::BatchMatMulOp>::OpRewritePattern; LogicalResult matchAndRewrite(TFL::BatchMatMulOp bmm_op,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 9.6K bytes - Viewed (0) -
tensorflow/c/experimental/gradients/nn_grad.cc
absl::Span<AbstractTensorHandle*> grad_inputs) override { /* Given upstream grad U and a BiasAdd: A + bias, the gradients are: * * dA = U * dbias = reduceSum(U, dims = channel_dim) */ AbstractTensorHandle* upstream_grad = grad_outputs[0]; DCHECK(upstream_grad); // Recover data format from forward pass for gradient. std::string data_format;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 09 06:38:45 UTC 2024 - 5.7K bytes - Viewed (0)