Results 1 - 5 of 5 for RegisterGradient (0.64 sec)

  1. tensorflow/compiler/mlir/tfr/examples/mnist/ops_defs.py

        return tf.raw_ops.Relu(features=res)
      elif act == 'RELU6':
        return tf.raw_ops.Relu6(features=res)
      elif act == 'TANH':
        return tf.raw_ops.Tanh(x=res)
      else:
        return res
    
    
    @tf.RegisterGradient('NewConv2D')
    def _conv_add_relu_grad(op: ops.Operation, grad):
      act = op.get_attr('act')
      y = op.outputs[0]
      if act == 'RELU':
        grad = gen_nn_ops.relu_grad(grad, y)
      elif act == 'RELU6':

    - Last Modified: Thu Aug 31 20:23:51 UTC 2023
    - 6.8K bytes
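
    As context for result 1: a function registered via tf.RegisterGradient receives the forward op and the incoming gradient, may inspect the op's attrs and outputs, and must return one gradient per op input. A minimal sketch of that shape, where 'MyFusedOp' is a hypothetical single-input op type invented here for illustration:

    ```python
    import tensorflow as tf

    # 'MyFusedOp' is hypothetical; registering only records this function
    # in TensorFlow's gradient registry under that op type name.
    @tf.RegisterGradient('MyFusedOp')
    def _my_fused_op_grad(op, grad):
      # Dispatch on the 'act' attr as ops_defs.py does: first backprop
      # through the fused activation, using the forward output y.
      act = op.get_attr('act')
      y = op.outputs[0]
      if act == 'RELU':
        grad = tf.raw_ops.ReluGrad(gradients=grad, features=y)
      elif act == 'RELU6':
        grad = tf.raw_ops.Relu6Grad(gradients=grad, features=y)
      return [grad]  # one entry per input of the op
    ```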
  2. tensorflow/compiler/jit/ops/xla_ops_grad.py

    from tensorflow.python.framework import ops
    
    
    @ops.RegisterGradient("XlaClusterOutput")
    def _XlaClusterOutputGrad(_, grad):
      del grad  # unused
      raise RuntimeError("Gradient computation of graph in xla.compile() is "

    - Last Modified: Tue Sep 28 21:37:05 UTC 2021
    - 1.1K bytes
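
    Result 2 shows a second common use of the registry: installing a gradient function that raises, so any attempt to differentiate through the op fails with an explicit error. A sketch of the same pattern, assuming a hypothetical op type 'MyOpaqueOp':

    ```python
    import tensorflow as tf

    # 'MyOpaqueOp' is a hypothetical op type name used for illustration.
    @tf.RegisterGradient('MyOpaqueOp')
    def _my_opaque_op_grad(op, grad):
      del op, grad  # unused, mirroring xla_ops_grad.py
      raise RuntimeError('Gradient computation through MyOpaqueOp is unsupported.')
    ```

    TensorFlow also offers tf.no_gradient(op_type) to declare an op non-differentiable without an error; raising instead, as the XLA stub does, surfaces the unsupported case loudly.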
  3. tensorflow/compiler/mlir/tfr/examples/pad/ops_defs.py

              axis=i,
              num_split=2)
    
        input_ = tf.raw_ops.Concat(
            concat_dim=i, values=[left_padding, input_, right_padding])
      return input_
    
    
    @tf.RegisterGradient('NewMirrorPad')
    def _mirror_pad_grad(op, grad):
      mode = op.get_attr('mode')
      return [gen_array_ops.mirror_pad_grad(grad, op.inputs[1], mode=mode), None]
    
    
    @Composite(
        'NewMirrorPadGrad',

    - Last Modified: Fri Oct 01 05:00:29 UTC 2021
    - 5.6K bytes
  4. tensorflow/c/experimental/ops/array_ops.cc

    //
    //   ```python
    //   with tf.get_default_graph().gradient_override_map(
    //       {'IdentityN': 'OverrideGradientWithG'}):
    //     y, _ = identity_n([f(x), x])
    //
    //   @tf.RegisterGradient('OverrideGradientWithG')
    //   def ApplyG(op, dy, _):
    //     return [None, g(dy)]  # Do not backprop to f(x).
    //   ```
    Status IdentityN(AbstractContext* ctx,
                     absl::Span<AbstractTensorHandle* const> input,

    - Last Modified: Tue May 10 19:11:36 UTC 2022
    - 6.7K bytes
  5. tensorflow/compiler/mlir/tensorflow/ir/tf_generated_ops.td

    such that dx = g(dy). In Python,
    
    ```python
    with tf.get_default_graph().gradient_override_map(
        {'IdentityN': 'OverrideGradientWithG'}):
      y, _ = identity_n([f(x), x])
    
    @tf.RegisterGradient('OverrideGradientWithG')
    def ApplyG(op, dy, _):
      return [None, g(dy)]  # Do not backprop to f(x).
    ```
      }];
    
      let arguments = (ins
        Variadic<TF_Tensor>:$input
      );
    
    - Last Modified: Tue Jun 11 23:24:08 UTC 2024
    - 793K bytes
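
    Results 4 and 5 quote the same IdentityN docstring: gradient_override_map lets a graph substitute a registered gradient g for an op's default one, here to stop backprop into f(x). A runnable sketch of that trick with an arbitrary stand-in g(dy) = 2 * dy (graph mode is required; the override map and tf.gradients have no eager equivalent):

    ```python
    import tensorflow as tf

    # Stand-in for g; the override name matches the docstring above.
    @tf.RegisterGradient('OverrideGradientWithG')
    def _apply_g(op, dy, _):
      del op  # unused
      return [None, 2.0 * dy]  # no gradient to f(x); g(dy) = 2 * dy to x

    graph = tf.Graph()
    with graph.as_default():  # gradient overrides only apply in graph mode
      x = tf.constant(3.0)
      with graph.gradient_override_map({'IdentityN': 'OverrideGradientWithG'}):
        y, _ = tf.identity_n([tf.square(x), x])
      dx, = tf.gradients(y, x)

    with tf.compat.v1.Session(graph=graph) as sess:
      print(sess.run(dx))  # 2.0, not d(x**2)/dx = 6.0
    ```

    With the override in place, the unit gradient arriving at IdentityN is routed through g and credited directly to x, so dx evaluates to 2.0 rather than the 6.0 that backprop through x**2 at x = 3 would produce.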