Results 1 - 10 of 11 for relu6_grad (0.25 sec)

  1. tensorflow/compiler/mlir/tfr/examples/mnist/ops_defs.py

    
    @tf.RegisterGradient('NewConv2D')
    def _conv_add_relu_grad(op: ops.Operation, grad):
      act = op.get_attr('act')
      y = op.outputs[0]
      if act == 'RELU':
        grad = gen_nn_ops.relu_grad(grad, y)
      elif act == 'RELU6':
        grad = gen_nn_ops.relu6_grad(grad, y)
      elif act == 'TANH':
        y = math_ops.conj(y)
        grad = gen_math_ops.tanh_grad(y, grad)
    
      broadcast_shape = tf.shape(y)
      input_value_shape = tf.shape(op.inputs[2])
    - Last Modified: Thu Aug 31 20:23:51 UTC 2023
    - 6.8K bytes
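
    The excerpt above routes the fused op's backward pass to gen_nn_ops.relu6_grad when the activation attribute is RELU6. A minimal reference sketch of that op's documented behavior (a reimplementation for illustration, not the registered kernel): the upstream gradient passes through only where the forward relu6 output lies strictly between 0 and 6, since both clipped regions have zero slope.

      import tensorflow as tf

      def relu6_grad_reference(grad, y):
          # y is the forward relu6 output; zero the gradient in the
          # clipped regions y <= 0 and y >= 6, where relu6 is flat.
          mask = tf.logical_and(y > 0.0, y < 6.0)
          return grad * tf.cast(mask, grad.dtype)

      y = tf.nn.relu6(tf.constant([-1.0, 2.0, 7.0]))   # -> [0., 2., 6.]
      g = tf.ones_like(y)
      print(relu6_grad_reference(g, y).numpy())        # -> [0., 1., 0.]

    Passing the activation output y as the mask source is sound here because relu6's output lies inside (0, 6) exactly where its input does.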
  2. tensorflow/c/experimental/gradients/nn_grad.cc

        AbstractTensorHandle* upstream_grad = grad_outputs[0];
        AbstractTensorHandle* activations = forward_outputs_[0];
    
        // Calculate Grad
        std::string name = "relu_grad";
        TF_RETURN_IF_ERROR(ReluGrad(ctx, upstream_grad, activations,
                                    &grad_inputs[0], name.c_str()));
        return absl::OkStatus();
      }
      ~ReluGradientFunction() override {
    - Last Modified: Tue Apr 09 06:38:45 UTC 2024
    - 5.7K bytes
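
    Note that ReluGradientFunction keeps only forward_outputs_[0] (the activations), not the forward input: relu's output is positive exactly where its input is, so the output alone determines the backward mask. A sketch of the assumed semantics in Python (illustrative, not the C API implementation):

      import tensorflow as tf

      def relu_grad_reference(upstream_grad, activations):
          # Gradient flows wherever the forward relu output is positive.
          return upstream_grad * tf.cast(activations > 0.0, upstream_grad.dtype)

      acts = tf.nn.relu(tf.constant([-3.0, 0.0, 5.0]))      # -> [0., 0., 5.]
      print(relu_grad_reference(tf.ones(3), acts).numpy())  # -> [0., 0., 1.]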
  3. tensorflow/compiler/jit/tests/keras_imagenet_main_graph_mode.golden_summary

     Conv2DBackpropInput 52
     DivNoNan 1
     Equal 1
     FusedBatchNorm 53
     FusedBatchNormGrad 53
     Identity 2
     MatMul 3
     MaxPool 1
     MaxPoolGrad 1
     Mean 1
     Mul 164
     Pad 1
     ReadVariableOp 646
     Relu 49
     ReluGrad 49
     Reshape 2
     ResourceApplyKerasMomentum 161
     ShapeN 50
     Softmax 1
     SparseSoftmaxCrossEntropyWithLogits 1
     Square 55
     Squeeze 1
     Sub 106
     Sum 57
     Tile 1
    - Last Modified: Fri Jan 06 10:38:14 UTC 2023
    - 740 bytes
  4. tensorflow/compiler/jit/tests/keras_imagenet_main.golden_summary

     Conv2DBackpropFilter 53
     Conv2DBackpropInput 52
     Equal 1
     FusedBatchNormGradV2 53
     FusedBatchNormV2 53
     MatMul 3
     MaxPool 1
     MaxPoolGrad 1
     Mean 1
     Mul 218
     Pad 2
     ReadVariableOp 538
     Relu 49
     ReluGrad 49
     Reshape 2
     ResourceApplyKerasMomentum 161
     Slice 1
     Softmax 1
     SparseSoftmaxCrossEntropyWithLogits 1
     Squeeze 1
     Sum 1
     Tile 1
     Transpose 1
    cluster 1 size 815
     AddN 1
     AssignAddVariableOp 1
    - Last Modified: Fri Jan 06 10:38:14 UTC 2023
    - 874 bytes
  5. tensorflow/compiler/mlir/tf2xla/transforms/legalize_tf_patterns.td

              (MHLO_ClampOp (MHLO_ConstantOp (GetScalarOfType<0> $input)), $input,
                           (MHLO_ConstantOp (GetScalarOfType<6> $input))),
              [(TF_IntOrFpTensor $input)]>;
    
    // ReluGrad(gradients, features) = gradients * (features > 0)
    // The condition that $gradients and $features need to have the same shape is
    // implicitly enforced: $zero is created to have the same shape as $features,
    - Last Modified: Mon May 06 18:46:23 UTC 2024
    - 34.8K bytes
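
    The pattern above lowers tf.Relu6 to an mhlo.clamp between scalar constants 0 and 6, and the trailing comment states the identity the ReluGrad pattern implements. Both are easy to check numerically through the public TensorFlow API (a quick sanity sketch, not part of the pattern file):

      import tensorflow as tf

      x = tf.constant([-2.0, 3.0, 9.0])
      # Relu6 is exactly a clamp to [0, 6].
      print(tf.nn.relu6(x).numpy())                 # [0. 3. 6.]
      print(tf.clip_by_value(x, 0.0, 6.0).numpy())  # [0. 3. 6.]

      # ReluGrad(gradients, features) = gradients * (features > 0).
      g = tf.constant([1.0, 2.0, 3.0])
      f = tf.constant([-1.0, 0.0, 4.0])
      print(tf.raw_ops.ReluGrad(gradients=g, features=f).numpy())  # [0. 0. 3.]
      print((g * tf.cast(f > 0.0, g.dtype)).numpy())               # [0. 0. 3.]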
  6. tensorflow/compiler/jit/mark_for_compilation_pass.cc

    - Last Modified: Wed Feb 21 12:19:41 UTC 2024
    - 85.3K bytes
  7. tensorflow/compiler/mlir/tensorflow/tests/lower_tf.mlir

        %0 = "tf.Selu"(%arg0) : (tensor<1x4x4x3xf32>) -> tensor<1x4x4x3xf32>
        func.return %0 : tensor<1x4x4x3xf32>
    }
    
    // CHECK-LABEL: func @selu_grad
    // CHECK-SAME: (%[[GRADIENTS:.*]]: tensor<4x8xf32>, %[[FEATURES:.*]]: tensor<4x8xf32>) -> tensor<4x8xf32> {
    func.func @selu_grad(%gradients: tensor<4x8xf32>, %features: tensor<4x8xf32>) -> tensor<4x8xf32> {
    - Last Modified: Fri Jan 05 18:35:42 UTC 2024
    - 92K bytes
  8. tensorflow/compiler/mlir/lite/stablehlo/tests/legalize_hlo.mlir

      %3 = "chlo.broadcast_maximum"(%2, %0) {broadcast_dimensions = array<i64>} : (tensor<?xi32>, tensor<i32>) -> tensor<?xi32>
      func.return %3 : tensor<?xi32>
    }
    
    // CHECK-LABEL:   func @relu_grad(
    // CHECK-SAME:                    %[[VAL_0:.*]]: tensor<4x8xf32>,
    // CHECK-SAME:                    %[[VAL_1:.*]]: tensor<?x?xf32>) -> tensor<4x8xf32> {
    - Last Modified: Wed May 29 07:26:59 UTC 2024
    - 340.2K bytes
  9. tensorflow/compiler/mlir/tf2xla/tests/legalize-tf.mlir

      %0 = "tf.Elu"(%arg0) : (tensor<1xf32>) -> tensor<1xf32>
      func.return %0: tensor<1xf32>
    }
    
    // -----
    
    // CHECK-LABEL: func @elu_grad
    // CHECK-SAME: (%[[GRADIENTS:.*]]: tensor<4x8xf32>, %[[FEATURES:.*]]: tensor<?x?xf32>)
    func.func @elu_grad(%gradients: tensor<4x8xf32>, %features: tensor<?x?xf32>) -> tensor<4x8xf32> {
      // CHECK-DAG: %[[ZERO:.*]] = mhlo.constant dense<0.000000e+00> : tensor<f32>
    - Last Modified: Mon May 06 18:46:23 UTC 2024
    - 335.5K bytes
  10. tensorflow/compiler/mlir/tensorflow/tests/shape_inference.mlir

      func.func @testSameOperandsAndResultTypeResolveRefBinary(%lhs: tensor<2x3x?x?xf32>, %rhs: tensor<2x?x5x?xf32>) -> (tensor<?x?x?x?xf32>) {
        // CHECK: (tensor<2x3x?x?xf32>, tensor<2x?x5x?xf32>) -> tensor<2x3x5x?xf32>
        %0 = "tf.ReluGrad"(%lhs, %rhs) : (tensor<2x3x?x?xf32>, tensor<2x?x5x?xf32>) -> tensor<?x?x?x?xf32>
        func.return %0 : tensor<?x?x?x?xf32>
      }
    
      // CHECK-LABEL: func @test_xla_sharding
    - Last Modified: Tue Jan 23 17:24:10 UTC 2024
    - 167.4K bytes
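
    The CHECK line above relies on tf.ReluGrad being a same-operands-and-result-type op: shape inference merges the operand shapes dimension by dimension, each ? taking the known size from the other operand, so tensor<2x3x?x?xf32> and tensor<2x?x5x?xf32> refine the result to tensor<2x3x5x?xf32>. A toy sketch of that pointwise merge (a hypothetical helper, not the MLIR implementation; None plays the role of ?):

      def merge_shapes(a, b):
          # Take the known size from whichever side has one;
          # known sizes must agree, unknowns stay unknown.
          merged = []
          for x, y in zip(a, b):
              if x is not None and y is not None and x != y:
                  raise ValueError(f"incompatible dims {x} and {y}")
              merged.append(x if x is not None else y)
          return merged

      print(merge_shapes([2, 3, None, None], [2, None, 5, None]))
      # -> [2, 3, 5, None]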