Results 1 - 5 of 5 for relu_grad (0.18 sec)

  1. tensorflow/c/experimental/gradients/nn_grad.cc

        AbstractTensorHandle* upstream_grad = grad_outputs[0];
        AbstractTensorHandle* activations = forward_outputs_[0];
    
        // Calculate Grad
        std::string name = "relu_grad";
        TF_RETURN_IF_ERROR(ReluGrad(ctx, upstream_grad, activations,
                                    &grad_inputs[0], name.c_str()));
        return absl::OkStatus();
      }
      ~ReluGradientFunction() override {
    - Last Modified: Tue Apr 09 06:38:45 UTC 2024
    - 5.7K bytes
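
    Note: the snippet passes the forward ReLU output ("activations") rather than the
    original input; that is equivalent because relu(x) > 0 exactly where x > 0. A
    minimal NumPy sketch of the gradient this op computes (an illustration under that
    assumption, not the TensorFlow kernel):

        import numpy as np

        def relu_grad(upstream_grad, activations):
            # Pass the upstream gradient through wherever the forward
            # ReLU output was positive; block it everywhere else.
            return upstream_grad * (activations > 0)

        x = np.array([-2.0, -0.5, 0.0, 1.5, 3.0])
        y = np.maximum(x, 0.0)       # forward ReLU
        dy = np.ones_like(x)         # upstream gradient of ones
        print(relu_grad(dy, y))      # -> [0. 0. 0. 1. 1.]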
  2. tensorflow/compiler/mlir/lite/stablehlo/tests/legalize_hlo.mlir

      %3 = "chlo.broadcast_maximum"(%2, %0) {broadcast_dimensions = array<i64>} : (tensor<?xi32>, tensor<i32>) -> tensor<?xi32>
      func.return %3 : tensor<?xi32>
    }
    
    // CHECK-LABEL:   func @relu_grad(
    // CHECK-SAME:                    %[[VAL_0:.*]]: tensor<4x8xf32>,
    // CHECK-SAME:                    %[[VAL_1:.*]]: tensor<?x?xf32>) -> tensor<4x8xf32> {
    - Last Modified: Wed May 29 07:26:59 UTC 2024
    - 340.2K bytes
  3. tensorflow/compiler/mlir/tf2xla/transforms/legalize_tf_patterns.td

              (MHLO_ClampOp (MHLO_ConstantOp (GetScalarOfType<0> $input)), $input,
                           (MHLO_ConstantOp (GetScalarOfType<6> $input))),
              [(TF_IntOrFpTensor $input)]>;
    
    // ReluGrad(gradients, features) = gradients * (features > 0)
    // The condition that $gradients and $features need to have the same shape is
    // implicitly enforced: $zero is created to have the same shape as $features,
    - Last Modified: Mon May 06 18:46:23 UTC 2024
    - 34.8K bytes
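
    Note: the clamp with scalar bounds 0 and 6 at the top of this fragment is the
    usual Relu6 lowering, min(max(x, 0), 6), and the comment states the ReluGrad
    formula directly. A quick NumPy check of both (illustration only):

        import numpy as np

        x = np.array([-1.0, 2.0, 7.0])
        print(np.clip(x, 0.0, 6.0))     # Relu6 via clamp -> [0. 2. 6.]

        gradients = np.array([10.0, 10.0, 10.0])
        print(gradients * (x > 0))      # gradients * (features > 0) -> [ 0. 10. 10.]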
  4. tensorflow/compiler/mlir/tf2xla/tests/legalize-tf.mlir

      %0 = "tf.Elu"(%arg0) : (tensor<1xf32>) -> tensor<1xf32>
      func.return %0: tensor<1xf32>
    }
    
    // -----
    
    // CHECK-LABEL: func @elu_grad
    // CHECK-SAME: (%[[GRADIENTS:.*]]: tensor<4x8xf32>, %[[FEATURES:.*]]: tensor<?x?xf32>)
    func.func @elu_grad(%gradients: tensor<4x8xf32>, %features: tensor<?x?xf32>) -> tensor<4x8xf32> {
      // CHECK-DAG: %[[ZERO:.*]] = mhlo.constant dense<0.000000e+00> : tensor<f32>
    - Last Modified: Mon May 06 18:46:23 UTC 2024
    - 335.5K bytes
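
    Note: for contrast with relu_grad, ELU (with alpha = 1) is x for x > 0 and
    exp(x) - 1 otherwise, so its derivative is 1 on the positive side and exp(x) on
    the non-positive side. A hand-rolled NumPy sketch of that math (not the mhlo
    lowering this test checks):

        import numpy as np

        def elu_grad(gradients, features):
            # Slope 1 where ELU is linear, exp(x) where it is exponential.
            return gradients * np.where(features > 0, 1.0, np.exp(features))

        x = np.array([-1.0, 0.5])
        print(elu_grad(np.ones_like(x), x))   # -> [0.36787944 1.        ]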
  5. tensorflow/compiler/mlir/tensorflow/ir/tf_generated_ops.td

        static bool isCompatibleReturnTypes(TypeRange inferred, TypeRange actual) {
          return ArraysAreCastCompatible(inferred, actual);
        }
      }];
    }
    
    def TF_Relu6GradOp : TF_Op<"Relu6Grad", [Pure, TF_SameOperandsAndResultTypeResolveRef]> {
      let summary = "Computes rectified linear 6 gradients for a Relu6 operation.";
    
      let arguments = (ins
    - Last Modified: Tue Jun 11 23:24:08 UTC 2024
    - 793K bytes
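
    Note: per the summary above, Relu6Grad lets the gradient through only where the
    forward Relu6 was in its linear region, i.e. strictly between the clamp bounds
    0 and 6. A NumPy sketch of that rule (an illustration, not the registered kernel):

        import numpy as np

        def relu6_grad(gradients, features):
            # Zero slope on both saturated sides of Relu6.
            return gradients * ((features > 0) & (features < 6))

        x = np.array([-1.0, 3.0, 8.0])
        print(relu6_grad(np.full_like(x, 5.0), x))   # -> [0. 5. 0.]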