Results 1 - 3 of 3 for L2Loss (0.09 sec)

  1. tensorflow/compiler/mlir/tensorflow/transforms/lower_tf.td

    //===----------------------------------------------------------------------===//
    // L2Loss op patterns.
    //===----------------------------------------------------------------------===//
    
    def GetAllAxes : NativeCodeCall<
      "GetI64ElementsAttrForSeq("
      "0, $0.getType().cast<RankedTensorType>().getRank(), &$_builder)">;
    
    // L2Loss is lowered using the formula,
    // L2Loss(input) = Sum(input * input) / 2
    
    - Last Modified: Tue Jun 04 13:30:42 UTC 2024
    - 24.7K bytes
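
The lowering comment above states the op's whole math: square the input elementwise, sum over every axis, halve the result. As a plain illustration of that formula (a standalone sketch with a hypothetical helper name, not TensorFlow or MLIR code):

    #include <iostream>
    #include <numeric>
    #include <vector>

    // Sketch of the formula only: L2Loss(input) = Sum(input * input) / 2.
    float L2LossFormula(const std::vector<float>& input) {
      float sum_of_squares =
          std::inner_product(input.begin(), input.end(), input.begin(), 0.0f);
      return sum_of_squares / 2.0f;
    }

    int main() {
      std::vector<float> x = {1.0f, 2.0f, 3.0f};
      std::cout << L2LossFormula(x) << "\n";  // (1 + 4 + 9) / 2 = 7
    }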
  2. tensorflow/cc/gradients/nn_grad_test.cc

    using ops::Conv2D;
    using ops::Conv2DBackpropInput;
    using ops::DepthwiseConv2dNative;
    using ops::Elu;
    using ops::FractionalAvgPool;
    using ops::FractionalMaxPool;
    using ops::FusedBatchNormV3;
    using ops::L2Loss;
    using ops::LogSoftmax;
    using ops::LRN;
    using ops::MaxPool;
    using ops::MaxPool3D;
    using ops::MaxPoolV2;
    using ops::Placeholder;
    using ops::Relu;
    using ops::Relu6;
    using ops::Selu;
    using ops::Softmax;
    - Last Modified: Tue Mar 22 20:45:22 UTC 2022
    - 15K bytes
  3. tensorflow/cc/gradients/nn_grad.cc

    Status L2LossGrad(const Scope& scope, const Operation& op,
                      const std::vector<Output>& grad_inputs,
                      std::vector<Output>* grad_outputs) {
      // dL2Loss/dinput = input, so scale the input by the incoming gradient.
      grad_outputs->push_back(Mul(scope, op.input(0), grad_inputs[0]));
      return scope.status();
    }
    REGISTER_GRADIENT_OP("L2Loss", L2LossGrad);
    
    Status BiasAddGradHelper(const Scope& scope, const Operation& op,
                             const std::vector<Output>& grad_inputs,
                             std::vector<Output>* grad_outputs) {
    - Last Modified: Fri May 27 23:34:33 UTC 2022
    - 24.5K bytes
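
The snippet shows why the gradient is a single multiply: since L2Loss(x) = Sum(x * x) / 2, the derivative with respect to each input element is the element itself, and the chain rule scales it by the incoming gradient, which is exactly what Mul(scope, op.input(0), grad_inputs[0]) computes. REGISTER_GRADIENT_OP then files the function under the op's name in a gradient registry consulted during backprop. A minimal sketch of that registration pattern (all names below are hypothetical stand-ins, not TensorFlow's registry API):

    #include <functional>
    #include <iostream>
    #include <map>
    #include <string>
    #include <vector>

    // A gradient function maps (forward inputs, incoming grads) to input grads.
    using GradFn = std::function<std::vector<double>(
        const std::vector<double>&, const std::vector<double>&)>;

    // Global name -> gradient-function registry, filled at static init time.
    std::map<std::string, GradFn>& GradRegistry() {
      static std::map<std::string, GradFn> registry;
      return registry;
    }

    struct GradRegistrar {
      GradRegistrar(const std::string& op, GradFn fn) { GradRegistry()[op] = fn; }
    };

    // Analogue of L2LossGrad: d(Sum(x^2)/2)/dx = x, scaled by the scalar
    // incoming gradient.
    std::vector<double> L2LossGradSketch(const std::vector<double>& inputs,
                                         const std::vector<double>& grads) {
      std::vector<double> out(inputs.size());
      for (size_t i = 0; i < inputs.size(); ++i) out[i] = inputs[i] * grads[0];
      return out;
    }

    static GradRegistrar l2loss_reg("L2Loss", L2LossGradSketch);

    int main() {
      auto grad = GradRegistry().at("L2Loss")({1.0, 2.0, 3.0}, {1.0});
      for (double g : grad) std::cout << g << " ";  // prints: 1 2 3
    }

The real nn_grad.cc differs in that it threads a Scope through each gradient function and emits graph ops rather than computing values eagerly, but the registration shape is the same.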