Search Options

Results per page
Sort
Preferred Languages
Advanced

Results 1 - 7 of 7 for L2Loss (0.07 sec)

  1. tensorflow/compiler/mlir/tensorflow/transforms/lower_tf.td

    //===----------------------------------------------------------------------===//
    // L2Loss op patterns.
    //===----------------------------------------------------------------------===//
    
    def GetAllAxes : NativeCodeCall<
      "GetI64ElementsAttrForSeq("
      "0, $0.getType().cast<RankedTensorType>().getRank(), &$_builder)">;
    
    // L2Loss is lowered using the formula,
    // L2Loss(input) = Sum(input * input) / 2
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jun 04 13:30:42 UTC 2024
    - 24.7K bytes
    - Viewed (0)
  2. tensorflow/cc/gradients/nn_grad_test.cc

    using ops::Conv2D;
    using ops::Conv2DBackpropInput;
    using ops::DepthwiseConv2dNative;
    using ops::Elu;
    using ops::FractionalAvgPool;
    using ops::FractionalMaxPool;
    using ops::FusedBatchNormV3;
    using ops::L2Loss;
    using ops::LogSoftmax;
    using ops::LRN;
    using ops::MaxPool;
    using ops::MaxPool3D;
    using ops::MaxPoolV2;
    using ops::Placeholder;
    using ops::Relu;
    using ops::Relu6;
    using ops::Selu;
    using ops::Softmax;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 22 20:45:22 UTC 2022
    - 15K bytes
    - Viewed (0)
  3. tensorflow/compiler/jit/tests/opens2s_gnmt_mixed_precision.golden_summary

     TensorArrayV3 8
     TensorArrayWriteV3 9
     Unique 2
     VariableV2 164
     _Retval 5
    cluster 0 size 440
     Abs 40
     AddN 1
     Any 41
     Cast 40
     ConcatV2 2
     Const 95
     ExpandDims 2
     IsInf 1
     IsNan 40
     L2Loss 40
     LogicalOr 1
     Max 41
     Minimum 1
     Mul 82
     Pack 3
     Reciprocal 2
     Reshape 2
     ReverseSequence 1
     Sqrt 1
     Sum 1
     Transpose 3
    cluster 1 size 86
     BroadcastGradientArgs 1
     Cast 5
     ConcatV2 1
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Jan 06 10:38:14 UTC 2023
    - 5K bytes
    - Viewed (0)
  4. tensorflow/cc/gradients/nn_grad.cc

                      std::vector<Output>* grad_outputs) {
      grad_outputs->push_back(Mul(scope, op.input(0), grad_inputs[0]));
      return scope.status();
    }
    REGISTER_GRADIENT_OP("L2Loss", L2LossGrad);
    
    Status BiasAddGradHelper(const Scope& scope, const Operation& op,
                             const std::vector<Output>& grad_inputs,
                             std::vector<Output>* grad_outputs) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 27 23:34:33 UTC 2022
    - 24.5K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/tensorflow/tests/lower_tf.mlir

      %0 = "tf.L2Loss"(%arg0) : (tensor<?x?xf32>) -> tensor<f32>
    
      // CHECK: return %[[LOSS]] : tensor<f32>
      func.return %0 : tensor<f32>
    }
    
    // CHECK-LABEL: func @l2_loss_unranked
    func.func @l2_loss_unranked(%arg0: tensor<*xf32>) -> tensor<f32> {
      // CHECK: tf.L2Loss
      %0 = "tf.L2Loss"(%arg0) : (tensor<*xf32>) -> tensor<f32>
      func.return %0 : tensor<f32>
    }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Jan 05 18:35:42 UTC 2024
    - 92K bytes
    - Viewed (0)
  6. tensorflow/compiler/jit/mark_for_compilation_pass.cc

    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 21 12:19:41 UTC 2024
    - 85.3K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/tensorflow/ir/tf_ops.td

      );
    
      TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
    }
    
    
    def TF_L2LossOp : TF_Op<"L2Loss", [Pure]> {
      let summary = "L2 Loss.";
    
      let description = [{
    Computes half the L2 norm of a tensor without the `sqrt`:
    
        output = sum(t ** 2) / 2
      }];
    
      let arguments = (ins
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Apr 24 04:08:35 UTC 2024
    - 90.5K bytes
    - Viewed (0)
Back to top