Results 1 - 10 of 10 for LogSoftmax (0.14 sec)

  1. tensorflow/cc/gradients/nn_grad_test.cc

    using ops::Conv2DBackpropInput;
    using ops::DepthwiseConv2dNative;
    using ops::Elu;
    using ops::FractionalAvgPool;
    using ops::FractionalMaxPool;
    using ops::FusedBatchNormV3;
    using ops::L2Loss;
    using ops::LogSoftmax;
    using ops::LRN;
    using ops::MaxPool;
    using ops::MaxPool3D;
    using ops::MaxPoolV2;
    using ops::Placeholder;
    using ops::Relu;
    using ops::Relu6;
    using ops::Selu;
    using ops::Softmax;
    using ops::Softplus;
    - Last Modified: Tue Mar 22 20:45:22 UTC 2022
    - 15K bytes
  2. tensorflow/cc/gradients/nn_grad.cc

        auto multiply_result = Multiply(scope, subtraction_result, logits_softmax);
        grad = Add(scope, grad, multiply_result);
      }
      auto minus_log_softmax = Multiply(scope, LogSoftmax(scope, logits), -1.0f);
      grad_outputs->push_back(grad);
      grad_outputs->push_back(BroadcastMul(scope, grad_loss, minus_log_softmax));
      return scope.status();
    }
    REGISTER_GRADIENT_OP("SoftmaxCrossEntropyWithLogits",
    - Last Modified: Fri May 27 23:34:33 UTC 2022
    - 24.5K bytes
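    The snippet is the tail of the SoftmaxCrossEntropyWithLogits gradient: the first push_back is the gradient with respect to logits, and the second is the gradient with respect to labels, which is -LogSoftmax(logits) scaled by the incoming loss gradient. Below is a minimal numpy sketch of the two first-order terms; the helper name softmax_xent_grads is illustrative, and the Multiply/Add lines above handle an additional second-order term this sketch omits.

        import numpy as np

        def log_softmax(logits):
            # Stable log-softmax over the last axis: shift by the row max first.
            shifted = logits - logits.max(axis=-1, keepdims=True)
            return shifted - np.log(np.exp(shifted).sum(axis=-1, keepdims=True))

        def softmax_xent_grads(logits, labels, grad_loss):
            # d(loss)/d(logits) = grad_loss * (softmax(logits) - labels),
            # assuming each labels row sums to 1.
            softmax = np.exp(log_softmax(logits))
            grad_logits = grad_loss[:, None] * (softmax - labels)
            # d(loss)/d(labels) = grad_loss * -log_softmax(logits), i.e. the
            # BroadcastMul(grad_loss, minus_log_softmax) term in the snippet.
            grad_labels = grad_loss[:, None] * (-log_softmax(logits))
            return grad_logits, grad_labels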
  3. tensorflow/compiler/mlir/tensorflow/tests/lower_tf.mlir

      // CHECK: return %[[RESULT]]
      %0 = "tf.LogSoftmax"(%arg0) : (tensor<2x3xf32>) -> tensor<2x3xf32>
      func.return %0: tensor<2x3xf32>
    }
    
    // CHECK-LABEL: func @unranked_logsoftmax
    func.func @unranked_logsoftmax(%arg0: tensor<*xf32>) -> tensor<*xf32> {
      // CHECK-NOT: "tf.LogSoftmax"
      %0 = "tf.LogSoftmax"(%arg0) : (tensor<*xf32>) -> tensor<*xf32>
      func.return %0: tensor<*xf32>
    }
    
    - Last Modified: Fri Jan 05 18:35:42 UTC 2024
    - 92K bytes
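    One reading of this test: the CHECK-NOT asserts that no tf.LogSoftmax survives the pass, so even the unranked tensor<*xf32> case is decomposed. That works because the decomposition can reduce over the innermost axis addressed as -1, which does not require a statically known rank. A hedged numpy analogue:

        import numpy as np

        def lowered_log_softmax(logits):
            # Reducing over axis -1 is valid for any rank >= 1, so the same
            # rewrite applies without knowing the rank up front.
            shifted = logits - logits.max(axis=-1, keepdims=True)
            return shifted - np.log(np.exp(shifted).sum(axis=-1, keepdims=True))

        rng = np.random.default_rng(0)
        for shape in [(3,), (2, 3), (2, 2, 3)]:  # same code for every rank
            x = rng.normal(size=shape)
            probs = np.exp(lowered_log_softmax(x))
            np.testing.assert_allclose(probs.sum(axis=-1), 1.0, rtol=1e-6)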
  4. tensorflow/compiler/mlir/tensorflow/transforms/lower_tf.td

    // computes loss and backprop of the loss with respect to 'features'.
    //
    // Softmax cross entropy loss is defined as follows:
    //
    //  loss = Sum(-labels * Log(Exp(features) / Sum(Exp(features))))
    //  loss = Sum(-labels * LogSoftmax(features))
    //
    // Computing gradient of the loss with respect to features gives us,
    //
    //  backprop = (Exp(features) / Sum(Exp(features))) - labels
    //  backprop = Softmax(features) - labels
    //
    - Last Modified: Tue Jun 04 13:30:42 UTC 2024
    - 24.7K bytes
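    Both pairs of identities are easy to check numerically. The sketch below verifies loss = Sum(-labels * LogSoftmax(features)) and confirms backprop = Softmax(features) - labels against a finite difference; the shapes and the shift-by-max step are illustrative, not from the pass.

        import numpy as np

        rng = np.random.default_rng(0)
        features = rng.normal(size=(4, 3))
        labels = rng.dirichlet(np.ones(3), size=4)      # each row sums to 1

        shifted = features - features.max(axis=1, keepdims=True)
        log_softmax = shifted - np.log(np.exp(shifted).sum(axis=1, keepdims=True))
        softmax = np.exp(log_softmax)

        loss = (-labels * log_softmax).sum(axis=1)      # Sum(-labels * LogSoftmax)
        backprop = softmax - labels                     # Softmax(features) - labels

        # Finite-difference check of backprop[0, 0]:
        eps = 1e-6
        bumped = features.copy()
        bumped[0, 0] += eps
        s = bumped - bumped.max(axis=1, keepdims=True)
        ls = s - np.log(np.exp(s).sum(axis=1, keepdims=True))
        loss_b = (-labels * ls).sum(axis=1)
        assert abs((loss_b[0] - loss[0]) / eps - backprop[0, 0]) < 1e-4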
  5. tensorflow/compiler/jit/mark_for_compilation_pass.cc

    - Last Modified: Wed Feb 21 12:19:41 UTC 2024
    - 85.3K bytes
  6. tensorflow/compiler/mlir/tensorflow/transforms/lower_tf.cc

              concat_dim);
          result = concat_op.getResult();
        }
    
        rewriter.replaceOp(op, result);
        return success();
      }
    };
    
    // Decomposes Softmax and LogSoftmax to primitive TF ops, using the following
    // formulas:
    //
    //     softmax = div(exp(logits), sum(exp(logits)))
    //     log_softmax = sub(logits, log(sum(exp(logits))))
    //
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 74.9K bytes
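    Transcribing the comment's two formulas directly in numpy (per the comment, the pass builds this same computation out of primitive TF ops; production lowerings typically also subtract the row max before exponentiating so exp cannot overflow):

        import numpy as np

        def decomposed_softmax(logits):
            # softmax = div(exp(logits), sum(exp(logits)))
            e = np.exp(logits)
            return e / e.sum(axis=-1, keepdims=True)

        def decomposed_log_softmax(logits):
            # log_softmax = sub(logits, log(sum(exp(logits))))
            return logits - np.log(np.exp(logits).sum(axis=-1, keepdims=True))

        x = np.array([[0.5, 1.0, -2.0]])
        np.testing.assert_allclose(
            np.log(decomposed_softmax(x)), decomposed_log_softmax(x), rtol=1e-6)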
  7. tensorflow/compiler/mlir/tensorflow/tests/canonicalize.mlir

      %1 = "tf.Log"(%0) {device = "/job:localhost/replica:0/task:0/device:GPU:0"} : (tensor<8x16xf32>) -> tensor<8x16xf32>
      func.return %1: tensor<8x16xf32>
    
      // CHECK: %0 = "tf.LogSoftmax"(%arg0) {device = "/job:localhost/replica:0/task:0/device:GPU:0"} : (tensor<8x16xf32>) -> tensor<8x16xf32>
      // CHECK: return %0
    }
    
    // CHECK-LABEL: testLogToLog1p
    - Last Modified: Thu May 09 22:07:10 UTC 2024
    - 132.1K bytes
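    The snippet is cut off at the top: the %0 feeding tf.Log is a tf.Softmax of %arg0, and the CHECK lines verify the canonicalizer fuses Log(Softmax(x)) into a single tf.LogSoftmax. The two are mathematically equal, but the fused op can use the shift-by-max form and stay finite where the unfused chain overflows. A small numpy demonstration (the extreme inputs are chosen purely to force the overflow):

        import numpy as np

        x = np.array([[1000.0, 0.0, -1000.0]])

        # Unfused Log(Softmax(x)): exp(1000) overflows to inf, giving nan/-inf
        # (numpy emits an overflow warning but keeps running).
        softmax = np.exp(x) / np.exp(x).sum(axis=-1, keepdims=True)
        unfused = np.log(softmax)               # [[ nan, -inf, -inf]]

        # Fused LogSoftmax via shift-by-max stays finite.
        shifted = x - x.max(axis=-1, keepdims=True)
        fused = shifted - np.log(np.exp(shifted).sum(axis=-1, keepdims=True))
        print(fused)                            # approx. [[0., -1000., -2000.]]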
  8. tensorflow/compiler/mlir/lite/tests/legalize-tf.mlir

      func.return %0 : tensor<8x16xf32>
    // CHECK-LABEL: log
    // CHECK:  "tfl.log"(%arg0) : (tensor<8x16xf32>) -> tensor<8x16xf32>
    }
    
    func.func @log_softmax(%arg0: tensor<8x16xf32>) -> tensor<8x16xf32> {
      %0 = "tf.LogSoftmax"(%arg0) : (tensor<8x16xf32>) -> tensor<8x16xf32>
      func.return %0 : tensor<8x16xf32>
    // CHECK-LABEL: log_softmax
    // CHECK:  "tfl.log_softmax"(%arg0) : (tensor<8x16xf32>) -> tensor<8x16xf32>
    }
    
    - Last Modified: Wed Jun 05 01:54:33 UTC 2024
    - 153.4K bytes
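    The test asserts a one-to-one legalization from tf.LogSoftmax to tfl.log_softmax. Here is a hedged end-to-end sketch of exercising that path from Python; from_concrete_functions is the documented converter entry point, though whether the resulting model keeps a dedicated LOG_SOFTMAX op can vary by converter version and options.

        import tensorflow as tf

        class LogSoftmaxModule(tf.Module):
            @tf.function(input_signature=[tf.TensorSpec([8, 16], tf.float32)])
            def __call__(self, x):
                # tf.nn.log_softmax produces the tf.LogSoftmax op.
                return tf.nn.log_softmax(x)

        m = LogSoftmaxModule()
        converter = tf.lite.TFLiteConverter.from_concrete_functions(
            [m.__call__.get_concrete_function()], m)
        tflite_model = converter.convert()      # bytes of the .tflite flatbuffer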
  9. tensorflow/compiler/mlir/tensorflow/ir/tf_generated_ops.td

          return ArraysAreCastCompatible(inferred, actual);
        }
      }];
    }
    
    def TF_LogSoftmaxOp : TF_Op<"LogSoftmax", [Pure, TF_SameOperandsAndResultTypeResolveRef]> {
      let summary = "Computes log softmax activations.";
    
      let description = [{
    For each batch `i` and class `j` we have
    
        logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i])))
      }];
    
      let arguments = (ins
    - Last Modified: Tue Jun 11 23:24:08 UTC 2024
    - 793K bytes
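    The op description's indexed formula can be checked directly against tf.nn.log_softmax, the public API for this op:

        import numpy as np
        import tensorflow as tf

        logits = np.random.default_rng(0).normal(size=(4, 5)).astype(np.float32)

        # logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i])))
        manual = logits - np.log(np.exp(logits).sum(axis=1, keepdims=True))

        np.testing.assert_allclose(
            manual, tf.nn.log_softmax(logits).numpy(), rtol=1e-5, atol=1e-5)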
  10. RELEASE.md

        stateless and do not touch any resources.
    *   Refactors code in Quant8 LSTM support to reduce TFLite binary size.
    *   Add support of local soft device placement for eager op.
    *   Add HW acceleration support for `LogSoftMax`.
    *   Added a function `nested_value_rowids` for ragged tensors.
    *   Add guard to avoid acceleration of L2 Normalization with input rank != 4.
    *   Add `tf.math.cumulative_logsumexp` operation.
    *   Add `tf.ragged.stack`.
    - Last Modified: Tue Jun 11 23:24:08 UTC 2024
    - 730.3K bytes