Results 1 - 10 of 116 for Selu (0.03 sec)

  1. tensorflow/cc/gradients/nn_grad_test.cc

    using ops::DepthwiseConv2dNative;
    using ops::Elu;
    using ops::FractionalAvgPool;
    using ops::FractionalMaxPool;
    using ops::FusedBatchNormV3;
    using ops::L2Loss;
    using ops::LogSoftmax;
    using ops::LRN;
    using ops::MaxPool;
    using ops::MaxPool3D;
    using ops::MaxPoolV2;
    using ops::Placeholder;
    using ops::Relu;
    using ops::Relu6;
    using ops::Selu;
    using ops::Softmax;
    using ops::Softplus;
    using ops::Softsign;
    - Last Modified: Tue Mar 22 20:45:22 UTC 2022
    - 15K bytes
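The using declarations above belong to TensorFlow's C++ gradient-check suite: each listed op, Selu included, has its registered gradient validated against a numeric estimate. A minimal NumPy sketch of the same central-difference idea (the helper below is illustrative, not TensorFlow's API):

    import numpy as np

    def numeric_grad(f, x, eps=1e-3):
        # Central-difference estimate of df/dx for a scalar-valued f,
        # the same idea the C++ gradient checker uses to validate a
        # registered gradient such as Selu's.
        grad = np.zeros_like(x)
        for i in range(x.size):
            orig = x.flat[i]
            x.flat[i] = orig + eps
            f_plus = f(x)
            x.flat[i] = orig - eps
            f_minus = f(x)
            x.flat[i] = orig
            grad.flat[i] = (f_plus - f_minus) / (2.0 * eps)
        return grad

For an element-wise op the checked function reduces to a scalar first, e.g. lambda t: selu(t).sum() with selu as sketched under result 2 below.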
  2. tensorflow/compiler/mlir/tensorflow/transforms/lower_tf.td

                  (CreateTFShapeOp $input, $input, ConstBoolAttrTrue))),
              [(TensorOf<[TF_Int, TF_Float, TF_Complex]> $updates)]>;
    
    //===----------------------------------------------------------------------===//
    // Selu op patterns.
    //===----------------------------------------------------------------------===//
    
    def getScale : NativeCodeCall<
      "GetScalarOfType(getElementTypeOrSelf($0), 1.0507009873554804934193349852946)"
      >;
    
    - Last Modified: Tue Jun 04 13:30:42 UTC 2024
    - 24.7K bytes
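The literal passed to GetScalarOfType is the SELU scale constant from Klambauer et al. (2017); together with the companion alpha constant it fully determines the op this pattern lowers. A minimal NumPy reference (the constants come from the paper; only SCALE appears in the snippet above):

    import numpy as np

    # SELU constants (Klambauer et al., 2017). SCALE matches the literal
    # in the getScale pattern above.
    SCALE = 1.0507009873554804934193349852946
    ALPHA = 1.6732632423543772848170429916717

    def selu(x):
        # scale * x on the positive side, scale * alpha * (exp(x) - 1)
        # on the non-positive side.
        return np.where(x > 0, SCALE * x, SCALE * ALPHA * (np.exp(x) - 1.0))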
  3. tensorflow/cc/gradients/nn_grad.cc

                          std::vector<Output>* grad_outputs) {
      auto dx = internal::ReluGrad(scope, grad_inputs[0], op.input(0));
      grad_outputs->push_back(dx);
      return scope.status();
    }
    REGISTER_GRADIENT_OP("Relu", ReluGradHelper);
    
    Status Relu6GradHelper(const Scope& scope, const Operation& op,
                           const std::vector<Output>& grad_inputs,
                           std::vector<Output>* grad_outputs) {
    - Last Modified: Fri May 27 23:34:33 UTC 2022
    - 24.5K bytes
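nn_grad.cc registers one such helper per activation, and the Selu registration follows the Relu pattern shown above. A hedged NumPy sketch of the derivative itself, written in terms of the op's outputs (a common formulation for SELU; the exact TensorFlow signature is not visible in this snippet):

    import numpy as np

    SCALE = 1.0507009873554805
    ALPHA = 1.6732632423543772

    def selu_grad(grad, outputs):
        # Where outputs > 0 the forward pass was SCALE * x, so the local
        # derivative is SCALE. Otherwise the forward pass was
        # SCALE * ALPHA * (exp(x) - 1), whose derivative
        # SCALE * ALPHA * exp(x) equals outputs + SCALE * ALPHA.
        return np.where(outputs > 0, grad * SCALE,
                        grad * (outputs + SCALE * ALPHA))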
  4. tensorflow/compiler/mlir/tensorflow/tests/lower_tf.mlir

      // CHECK-NOT: "tf.LogSoftmax"
      %0 = "tf.LogSoftmax"(%arg0) : (tensor<*xf32>) -> tensor<*xf32>
      func.return %0: tensor<*xf32>
    }
    
    // CHECK-LABEL: func @selu
    // CHECK-SAME:  (%[[FEATURES:.*]]: tensor<1x4x4x3xf32>) -> tensor<1x4x4x3xf32> {
    func.func @selu(%arg0: tensor<1x4x4x3xf32>) -> tensor<1x4x4x3xf32> {
        // CHECK-DAG:   %[[ZERO:.*]] = "tf.Const"() <{value = dense<0.000000e+00> : tensor<f32>}> : () -> tensor<f32>
    - Last Modified: Fri Jan 05 18:35:42 UTC 2024
    - 92K bytes
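The CHECK-DAG lines assert that tf.Selu disappears in favor of primitive ops: constants, an exponential, multiplies, and a final Select on features > 0. Element-wise, the lowered graph computes the following (a sketch; whether the pattern emits Exp or Expm1 is an assumption here):

    import numpy as np

    def lowered_selu(features):
        # The structure the test checks for: a zero constant feeding a
        # predicate, a scaled positive branch, an expm1-based negative
        # branch, and a Select combining the two.
        scale = 1.0507009873554805
        scaled_alpha = scale * 1.6732632423543772
        pred = features > 0.0
        return np.where(pred, scale * features,
                        scaled_alpha * np.expm1(features))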
  5. tensorflow/compiler/jit/mark_for_compilation_pass.cc

    - Last Modified: Wed Feb 21 12:19:41 UTC 2024
    - 85.3K bytes
  6. tensorflow/compiler/mlir/tensorflow/tests/fused_kernel_matcher.mlir

      // CHECK: %[[VAL_0:.*]] = "tf._FusedConv2D"(%arg2, %arg1, %arg0) <{data_format = "NHWC", dilations = [1, 1, 1, 1], epsilon = 0.000000e+00 : f32, explicit_paddings = [], fused_ops = ["BiasAdd", "Relu"], num_args = 1 : i64, operandSegmentSizes = array<i32: 1, 1, 1, 0>, padding = "SAME", strides = [1, 1, 1, 1], use_cudnn_on_gpu = true}> {TArgs = [f32]} : (tensor<8x32x32x3xf32>, tensor<1x1x3x128xf32>, tensor<128xf32>) -> tensor<*xf32>
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 13.2K bytes
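The matcher collapses a Conv2D followed by BiasAdd and Relu into one _FusedConv2D, recording the absorbed ops in the fused_ops attribute. The fusion is semantics-preserving; given the plain convolution output, the fused epilogue amounts to (a reference sketch assuming NHWC layout, as in the snippet):

    import numpy as np

    def fused_epilogue(conv_out, bias):
        # What fused_ops = ["BiasAdd", "Relu"] appends to the convolution:
        # a per-channel bias add (broadcast over the trailing channel axis
        # in NHWC) followed by Relu, run inside the single fused kernel.
        return np.maximum(conv_out + bias, 0.0)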
  7. tensorflow/compiler/mlir/tfr/resources/decomposition_lib.mlir

      %add = tfr.call @tf__add(%dot, %bias) : (!tfr.tensor, !tfr.tensor) -> !tfr.tensor
    
      %relu = tfr.constant "relu" -> !tfr.attr
      %relu6 = tfr.constant "relu6" -> !tfr.attr
    
      %is_relu = tfr.equal %act, %relu -> i1
      %res = scf.if %is_relu -> !tfr.tensor {
        %applied_relu = tfr.call @tf__relu(%add) : (!tfr.tensor) -> !tfr.tensor
        scf.yield %applied_relu : !tfr.tensor
    - Last Modified: Wed Oct 13 16:33:28 UTC 2021
    - 4.2K bytes
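The TFR library decomposes the composite by string-matching the activation attribute: tfr.equal against each constant, then an scf.if chain that applies tf__relu (or tf__relu6) or yields the biased result unchanged. A compact Python rendering of the same dispatch (the composite's full attribute set is an assumption; names mirror the snippet):

    import numpy as np

    def my_biased_dense(x, y, z, act='relu'):
        # Reference semantics of the composite: matmul plus bias, then an
        # activation chosen by the string attribute, mirroring the
        # tfr.equal / scf.if chain in decomposition_lib.mlir.
        add = x @ y + z
        if act == 'relu':
            return np.maximum(add, 0.0)
        if act == 'relu6':
            return np.clip(add, 0.0, 6.0)
        return add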
  8. tensorflow/compiler/mlir/tfr/integration/graph_decompose_test.py

        t3 = constant_op.constant([[-10.0, -10.0], [-10.0, -10.0]])
        sq = biased_dense(t1, t2, t3, act='relu')
        self.assertAllEqual(sq.numpy().reshape(-1), [0, 0, 5, 12])
    
      def testWithKnownKernel(self):
    
        @def_function.function
        def biasd_dense_elu(x, y, z):
          dot = gen_composite_ops.my_biased_dense(x, y, z)
          return nn_ops.elu(dot)  # with known kernel, should not expand.
    
        t1 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
    - Last Modified: Tue Sep 28 21:37:05 UTC 2021
    - 3.2K bytes
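The test ends in nn_ops.elu, and the inline comment explains why: Elu has a registered kernel, so the expansion pass should leave it alone rather than decompose it. For reference, Elu is the unscaled core of Selu (tf.nn.elu fixes alpha at 1):

    import numpy as np

    def elu(x, alpha=1.0):
        # Identity on the positive side, alpha * (exp(x) - 1) otherwise;
        # Selu is SCALE * elu(x, ALPHA) with the fixed SELU constants.
        return np.where(x > 0, x, alpha * (np.exp(x) - 1.0))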
  9. tensorflow/compiler/mlir/tfr/integration/node_expansion_test.py

        sq = gen_composite_ops.my_biased_dense(t1, t2, t3, act='relu')
        self.assertAllEqual(sq.numpy().reshape(-1), [0, 0, 5, 12])
    
      def testWithKnownKernel(self):
    
        def biasd_dense_elu(x, y, z):
          dot = gen_composite_ops.my_biased_dense(x, y, z)
          return nn_ops.elu(dot)  # with known kernel, should not expand.
    
        t1 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
    - Last Modified: Tue Sep 28 21:37:05 UTC 2021
    - 3.9K bytes
  10. tensorflow/compiler/mlir/tfr/tests/decompose.mlir

      %none_attr = tfr.constant "NONE" -> !tfr.attr
      %relu_attr = tfr.constant "RELU" -> !tfr.attr
      %relu6_attr = tfr.constant "RELU6" -> !tfr.attr
      %reluN1_1_attr = tfr.constant "RELU_N1_TO_1" -> !tfr.attr
      %none:2 = "tfr.quant_act_range"(%none_attr, %scale, %zp) : (!tfr.attr, f32, i64) -> (!tfr.tensor, !tfr.tensor)
      %relu:2 = "tfr.quant_act_range"(%relu_attr, %scale, %zp) : (!tfr.attr, f32, i64) -> (!tfr.tensor, !tfr.tensor)
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 16.7K bytes
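tfr.quant_act_range maps an activation attribute plus a (scale, zero-point) pair to the clamp bounds that realize that activation in the quantized domain. A hypothetical reference of that mapping for signed 8-bit values (the qmin/qmax defaults and the exact rounding are assumptions, not read from the pass):

    def quant_act_range(act, scale, zp, qmin=-128, qmax=127):
        # quantize(v) = round(v / scale) + zp; each activation becomes a
        # [min, max] clamp expressed in the quantized domain.
        def q(v):
            return int(round(v / scale)) + zp
        if act == 'NONE':
            return qmin, qmax
        if act == 'RELU':
            return max(q(0.0), qmin), qmax
        if act == 'RELU6':
            return max(q(0.0), qmin), min(q(6.0), qmax)
        if act == 'RELU_N1_TO_1':
            return max(q(-1.0), qmin), min(q(1.0), qmax)
        raise ValueError(f'unknown activation: {act}')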