Results 1 - 10 of 94 for tanh (0.07 sec)

  1. src/math/tanh.go

    	}
    	return z
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Apr 11 16:34:30 UTC 2022
    - 2.7K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_move_transposes_begin.mlir

      // CHECK: %[[ARG_TRANSPOSE:[0-9]*]] = "tf.Transpose"(%arg0, %[[ARG_PERM]])
      // CHECK: %[[TANH:[0-9]*]] = "tf.Tanh"(%[[ARG_TRANSPOSE]]) {{.*}} tensor<1x8x4x4xf32>
      // CHECK: %[[ADD:[0-9]*]] = "tf.AddV2"(%[[TANH]], %[[TANH]]) {{.*}} tensor<1x8x4x4xf32>
      // CHECK: return %[[ADD]]
    
      %0 = "tf.Tanh"(%arg0) : (tensor<1x4x4x8xf32>) -> tensor<1x4x4x8xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 6.3K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/tfr/examples/mnist/ops_defs.py

      elif act == 'TANH':
        return tf.raw_ops.Tanh(x=res)
      else:
        return res
    
    
    @tf.RegisterGradient('NewConv2D')
    def _conv_add_relu_grad(op: ops.Operation, grad):
      act = op.get_attr('act')
      y = op.outputs[0]
      if act == 'RELU':
        grad = gen_nn_ops.relu_grad(grad, y)
      elif act == 'RELU6':
        grad = gen_nn_ops.relu6_grad(grad, y)
      elif act == 'TANH':
        y = math_ops.conj(y)
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Aug 31 20:23:51 UTC 2023
    - 6.8K bytes
    - Viewed (0)
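    For reference, the TANH branch shown above presumably finishes by applying the hyperbolic-tangent derivative: since tanh'(x) = 1 - tanh²(x), the local gradient can be computed directly from the saved forward output y. A minimal sketch of that rule, written in Go for consistency with the other examples here (the helper name is hypothetical and not part of the file above):

      package main

      import (
      	"fmt"
      	"math"
      )

      // tanhGradFromOutput applies the chain rule for y = tanh(x):
      // d/dx tanh(x) = 1 - tanh(x)^2, so an upstream gradient g is
      // scaled by (1 - y*y), computed from the saved forward output y.
      func tanhGradFromOutput(g, y float64) float64 {
      	return g * (1 - y*y)
      }

      func main() {
      	x := 0.5
      	y := math.Tanh(x)
      	fmt.Println(tanhGradFromOutput(1.0, y)) // ≈ 0.7864, i.e. 1 - tanh(0.5)^2
      }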
  4. tensorflow/compiler/jit/tests/opens2s_gnmt_mixed_precision.golden_summary

     Const 7
     GreaterEqual 2
     MatMul 1
     Mul 5
     Select 2
     Sigmoid 3
     Snapshot 1
     Split 1
     Tanh 2
    cluster 22 size 28
     Add 3
     BiasAdd 1
     Cast 1
     ConcatV2 1
     Const 5
     GreaterEqual 1
     MatMul 1
     Mul 5
     Select 3
     Sigmoid 3
     Snapshot 1
     Split 1
     Tanh 2
    cluster 23 size 423
     Add 12
     AddN 28
     BiasAddGrad 6
     BroadcastGradientArgs 12
     Cast 12
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Jan 06 10:38:14 UTC 2023
    - 5K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_move_transposes_end.mlir

      // CHECK: %[[RES_PERM:.*]] = "tf.Const"() <{value = dense<[0, 3, 1, 2]> : tensor<4xi32>}>
      // CHECK: %[[TANH:[0-9]*]] = "tf.Tanh"(%arg0) {{.*}} tensor<1x4x4x8xf32>
      // CHECK: %[[RES_TRANSPOSE:[0-9]*]] = "tf.Transpose"(%[[TANH]], %[[RES_PERM]]) {{.*}} tensor<1x8x4x4xf32>
      // CHECK: return %[[RES_TRANSPOSE]]
    
      %0 = "tf.Const"() {value = dense<[0, 3, 1, 2]> : tensor<4xi32>} : () -> tensor<4xi32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 9.5K bytes
    - Viewed (0)
  6. src/math/cmplx/cmath_test.go

    			t.Errorf("Tan(%g) = %g, want %g", -v.in, f, -v.want)
    		}
    	}
    }
    func TestTanh(t *testing.T) {
    	for i := 0; i < len(vc); i++ {
    		if f := Tanh(vc[i]); !cSoclose(tanh[i], f, 2e-15) {
    			t.Errorf("Tanh(%g) = %g, want %g", vc[i], f, tanh[i])
    		}
    	}
    	for _, v := range tanhSC {
    		if f := Tanh(v.in); !cAlike(v.want, f) {
    			t.Errorf("Tanh(%g) = %g, want %g", v.in, f, v.want)
    		}
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri May 01 03:16:37 UTC 2020
    - 48.1K bytes
    - Viewed (0)
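    The test above follows Go's table-driven pattern for math functions: each computed value is compared against a reference table within a small relative tolerance (cSoclose), while special-case inputs are checked separately for exact agreement (cAlike). A minimal sketch of such a relative-error check for the real-valued math.Tanh; the helper name and tolerance here are illustrative, not the actual ones from cmath_test.go:

      package main

      import (
      	"fmt"
      	"math"
      )

      // soclose reports whether got is within relative error tol of want,
      // in the spirit of the closeness helpers used by the math package tests.
      func soclose(want, got, tol float64) bool {
      	if want == got {
      		return true
      	}
      	d := math.Abs(want - got)
      	if want != 0 {
      		return d/math.Abs(want) <= tol
      	}
      	return d <= tol
      }

      func main() {
      	// Reference value of tanh(1) to 16 significant digits.
      	const want = 0.7615941559557649
      	got := math.Tanh(1)
      	fmt.Println(soclose(want, got, 1e-14)) // true
      }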
  7. src/math/tanh_s390x.s

    DATA tanhtab<>+112(SB)/8, $-.218623539150173528E-01
    DATA tanhtab<>+120(SB)/8, $-.115062908917949451E-01
    GLOBL tanhtab<>+0(SB), RODATA, $128
    
    // Tanh returns the hyperbolic tangent of the argument.
    //
    // Special cases are:
    //      Tanh(±0) = ±0
    //      Tanh(±Inf) = ±1
    //      Tanh(NaN) = NaN
    // The algorithm used is minimax polynomial approximation using a table of
    // polynomial coefficients determined with a Remez exchange algorithm.
    
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue Apr 16 15:34:41 UTC 2019
    - 4.6K bytes
    - Viewed (0)
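    The special cases documented in the comment above can be exercised directly against the portable math.Tanh; a short sketch:

      package main

      import (
      	"fmt"
      	"math"
      )

      func main() {
      	// Documented special cases:
      	//   Tanh(±0)   = ±0
      	//   Tanh(±Inf) = ±1
      	//   Tanh(NaN)  = NaN
      	fmt.Println(math.Tanh(0.0))                    // 0
      	fmt.Println(math.Tanh(math.Copysign(0, -1)))   // -0
      	fmt.Println(math.Tanh(math.Inf(1)))            // 1
      	fmt.Println(math.Tanh(math.Inf(-1)))           // -1
      	fmt.Println(math.IsNaN(math.Tanh(math.NaN()))) // true
      }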
  8. tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/unidirectional_sequence_rnn.mlir

    // CHECK-NEXT:         builtin_options_type: SequenceRNNOptions,
    // CHECK-NEXT:         builtin_options: {
    // CHECK-NEXT:           time_major: true,
    // CHECK-NEXT:           fused_activation_function: TANH
    // CHECK-NEXT:         }
    // CHECK-NEXT:       } ],
    // CHECK-NEXT:       name: "main"
    // CHECK-NEXT:     } ],
    // CHECK-NEXT:     description: "MLIR Converted.",
    // CHECK-NEXT:     buffers: [ {
    // CHECK-EMPTY:
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jul 14 16:41:28 UTC 2022
    - 3.9K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/tfr/examples/mnist/mnist_ops_test.py

                                   tf.function(gen_mnist_ops.new_conv2d),
                                   ops_defs._composite_conv_add_relu, kwargs)
    
      def test_new_conv2d_tanh(self):
        self.skipTest('Fix tanh gradients')
        input_ = tf.random.uniform([1, 4, 4, 1])
        filter_ = tf.random.uniform([2, 2, 1, 8])
        bias = tf.zeros([8])
        kwargs = {
            'input_': input_,
            'filter_': filter_,
            'bias': bias,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Sep 28 21:37:05 UTC 2021
    - 4K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/lite/tests/quantize-dynamic-range-float16.mlir

        %5, %6, %7, %8,
        %9, %9, %9,
        %10, %11,
        %10, %10,
        %9, %9,
        %recurrent_input, %cell_input,
        %9, %9, %9, %9) {
          cell_clip = 1.000000e+01 : f32,
          fused_activation_function = "TANH",
          proj_clip = 0.000000e+00 : f32,
          time_major = false} : (
            tensor<1x2x3xf32>,
            tensor<1x1xf32>, tensor<1x1xf32>, tensor<1x1xf32>, tensor<1x1xf32>,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 4.6K bytes
    - Viewed (0)