Results 1 - 6 of 6 for ctanh (0.39 sec)

  1. src/math/all_test.go

    }
    
    func TestAtanh(t *testing.T) {
    	for i := 0; i < len(vf); i++ {
    		a := vf[i] / 10
    		if f := Atanh(a); !veryclose(atanh[i], f) {
    			t.Errorf("Atanh(%g) = %g, want %g", a, f, atanh[i])
    		}
    	}
    	for i := 0; i < len(vfatanhSC); i++ {
    		if f := Atanh(vfatanhSC[i]); !alike(atanhSC[i], f) {
    			t.Errorf("Atanh(%g) = %g, want %g", vfatanhSC[i], f, atanhSC[i])
    		}
    	}
    }
    
    func TestAtan2(t *testing.T) {
    - Last Modified: Fri Jul 07 17:39:26 UTC 2023
    - 86.8K bytes
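
    The excerpt above shows the table-driven style the Go math tests use: ordinary inputs are checked against reference values with a tolerance, and special cases with an `alike` helper. A minimal, self-contained sketch of the same pattern (hypothetical test name; `alike` is re-implemented here to mirror the helper used above):

    package mathtest

    import (
    	"math"
    	"testing"
    )

    // alike reports whether two float64 values match for test purposes:
    // two NaNs count as equal, and +0 and -0 are kept distinct.
    func alike(a, b float64) bool {
    	switch {
    	case math.IsNaN(a) && math.IsNaN(b):
    		return true
    	case a == b:
    		return math.Signbit(a) == math.Signbit(b)
    	}
    	return false
    }

    // TestAtanhSpecialCases exercises the documented special cases of
    // math.Atanh: Atanh(±1) = ±Inf and Atanh(x) = NaN for |x| > 1.
    func TestAtanhSpecialCases(t *testing.T) {
    	cases := []struct{ in, want float64 }{
    		{1, math.Inf(1)},
    		{-1, math.Inf(-1)},
    		{2, math.NaN()},
    		{math.NaN(), math.NaN()},
    	}
    	for _, c := range cases {
    		if f := math.Atanh(c.in); !alike(c.want, f) {
    			t.Errorf("Atanh(%g) = %g, want %g", c.in, f, c.want)
    		}
    	}
    }

    The `alike` comparison is what lets the table include NaN expectations, which `==` alone could never match.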
  2. tensorflow/compiler/jit/mark_for_compilation_pass_test.cc

      ops::Switch switch_b(root.WithOpName("switch_b"), value, cond_b);
    
      Output tanh_a0 = ops::Tanh(root.WithOpName("tan_a0"), switch_a.output_true);
      Output tanh_a1 = ops::Tanh(root.WithOpName("tan_a1"), tanh_a0);
    
      Output tanh_b0 = ops::Tanh(root.WithOpName("tan_b0"), switch_b.output_true);
      Output tanh_b1 = ops::Tanh(root.WithOpName("tan_b1"), tanh_b0);
    
      Output add = ops::Add(root.WithOpName("add"), tanh_a1, tanh_b1);
    
    - Last Modified: Wed Feb 14 10:11:10 UTC 2024
    - 79.6K bytes
  3. tensorflow/compiler/mlir/lite/tests/prepare-quantize-post-training.mlir

          %cst_2, %cst_2, %cst_2,
          %cst_7, %cst_7, %cst_7, %cst_7,
          %cst_2, %cst_2,
          %recurrent_stats, %cell_stats,
          %cst_2, %cst_2, %cst_2, %cst_2) {cell_clip = 1.000000e+01 : f32, fused_activation_function = "TANH", proj_clip = 0.000000e+00 : f32, time_major = false}
        : ( tensor<1x28x28xf32>,
            tensor<20x28xf32>, tensor<20x28xf32>, tensor<20x28xf32>, tensor<20x28xf32>,
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 52.6K bytes
  4. tensorflow/cc/gradients/math_grad.cc

    REGISTER_GRADIENT_OP("Acosh", AcoshGrad);
    
    Status AtanhGrad(const Scope& scope, const Operation& op,
                     const std::vector<Output>& grad_inputs,
                     std::vector<Output>* grad_outputs) {
      // y = atanh(x)
      // dy/dx = 1 / (1 - x^2)
      auto one = Cast(scope, Const(scope, 1.0), op.input(0).type());
      auto dydx = Reciprocal(scope, Sub(scope, one, Square(scope, op.input(0))));
      // grad(x) = grad(y) * conj(dy/dx)
    - Last Modified: Fri Aug 25 18:20:20 UTC 2023
    - 50.7K bytes
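
    The gradient in this excerpt, dy/dx = 1 / (1 - x^2), is easy to sanity-check with a central finite difference. A minimal sketch in Go (real-valued only; the conj(dy/dx) step in the excerpt matters just for complex inputs):

    package main

    import (
    	"fmt"
    	"math"
    )

    func main() {
    	const h = 1e-6
    	for _, x := range []float64{-0.9, -0.5, 0.0, 0.3, 0.7} {
    		// Central difference approximation of d/dx atanh(x).
    		numeric := (math.Atanh(x+h) - math.Atanh(x-h)) / (2 * h)
    		// Analytic gradient used by AtanhGrad above.
    		analytic := 1 / (1 - x*x)
    		fmt.Printf("x=%+.1f  numeric=%.6f  analytic=%.6f\n", x, numeric, analytic)
    	}
    }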
  5. tensorflow/compiler/mlir/lite/transforms/optimize_patterns.td

            $filter_width, $filter_height, ActFnAttr),
        [(HasOneUse $pool_out)]>;
    }
    
    // TODO(hinsu): Also fuse ops corresponding to SIGN_BIT fused
    // activation functions.
    // Currently we're not fusing tanh, sigmoid, hard_swish and other activations
    // that cannot be simply translated into clamping.
    foreach actFnPair = [[TFL_ReluOp, TFL_AF_Relu],
                         [TFL_Relu6Op, TFL_AF_Relu6],
    - Last Modified: Thu May 16 20:31:41 UTC 2024
    - 66.4K bytes
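
    The comment in this excerpt hinges on whether an activation can be rewritten as a clamp. A rough Go illustration (names are purely illustrative, not TFLite API) of why the ReLU family qualifies and TANH does not:

    package main

    import (
    	"fmt"
    	"math"
    )

    // clamp limits x to the closed interval [lo, hi].
    func clamp(x, lo, hi float64) float64 {
    	return math.Min(math.Max(x, lo), hi)
    }

    func main() {
    	for _, x := range []float64{-2, 0.5, 7.5} {
    		relu := clamp(x, 0, math.Inf(1)) // RELU is max(x, 0)
    		relu6 := clamp(x, 0, 6)          // RELU6 is min(max(x, 0), 6)
    		reluN1 := clamp(x, -1, 1)        // RELU_N1_TO_1
    		tanh := math.Tanh(x)             // no (lo, hi) pair reproduces this curve
    		fmt.Println(x, relu, relu6, reluN1, tanh)
    	}
    }

    Each ReLU variant is exactly one clamp with fixed bounds, so it folds into the producing op's output range; tanh has no such pair, which is why the comment leaves it (and sigmoid, hard_swish) unfused.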
  6. tensorflow/compiler/jit/mark_for_compilation_pass.cc

                "Atan", "Atanh", "Ceil", "Cos", "Cosh", "Sin", "Exp", "Expm1",
                "Floor", "IsFinite", "IsInf", "IsNan", "Inv", "Reciprocal", "Log",
                "Log1p", "Invert", "LogicalNot", "Ndtri", "Neg", "Rint", "Round",
                "Rsqrt", "Sigmoid", "Sign", "Sinh", "Softplus", "Softsign", "Sqrt",
                "Square", "Tan", "Tanh", "Real", "Imag", "Erf", "Erfc", "Erfinv",
    - Last Modified: Wed Feb 21 12:19:41 UTC 2024
    - 85.3K bytes