- Sort: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 30 for Sigmoid (1.62 sec)
-
tensorflow/compiler/jit/tests/opens2s_gnmt_mixed_precision.golden_summary
Add 2 BiasAdd 1 Cast 1 ConcatV2 1 Const 7 GreaterEqual 2 MatMul 1 Mul 5 Select 2 Sigmoid 3 Snapshot 1 Split 1 Tanh 2 cluster 22 size 28 Add 3 BiasAdd 1 Cast 1 ConcatV2 1 Const 5 GreaterEqual 1 MatMul 1 Mul 5 Select 3 Sigmoid 3 Snapshot 1 Split 1 Tanh 2 cluster 23 size 423 Add 12 AddN 28 BiasAddGrad 6
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Jan 06 10:38:14 UTC 2023 - 5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/tests/tf-tfl-translate-serialize-stablehlo-logistic.mlir
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Feb 28 23:30:25 UTC 2023 - 288 bytes - Viewed (0) -
tensorflow/cc/gradients/math_grad_test.cc
} TEST_F(CWiseUnaryGradTest, Sigmoid) { auto x_fn = [this](const int i) { return RV({0, -1, 1, -2, 2, -3, 3}); }; TestCWiseGrad<float, float>(SIGMOID, x_fn); } TEST_F(CWiseUnaryGradTest, Sigmoid_Complex) { auto x_fn = [this](const int i) { return CRV({{1, 0}, {0, 0}, {2, -1}, {1, 2}, {3, 4}}); }; TestCWiseGrad<complex64, complex64>(SIGMOID, x_fn); } TEST_F(CWiseUnaryGradTest, Sign) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Aug 25 18:20:20 UTC 2023 - 36K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_config.h
// inconsistent with Conv 1x1 which always performs per channel quantization. bool disable_per_channel_for_dense_layers = false; // Whether to use fixed output ranges of the activation ops (tanh, sigmoid, // etc.) and not infer weight constants. // If this option is set, quantization emulation ops should be placed after // the ops in the input graph. This flag should be set to false for // post-training quantization.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Mar 13 10:16:19 UTC 2024 - 10.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/transforms/legalize_tf_patterns.td
(CastValueToI64 $old, $shape), MHLO_RngDistributionValue<"NORMAL">), [(IsShapedTensor $shape)]>; //===----------------------------------------------------------------------===// // Sigmoid grad op. //===----------------------------------------------------------------------===// // TODO(hinsu): Handle unranked inputs by broadcasting constant one to the // shape of $l instead of having it as a constant.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon May 06 18:46:23 UTC 2024 - 34.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/legalize_patterns.td
// ============================================================================= // Training OPs // ============================================================================= // `grad = dy * y * (1 - y)`, where `y = sigmoid(x)` def LegalizeSigmoidGrad : Pat<(TF_SigmoidGradOp $y, $dy), (TFL_MulOp $dy, (TFL_MulOp $y, (TFL_SubOp (Arith_ConstantOp ConstantAttr<RankedF32ElementsAttr<[]>, "1.0f">), $y, TFL_AF_None),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 04 13:30:42 UTC 2024 - 28.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h
// be checked first if present. // TODO: b/323478683: Consider deprecating this. struct OpQuantScaleSpec { // Whether this op has a fixed range requirement (e.g. sigmoid) bool has_fixed_output_range = false; // Whether this op should have same operand and result scales (e.g. concat) bool has_same_scale_requirement = false;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 24 20:30:06 UTC 2024 - 41.7K bytes - Viewed (0) -
tensorflow/cc/gradients/math_grad.cc
auto y = ConjugateHelper(grad_scope, op.output(0)); grad_outputs->push_back(internal::SigmoidGrad(grad_scope, y, grad)); return grad_scope.status(); } REGISTER_GRADIENT_OP("Sigmoid", SigmoidGrad); Status SignGrad(const Scope& scope, const Operation& op, const std::vector<Output>& grad_inputs, std::vector<Output>* grad_outputs) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Aug 25 18:20:20 UTC 2023 - 50.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/legalize-tf.mlir
// CHECK-LABEL: argmin // CHECK: "tfl.arg_min"(%arg0, %arg1) : (tensor<3xi32>, tensor<i32>) -> tensor<i32> } func.func @sigmoid(%arg0: tensor<?x88xf32>) -> tensor<?x88xf32> { %0 = "tf.Sigmoid"(%arg0) : (tensor<?x88xf32>) -> tensor<?x88xf32> func.return %0 : tensor<?x88xf32> // CHECK-LABEL: sigmoid // CHECK: "tfl.logistic"(%arg0) : (tensor<?x88xf32>) -> tensor<?x88xf32> }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 05 01:54:33 UTC 2024 - 153.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/optimize_patterns.td
$filter_width, $filter_height, ActFnAttr), [(HasOneUse $pool_out)]>; } // TODO(hinsu): Also fuse ops corresponding to SIGN_BIT fused // activation functions. // Currently we're not fusing tanh, sigmoid, hard_swish and other activations // those cannot be simply translated into clamping. foreach actFnPair = [[TFL_ReluOp, TFL_AF_Relu], [TFL_Relu6Op, TFL_AF_Relu6],
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 16 20:31:41 UTC 2024 - 66.4K bytes - Viewed (0)