Mirror of https://github.com/mikeoliphant/neural-amp-modeler-lv2.git, synced 2026-05-07 04:00:09 +02:00.
Commit: use fabsf instead of fabs so float arguments are not promoted to double.
Signed-off-by: falkTX <falktx@falktx.com>
This commit is contained in:
+2
-2
@@ -192,13 +192,13 @@ void sigmoid_(Eigen::MatrixXf &x, const long i_start, const long i_end,
|
|||||||
|
|
||||||
// Fast rational approximation of tanh() for single-precision input.
//
// Every literal carries an 'f' suffix and the absolute-value calls use
// fabsf (not fabs), so the whole expression stays in float and is never
// promoted to double — this matters for performance on FPUs where double
// math is slow, and matches the float-only Eigen::MatrixXf data path.
//
// @param x  input sample
// @return   approximately tanh(x): returns exactly 0 for x == 0 and is
//           odd-symmetric (the numerator is x times an even polynomial,
//           the denominator is even in x).
inline float fast_tanh_(const float x)
{
    const float ax = fabsf(x);
    const float x2 = x * x;

    return (x * (2.45550750702956f + 2.45550750702956f * ax +
                 (0.893229853513558f + 0.821226666969744f * ax) * x2) /
            (2.44506634652299f + (2.44506634652299f + x2) *
             fabsf(x + 0.814642734961073f * x * ax)));
}
|
||||||
|
|
||||||
void tanh_(Eigen::MatrixXf& x)
|
void tanh_(Eigen::MatrixXf& x)
|
||||||
|
|||||||
Reference in New Issue
Block a user