mirror of
https://github.com/yuzu-emu/unicorn
synced 2024-11-24 21:38:20 +00:00
include/fpu/softfloat: implement float16_abs helper
This will be required when expanding the MINMAX() macro for 16-bit/half-precision operations. Backports commit 28136775cd99c628f7d7c642b04eb87f062efef8 from qemu
This commit is contained in:
parent
0eee5afd0e
commit
facbc9ef66
1 changed file with 8 additions and 0 deletions
|
@ -252,6 +252,14 @@ static inline int float16_is_any_nan(float16 a)
|
|||
return ((float16_val(a) & ~0x8000) > 0x7c00);
|
||||
}
|
||||
|
||||
static inline float16 float16_abs(float16 a)
{
    /* Strip the sign bit to yield the magnitude.  Like the other
     * float*_abs helpers, this does not special-case NaN inputs and
     * does not flush denormal inputs to zero.
     */
    return make_float16(float16_val(a) & ~0x8000);
}
|
||||
|
||||
/*----------------------------------------------------------------------------
|
||||
| The pattern for a default generated half-precision NaN.
|
||||
*----------------------------------------------------------------------------*/
|
||||
|
|
Loading…
Reference in a new issue