-rw-r--r-- | include/qemu/host-utils.h | 65 |
1 file changed, 0 insertions, 65 deletions
diff --git a/include/qemu/host-utils.h b/include/qemu/host-utils.h
index bc743f5..29f3a99 100644
--- a/include/qemu/host-utils.h
+++ b/include/qemu/host-utils.h
@@ -376,12 +376,7 @@ static inline uint64_t uabs64(int64_t v)
  */
 static inline bool sadd32_overflow(int32_t x, int32_t y, int32_t *ret)
 {
-#if __has_builtin(__builtin_add_overflow) || __GNUC__ >= 5
     return __builtin_add_overflow(x, y, ret);
-#else
-    *ret = x + y;
-    return ((*ret ^ x) & ~(x ^ y)) < 0;
-#endif
 }
 
 /**
@@ -394,12 +389,7 @@ static inline bool sadd32_overflow(int32_t x, int32_t y, int32_t *ret)
  */
 static inline bool sadd64_overflow(int64_t x, int64_t y, int64_t *ret)
 {
-#if __has_builtin(__builtin_add_overflow) || __GNUC__ >= 5
     return __builtin_add_overflow(x, y, ret);
-#else
-    *ret = x + y;
-    return ((*ret ^ x) & ~(x ^ y)) < 0;
-#endif
 }
 
 /**
@@ -412,12 +402,7 @@ static inline bool sadd64_overflow(int64_t x, int64_t y, int64_t *ret)
  */
 static inline bool uadd32_overflow(uint32_t x, uint32_t y, uint32_t *ret)
 {
-#if __has_builtin(__builtin_add_overflow) || __GNUC__ >= 5
     return __builtin_add_overflow(x, y, ret);
-#else
-    *ret = x + y;
-    return *ret < x;
-#endif
 }
 
 /**
@@ -430,12 +415,7 @@ static inline bool uadd32_overflow(uint32_t x, uint32_t y, uint32_t *ret)
  */
 static inline bool uadd64_overflow(uint64_t x, uint64_t y, uint64_t *ret)
 {
-#if __has_builtin(__builtin_add_overflow) || __GNUC__ >= 5
     return __builtin_add_overflow(x, y, ret);
-#else
-    *ret = x + y;
-    return *ret < x;
-#endif
 }
 
 /**
@@ -449,12 +429,7 @@ static inline bool uadd64_overflow(uint64_t x, uint64_t y, uint64_t *ret)
  */
 static inline bool ssub32_overflow(int32_t x, int32_t y, int32_t *ret)
 {
-#if __has_builtin(__builtin_sub_overflow) || __GNUC__ >= 5
     return __builtin_sub_overflow(x, y, ret);
-#else
-    *ret = x - y;
-    return ((*ret ^ x) & (x ^ y)) < 0;
-#endif
 }
 
 /**
@@ -468,12 +443,7 @@ static inline bool ssub32_overflow(int32_t x, int32_t y, int32_t *ret)
  */
 static inline bool ssub64_overflow(int64_t x, int64_t y, int64_t *ret)
 {
-#if __has_builtin(__builtin_sub_overflow) || __GNUC__ >= 5
     return __builtin_sub_overflow(x, y, ret);
-#else
-    *ret = x - y;
-    return ((*ret ^ x) & (x ^ y)) < 0;
-#endif
 }
 
 /**
@@ -487,12 +457,7 @@ static inline bool ssub64_overflow(int64_t x, int64_t y, int64_t *ret)
  */
 static inline bool usub32_overflow(uint32_t x, uint32_t y, uint32_t *ret)
 {
-#if __has_builtin(__builtin_sub_overflow) || __GNUC__ >= 5
     return __builtin_sub_overflow(x, y, ret);
-#else
-    *ret = x - y;
-    return x < y;
-#endif
 }
 
 /**
@@ -506,12 +471,7 @@ static inline bool usub32_overflow(uint32_t x, uint32_t y, uint32_t *ret)
  */
 static inline bool usub64_overflow(uint64_t x, uint64_t y, uint64_t *ret)
 {
-#if __has_builtin(__builtin_sub_overflow) || __GNUC__ >= 5
     return __builtin_sub_overflow(x, y, ret);
-#else
-    *ret = x - y;
-    return x < y;
-#endif
 }
 
 /**
@@ -524,13 +484,7 @@ static inline bool usub64_overflow(uint64_t x, uint64_t y, uint64_t *ret)
  */
 static inline bool smul32_overflow(int32_t x, int32_t y, int32_t *ret)
 {
-#if __has_builtin(__builtin_mul_overflow) || __GNUC__ >= 5
     return __builtin_mul_overflow(x, y, ret);
-#else
-    int64_t z = (int64_t)x * y;
-    *ret = z;
-    return *ret != z;
-#endif
 }
 
 /**
@@ -543,14 +497,7 @@ static inline bool smul32_overflow(int32_t x, int32_t y, int32_t *ret)
  */
 static inline bool smul64_overflow(int64_t x, int64_t y, int64_t *ret)
 {
-#if __has_builtin(__builtin_mul_overflow) || __GNUC__ >= 5
     return __builtin_mul_overflow(x, y, ret);
-#else
-    uint64_t hi, lo;
-    muls64(&lo, &hi, x, y);
-    *ret = lo;
-    return hi != ((int64_t)lo >> 63);
-#endif
 }
 
 /**
@@ -563,13 +510,7 @@ static inline bool smul64_overflow(int64_t x, int64_t y, int64_t *ret)
  */
 static inline bool umul32_overflow(uint32_t x, uint32_t y, uint32_t *ret)
 {
-#if __has_builtin(__builtin_mul_overflow) || __GNUC__ >= 5
     return __builtin_mul_overflow(x, y, ret);
-#else
-    uint64_t z = (uint64_t)x * y;
-    *ret = z;
-    return z > UINT32_MAX;
-#endif
 }
 
 /**
@@ -582,13 +523,7 @@ static inline bool umul32_overflow(uint32_t x, uint32_t y, uint32_t *ret)
  */
 static inline bool umul64_overflow(uint64_t x, uint64_t y, uint64_t *ret)
 {
-#if __has_builtin(__builtin_mul_overflow) || __GNUC__ >= 5
     return __builtin_mul_overflow(x, y, ret);
-#else
-    uint64_t hi;
-    mulu64(ret, &hi, x, y);
-    return hi != 0;
-#endif
 }
 
 /*
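After this change every helper above is an unconditional wrapper around the matching __builtin_*_overflow intrinsic, so callers are unaffected; the toolchain is simply required to provide the builtins (GCC >= 5, or a Clang that reports them via __has_builtin), which is what dropping the fallbacks relies on. As a minimal usage sketch: the caller below is hypothetical, not from QEMU, and assumes it is compiled inside the QEMU tree where qemu/host-utils.h is on the include path.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>
#include "qemu/host-utils.h"   /* the *_overflow wrappers shown in the diff above */

/* Hypothetical helper: add two offsets, refusing to continue if the
 * unsigned 64-bit sum wraps around. */
static int add_offsets_checked(uint64_t base, uint64_t extra, uint64_t *out)
{
    uint64_t sum;

    if (uadd64_overflow(base, extra, &sum)) {
        return -1;             /* sum wrapped past UINT64_MAX */
    }
    *out = sum;
    return 0;
}

int main(void)
{
    uint64_t r;

    if (add_offsets_checked(UINT64_MAX - 1, 4, &r)) {
        printf("addition overflowed\n");
    }

    /* The signed variants behave the same way: the (wrapped) result is
     * still written to *ret, and the return value reports overflow. */
    int32_t prod;
    bool ovf = smul32_overflow(0x10000, 0x10000, &prod);
    printf("prod=%d overflow=%d\n", prod, (int)ovf);
    return 0;
}

The calling convention matches the underlying builtins: the result of the (possibly wrapped) operation is always stored through the pointer, and the boolean return value indicates whether the mathematically exact result fit in the destination type.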