@@ -17,6 +17,7 @@
 #include <atomic>
 #include <stdarg.h>
 #include <stdlib.h>
+#include <string.h>
 
 #define KMP_FTN_PLAIN 1
 #define KMP_FTN_APPEND 2
@@ -864,15 +865,25 @@ static inline bool mips_sync_val_compare_and_swap(volatile kmp_uint64 *p,
   __sync_lock_test_and_set((volatile kmp_uint64 *)(p), (kmp_uint64)(v))
 
 inline kmp_real32 KMP_XCHG_REAL32(volatile kmp_real32 *p, kmp_real32 v) {
-  kmp_int32 tmp =
-      __sync_lock_test_and_set((volatile kmp_uint32 *)(p), *(kmp_uint32 *)&v);
-  return *(kmp_real32 *)&tmp;
+  volatile kmp_uint32 *up;
+  kmp_uint32 uv;
+  memcpy(&up, &p, sizeof(up));
+  memcpy(&uv, &v, sizeof(uv));
+  kmp_int32 tmp = __sync_lock_test_and_set(up, uv);
+  kmp_real32 ftmp;
+  memcpy(&ftmp, &tmp, sizeof(tmp));
+  return ftmp;
 }
 
 inline kmp_real64 KMP_XCHG_REAL64(volatile kmp_real64 *p, kmp_real64 v) {
-  kmp_int64 tmp =
-      __sync_lock_test_and_set((volatile kmp_uint64 *)(p), *(kmp_uint64 *)&v);
-  return *(kmp_real64 *)&tmp;
+  volatile kmp_uint64 *up;
+  kmp_uint64 uv;
+  memcpy(&up, &p, sizeof(up));
+  memcpy(&uv, &v, sizeof(uv));
+  kmp_int64 tmp = __sync_lock_test_and_set(up, uv);
+  kmp_real64 dtmp;
+  memcpy(&dtmp, &tmp, sizeof(tmp));
+  return dtmp;
 }
 
 #else
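
For context on why the patch is shaped this way: the old code reinterpreted a float through a cast pointer (`*(kmp_uint32 *)&v`), which violates the C++ strict-aliasing rule and is undefined behavior that optimizing compilers may miscompile. Copying the bytes with memcpy is the well-defined way to move a value's object representation between types, and compilers fold the fixed-size copy into a plain register move. Below is a minimal standalone sketch of the same idiom; the names (float_to_bits, bits_to_float) are illustrative and not taken from the patch:

#include <cstdint>
#include <cstring>

static_assert(sizeof(float) == sizeof(std::uint32_t),
              "the byte copy only round-trips if the sizes match");

// Reinterpret a float's object representation as a 32-bit integer.
// memcpy of sizeof(u) bytes is well defined; dereferencing
// (std::uint32_t *)&f is not, because float and uint32_t are not
// aliasing-compatible types.
static inline std::uint32_t float_to_bits(float f) {
  std::uint32_t u;
  std::memcpy(&u, &f, sizeof(u));
  return u;
}

// The inverse direction: rebuild a float from its 32-bit pattern.
static inline float bits_to_float(std::uint32_t u) {
  float f;
  std::memcpy(&f, &u, sizeof(f));
  return f;
}

int main() {
  float v = 1.5f;
  // The round-trip is exact: the same 32 bits come back.
  return bits_to_float(float_to_bits(v)) == v ? 0 : 1;
}

Since C++20 the same operation is available directly as std::bit_cast<std::uint32_t>(f); the memcpy spelling used in the patch works in older standards and in C.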