From d7b6df5808e7bef5930b61a82e880699a9f9e208 Mon Sep 17 00:00:00 2001
From: Khem Raj <raj.khem@gmail.com>
Date: Thu, 29 Jun 2017 15:39:19 -0700
Subject: [PATCH] implement 64-bit atomics for mips

GCC does not provide 64-bit atomics for mips32, so implement them out of
line, serialized with a spinlock.

Signed-off-by: Khem Raj <raj.khem@gmail.com>
---
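Note: on mips32, GCC lowers 64-bit __sync_* builtins to out-of-line calls
such as __sync_fetch_and_add_8, and (as the commit message says) it does
not provide those for mips32, so the link fails with undefined references.
The sketch below is illustrative only, not part of the patch; it shows the
kind of code that generates such a libcall:

  #include <stdint.h>

  static volatile uint64_t counter;

  uint64_t bump(void) {
    /* On mips32 this compiles to a call to __sync_fetch_and_add_8,
     * which the new src/atomic64.c provides. */
    return __sync_fetch_and_add(&counter, 1);
  }
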
 src/Makefile.am |   1 +
 src/atomic64.c  | 228 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 229 insertions(+)
 create mode 100644 src/atomic64.c

diff --git a/src/Makefile.am b/src/Makefile.am
index 99aaace0..cbbbbee9 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -27,6 +27,7 @@ libtorrent_la_LIBADD = \
 	utils/libsub_utils.la
 
 libtorrent_la_SOURCES = \
+	atomic64.c \
 	globals.cc \
 	globals.h \
 	manager.cc \
diff --git a/src/atomic64.c b/src/atomic64.c
new file mode 100644
index 00000000..f841b39b
--- /dev/null
+++ b/src/atomic64.c
@@ -0,0 +1,228 @@
+/*===----- atomic64.c - Support functions for 64-bit atomic operations.-----===
+ *
+ * The LLVM Compiler Infrastructure
+ *
+ * This file is dual licensed under the MIT and the University of Illinois Open
+ * Source Licenses. See LICENSE.TXT for details.
+ *
+ *===-----------------------------------------------------------------------===
+ *
+ * atomic64.c defines a set of functions for performing atomic accesses on
+ * 64-bit memory locations. It also implements spinlock synchronization
+ * operations.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#include <stdint.h>
+#include <stdbool.h>
+
+/*
+ * These helpers are only needed on MIPS, which lacks hardware 64-bit
+ * atomics, unlike x86 and ARM.
+ */
+#if defined(__mips__) || defined(__mipsel__)
+
+static void __spin_lock(volatile int *lock) {
+  while (__sync_lock_test_and_set(lock, 1))
+    while (*lock) {}
+}
+
+static void __spin_unlock(volatile int *lock) {
+  __sync_lock_release(lock);
+}
+
+/*
+ * Make sure the lock is on its own cache line to prevent false sharing.
+ * Put it inside a struct that is aligned and padded to the typical MIPS
+ * cache-line size of 32 bytes.
+ */
+static struct {
+  int lock;
+  char pad[32 - sizeof(int)];
+} __attribute__((aligned (32))) lock = { 0 };
+
+
+uint64_t __sync_fetch_and_add_8(volatile uint64_t *ptr, uint64_t val) {
+  uint64_t ret;
+
+  __spin_lock(&lock.lock);
+
+  ret = *ptr;
+  *ptr = ret + val;
+
+  __spin_unlock(&lock.lock);
+
+  return ret;
+}
+
+uint64_t __sync_fetch_and_sub_8(volatile uint64_t *ptr, uint64_t val) {
+  uint64_t ret;
+
+  __spin_lock(&lock.lock);
+
+  ret = *ptr;
+  *ptr = ret - val;
+
+  __spin_unlock(&lock.lock);
+
+  return ret;
+}
+
+uint64_t __sync_fetch_and_and_8(volatile uint64_t *ptr, uint64_t val) {
+  uint64_t ret;
+
+  __spin_lock(&lock.lock);
+
+  ret = *ptr;
+  *ptr = ret & val;
+
+  __spin_unlock(&lock.lock);
+
+  return ret;
+}
+
+uint64_t __sync_fetch_and_or_8(volatile uint64_t *ptr, uint64_t val) {
+  uint64_t ret;
+
+  __spin_lock(&lock.lock);
+
+  ret = *ptr;
+  *ptr = ret | val;
+
+  __spin_unlock(&lock.lock);
+
+  return ret;
+}
+
+uint64_t __sync_fetch_and_xor_8(volatile uint64_t *ptr, uint64_t val) {
+  uint64_t ret;
+
+  __spin_lock(&lock.lock);
+
+  ret = *ptr;
+  *ptr = ret ^ val;
+
+  __spin_unlock(&lock.lock);
+
+  return ret;
+}
+
+uint64_t __sync_add_and_fetch_8(volatile uint64_t *ptr, uint64_t val) {
+  uint64_t ret;
+
+  __spin_lock(&lock.lock);
+
+  ret = *ptr + val;
+  *ptr = ret;
+
+  __spin_unlock(&lock.lock);
+
+  return ret;
+}
+
+uint64_t __sync_sub_and_fetch_8(volatile uint64_t *ptr, uint64_t val) {
+  uint64_t ret;
+
+  __spin_lock(&lock.lock);
+
+  ret = *ptr - val;
+  *ptr = ret;
+
+  __spin_unlock(&lock.lock);
+
+  return ret;
+}
+
+uint64_t __sync_and_and_fetch_8(volatile uint64_t *ptr, uint64_t val) {
+  uint64_t ret;
+
+  __spin_lock(&lock.lock);
+
+  ret = *ptr & val;
+  *ptr = ret;
+
+  __spin_unlock(&lock.lock);
+
+  return ret;
+}
+
+uint64_t __sync_or_and_fetch_8(volatile uint64_t *ptr, uint64_t val) {
+  uint64_t ret;
+
+  __spin_lock(&lock.lock);
+
+  ret = *ptr | val;
+  *ptr = ret;
+
+  __spin_unlock(&lock.lock);
+
+  return ret;
+}
+
+uint64_t __sync_xor_and_fetch_8(volatile uint64_t *ptr, uint64_t val) {
+  uint64_t ret;
+
+  __spin_lock(&lock.lock);
+
+  ret = *ptr ^ val;
+  *ptr = ret;
+
+  __spin_unlock(&lock.lock);
+
+  return ret;
+}
+
+bool __sync_bool_compare_and_swap_8(volatile uint64_t *ptr,
+                                    uint64_t oldval, uint64_t newval) {
+  bool ret = false;
+
+  __spin_lock(&lock.lock);
+
+  if (*ptr == oldval) {
+    *ptr = newval;
+    ret = true;
+  }
+
+  __spin_unlock(&lock.lock);
+
+  return ret;
+}
+
+uint64_t __sync_val_compare_and_swap_8(volatile uint64_t *ptr,
+                                       uint64_t oldval, uint64_t newval) {
+  uint64_t ret;
+
+  __spin_lock(&lock.lock);
+
+  ret = *ptr;
+  if (ret == oldval)
+    *ptr = newval;
+
+  __spin_unlock(&lock.lock);
+
+  return ret;
+}
+
+uint64_t __sync_lock_test_and_set_8(volatile uint64_t *ptr, uint64_t val) {
+  uint64_t ret;
+
+  __spin_lock(&lock.lock);
+
+  ret = *ptr;
+  *ptr = val;
+
+  __spin_unlock(&lock.lock);
+
+  return ret;
+}
+
+void __sync_lock_release_8(volatile uint64_t *ptr) {
+  __spin_lock(&lock.lock);
+
+  *ptr = 0;
+
+  __spin_unlock(&lock.lock);
+}
+
+#endif
--
2.13.2

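As a quick sanity check that the fallbacks link and behave atomically, a
small multi-threaded program like the one below can be built for a mips32
target; the file name test_atomic64.c and the exact compiler invocation are
illustrative assumptions, not part of the patch:

  /* test_atomic64.c - illustrative smoke test, not part of the patch.
   * Example build: mips-linux-gnu-gcc -march=mips32 -O2 \
   *     test_atomic64.c src/atomic64.c -o test_atomic64 -lpthread */
  #include <assert.h>
  #include <pthread.h>
  #include <stdint.h>
  #include <stdio.h>

  #define NTHREADS 4
  #define NITERS   100000

  static volatile uint64_t counter;

  static void *worker(void *arg) {
    (void)arg;
    for (int i = 0; i < NITERS; i++)
      __sync_fetch_and_add(&counter, 1);  /* exercises __sync_fetch_and_add_8 */
    return NULL;
  }

  int main(void) {
    pthread_t threads[NTHREADS];

    for (int i = 0; i < NTHREADS; i++)
      pthread_create(&threads[i], NULL, worker, NULL);
    for (int i = 0; i < NTHREADS; i++)
      pthread_join(threads[i], NULL);

    /* Every increment must be observed exactly once. */
    assert(counter == (uint64_t)NTHREADS * NITERS);

    /* CAS succeeds only when the expected value matches. */
    assert(__sync_bool_compare_and_swap(&counter,
                                        (uint64_t)NTHREADS * NITERS, 0));
    assert(counter == 0);

    puts("ok");
    return 0;
  }

If the target really lacks the GCC-provided helpers, the same program fails
to link without atomic64.c, with undefined references to
__sync_fetch_and_add_8 and __sync_bool_compare_and_swap_8.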