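// -*- C++ -*- header.

/** @file bits/atomic_0.h
 *  This is an internal header file, included by other library headers.
 *  Do not attempt to use it directly.
 *
 *  Variant 0 of the atomics interface: never lock-free.  Every
 *  operation is serialized through a spin lock obtained from
 *  __atomic_flag_for_address().
 */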
#ifndef _GLIBCXX_ATOMIC_0_H
#define _GLIBCXX_ATOMIC_0_H 1

#pragma GCC system_header

namespace __atomic0
{
  struct atomic_flag;

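// These macros implement every atomic operation by spinning on an
// external lock: __atomic_flag_for_address() maps the object's address
// to a shared __atomic_flag_base, which __atomic_flag_wait_explicit()
// acquires and atomic_flag_clear_explicit() releases.  _ATOMIC_MEMBER_
// is defined elsewhere; it is expected to expand to the _M_i member of
// the object __a.

// Load the value under the lock and return it.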
#define _ATOMIC_LOAD_(__a, __x)                                         \
  ({ volatile __typeof__ _ATOMIC_MEMBER_* __p = &_ATOMIC_MEMBER_;       \
     volatile __atomic_flag_base* __g = __atomic_flag_for_address(__p); \
     __atomic_flag_wait_explicit(__g, __x);                             \
     __typeof__ _ATOMIC_MEMBER_ __r = *__p;                             \
     atomic_flag_clear_explicit(__g, __x);                              \
     __r; })

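// Store the new value __m under the lock; the result is the stored value.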
#define _ATOMIC_STORE_(__a, __m, __x)                                   \
  ({ volatile __typeof__ _ATOMIC_MEMBER_* __p = &_ATOMIC_MEMBER_;       \
     __typeof__(__m) __v = (__m);                                       \
     volatile __atomic_flag_base* __g = __atomic_flag_for_address(__p); \
     __atomic_flag_wait_explicit(__g, __x);                             \
     *__p = __v;                                                        \
     atomic_flag_clear_explicit(__g, __x);                              \
     __v; })

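// Read-modify-write under the lock: apply the compound-assignment
// operator __o with operand __m and return the previous value.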
#define _ATOMIC_MODIFY_(__a, __o, __m, __x)                             \
  ({ volatile __typeof__ _ATOMIC_MEMBER_* __p = &_ATOMIC_MEMBER_;       \
     __typeof__(__m) __v = (__m);                                       \
     volatile __atomic_flag_base* __g = __atomic_flag_for_address(__p); \
     __atomic_flag_wait_explicit(__g, __x);                             \
     __typeof__ _ATOMIC_MEMBER_ __r = *__p;                             \
     *__p __o __v;                                                      \
     atomic_flag_clear_explicit(__g, __x);                              \
     __r; })

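// Compare-exchange under the lock: on a match store __m and return true,
// otherwise write the observed value back through __e and return false.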
#define _ATOMIC_CMPEXCHNG_(__a, __e, __m, __x)                          \
  ({ volatile __typeof__ _ATOMIC_MEMBER_* __p = &_ATOMIC_MEMBER_;       \
     __typeof__(__e) __q = (__e);                                       \
     __typeof__(__m) __v = (__m);                                       \
     bool __r;                                                          \
     volatile __atomic_flag_base* __g = __atomic_flag_for_address(__p); \
     __atomic_flag_wait_explicit(__g, __x);                             \
     __typeof__ _ATOMIC_MEMBER_ __t__ = *__p;                           \
     if (__t__ == *__q) { *__p = __v; __r = true; }                     \
     else { *__q = __t__; __r = false; }                                \
     atomic_flag_clear_explicit(__g, __x);                              \
     __r; })

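  /// atomic_flag (test_and_set and clear are defined out of line).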
  struct atomic_flag : public __atomic_flag_base
  {
    atomic_flag() = default;
    ~atomic_flag() = default;
    atomic_flag(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) = delete;

    atomic_flag(bool __i): __atomic_flag_base({ __i }) { }

    bool
    test_and_set(memory_order __m = memory_order_seq_cst) volatile;

    void
    clear(memory_order __m = memory_order_seq_cst) volatile;
  };

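  /// atomic_address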
  struct atomic_address
  {
  private:
    void* _M_i;

  public:
    atomic_address() = default;
    ~atomic_address() = default;
    atomic_address(const atomic_address&) = delete;
    atomic_address& operator=(const atomic_address&) = delete;

    atomic_address(void* __v) { _M_i = __v; }

    bool
    is_lock_free() const volatile
    { return false; }

    void
    store(void* __v, memory_order __m = memory_order_seq_cst) volatile
    {
      __glibcxx_assert(__m != memory_order_acquire);
      __glibcxx_assert(__m != memory_order_acq_rel);
      __glibcxx_assert(__m != memory_order_consume);
      _ATOMIC_STORE_(this, __v, __m);
    }

    void*
    load(memory_order __m = memory_order_seq_cst) const volatile
    {
      __glibcxx_assert(__m != memory_order_release);
      __glibcxx_assert(__m != memory_order_acq_rel);
      return _ATOMIC_LOAD_(this, __m);
    }

    void*
    exchange(void* __v, memory_order __m = memory_order_seq_cst) volatile
    { return _ATOMIC_MODIFY_(this, =, __v, __m); }

    bool
    compare_exchange_weak(void*& __v1, void* __v2, memory_order __m1,
                          memory_order __m2) volatile
    {
      __glibcxx_assert(__m2 != memory_order_release);
      __glibcxx_assert(__m2 != memory_order_acq_rel);
      __glibcxx_assert(__m2 <= __m1);
      return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1);
    }

    bool
    compare_exchange_weak(void*& __v1, void* __v2,
                          memory_order __m = memory_order_seq_cst) volatile
    {
      return compare_exchange_weak(__v1, __v2, __m,
                                   __calculate_memory_order(__m));
    }

    bool
    compare_exchange_strong(void*& __v1, void* __v2, memory_order __m1,
                            memory_order __m2) volatile
    {
      __glibcxx_assert(__m2 != memory_order_release);
      __glibcxx_assert(__m2 != memory_order_acq_rel);
      __glibcxx_assert(__m2 <= __m1);
      return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1);
    }

    bool
    compare_exchange_strong(void*& __v1, void* __v2,
                            memory_order __m = memory_order_seq_cst) volatile
    {
      return compare_exchange_strong(__v1, __v2, __m,
                                     __calculate_memory_order(__m));
    }

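    // Pointer arithmetic is done in bytes (via char*), written out inline
    // rather than through _ATOMIC_MODIFY_, but with the same lock protocol.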
    void*
    fetch_add(ptrdiff_t __d, memory_order __m = memory_order_seq_cst) volatile
    {
      void* volatile* __p = &(_M_i);
      volatile __atomic_flag_base* __g = __atomic_flag_for_address(__p);
      __atomic_flag_wait_explicit(__g, __m);
      void* __r = *__p;
      *__p = (void*)((char*)(*__p) + __d);
      atomic_flag_clear_explicit(__g, __m);
      return __r;
    }

    void*
    fetch_sub(ptrdiff_t __d, memory_order __m = memory_order_seq_cst) volatile
    {
      void* volatile* __p = &(_M_i);
      volatile __atomic_flag_base* __g = __atomic_flag_for_address(__p);
      __atomic_flag_wait_explicit(__g, __m);
      void* __r = *__p;
      *__p = (void*)((char*)(*__p) - __d);
      atomic_flag_clear_explicit(__g, __m);
      return __r;
    }

    operator void*() const volatile
    { return load(); }

    void*
    operator=(void* __v) volatile
    {
      store(__v);
      return __v;
    }

    void*
    operator+=(ptrdiff_t __d) volatile
    { return fetch_add(__d) + __d; }

    void*
    operator-=(ptrdiff_t __d) volatile
    { return fetch_sub(__d) - __d; }
  };

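  /// Base class for atomic integrals; atomic_bool below delegates to it.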
  template<typename _ITp>
    struct __atomic_base
    {
    private:
      typedef _ITp __integral_type;

      __integral_type _M_i;

    public:
      __atomic_base() = default;
      ~__atomic_base() = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;

      __atomic_base(__integral_type __i) { _M_i = __i; }

      operator __integral_type() const volatile
      { return load(); }

      __integral_type
      operator=(__integral_type __i) volatile
      {
        store(__i);
        return __i;
      }

      __integral_type
      operator++(int) volatile
      { return fetch_add(1); }

      __integral_type
      operator--(int) volatile
      { return fetch_sub(1); }

      __integral_type
      operator++() volatile
      { return fetch_add(1) + 1; }

      __integral_type
      operator--() volatile
      { return fetch_sub(1) - 1; }

      __integral_type
      operator+=(__integral_type __i) volatile
      { return fetch_add(__i) + __i; }

      __integral_type
      operator-=(__integral_type __i) volatile
      { return fetch_sub(__i) - __i; }

      __integral_type
      operator&=(__integral_type __i) volatile
      { return fetch_and(__i) & __i; }

      __integral_type
      operator|=(__integral_type __i) volatile
      { return fetch_or(__i) | __i; }

      __integral_type
      operator^=(__integral_type __i) volatile
      { return fetch_xor(__i) ^ __i; }

      bool
      is_lock_free() const volatile
      { return false; }

      void
      store(__integral_type __i,
            memory_order __m = memory_order_seq_cst) volatile
      {
        __glibcxx_assert(__m != memory_order_acquire);
        __glibcxx_assert(__m != memory_order_acq_rel);
        __glibcxx_assert(__m != memory_order_consume);
        _ATOMIC_STORE_(this, __i, __m);
      }

      __integral_type
      load(memory_order __m = memory_order_seq_cst) const volatile
      {
        __glibcxx_assert(__m != memory_order_release);
        __glibcxx_assert(__m != memory_order_acq_rel);
        return _ATOMIC_LOAD_(this, __m);
      }

      __integral_type
      exchange(__integral_type __i,
               memory_order __m = memory_order_seq_cst) volatile
      { return _ATOMIC_MODIFY_(this, =, __i, __m); }

      bool
      compare_exchange_weak(__integral_type& __i1, __integral_type __i2,
                            memory_order __m1, memory_order __m2) volatile
      {
        __glibcxx_assert(__m2 != memory_order_release);
        __glibcxx_assert(__m2 != memory_order_acq_rel);
        __glibcxx_assert(__m2 <= __m1);
        return _ATOMIC_CMPEXCHNG_(this, &__i1, __i2, __m1);
      }

      bool
      compare_exchange_weak(__integral_type& __i1, __integral_type __i2,
                            memory_order __m = memory_order_seq_cst) volatile
      {
        return compare_exchange_weak(__i1, __i2, __m,
                                     __calculate_memory_order(__m));
      }

      bool
      compare_exchange_strong(__integral_type& __i1, __integral_type __i2,
                              memory_order __m1, memory_order __m2) volatile
      {
        __glibcxx_assert(__m2 != memory_order_release);
        __glibcxx_assert(__m2 != memory_order_acq_rel);
        __glibcxx_assert(__m2 <= __m1);
        return _ATOMIC_CMPEXCHNG_(this, &__i1, __i2, __m1);
      }

      bool
      compare_exchange_strong(__integral_type& __i1, __integral_type __i2,
                              memory_order __m = memory_order_seq_cst) volatile
      {
        return compare_exchange_strong(__i1, __i2, __m,
                                       __calculate_memory_order(__m));
      }

      __integral_type
      fetch_add(__integral_type __i,
                memory_order __m = memory_order_seq_cst) volatile
      { return _ATOMIC_MODIFY_(this, +=, __i, __m); }

      __integral_type
      fetch_sub(__integral_type __i,
                memory_order __m = memory_order_seq_cst) volatile
      { return _ATOMIC_MODIFY_(this, -=, __i, __m); }

      __integral_type
      fetch_and(__integral_type __i,
                memory_order __m = memory_order_seq_cst) volatile
      { return _ATOMIC_MODIFY_(this, &=, __i, __m); }

      __integral_type
      fetch_or(__integral_type __i,
               memory_order __m = memory_order_seq_cst) volatile
      { return _ATOMIC_MODIFY_(this, |=, __i, __m); }

      __integral_type
      fetch_xor(__integral_type __i,
                memory_order __m = memory_order_seq_cst) volatile
      { return _ATOMIC_MODIFY_(this, ^=, __i, __m); }
    };

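  /// atomic_bool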
  struct atomic_bool
  {
  private:
    __atomic_base<bool> _M_base;

  public:
    atomic_bool() = default;
    ~atomic_bool() = default;
    atomic_bool(const atomic_bool&) = delete;
    atomic_bool& operator=(const atomic_bool&) = delete;

    atomic_bool(bool __i) : _M_base(__i) { }

    bool
    operator=(bool __i) volatile
    { return _M_base.operator=(__i); }

    operator bool() const volatile
    { return _M_base.load(); }

    bool
    is_lock_free() const volatile
    { return _M_base.is_lock_free(); }

    void
    store(bool __i, memory_order __m = memory_order_seq_cst) volatile
    { _M_base.store(__i, __m); }

    bool
    load(memory_order __m = memory_order_seq_cst) const volatile
    { return _M_base.load(__m); }

    bool
    exchange(bool __i, memory_order __m = memory_order_seq_cst) volatile
    { return _M_base.exchange(__i, __m); }

    bool
    compare_exchange_weak(bool& __i1, bool __i2, memory_order __m1,
                          memory_order __m2) volatile
    { return _M_base.compare_exchange_weak(__i1, __i2, __m1, __m2); }

    bool
    compare_exchange_weak(bool& __i1, bool __i2,
                          memory_order __m = memory_order_seq_cst) volatile
    { return _M_base.compare_exchange_weak(__i1, __i2, __m); }

    bool
    compare_exchange_strong(bool& __i1, bool __i2, memory_order __m1,
                            memory_order __m2) volatile
    { return _M_base.compare_exchange_strong(__i1, __i2, __m1, __m2); }

    bool
    compare_exchange_strong(bool& __i1, bool __i2,
                            memory_order __m = memory_order_seq_cst) volatile
    { return _M_base.compare_exchange_strong(__i1, __i2, __m); }
  };

#undef _ATOMIC_LOAD_
#undef _ATOMIC_STORE_
#undef _ATOMIC_MODIFY_
#undef _ATOMIC_CMPEXCHNG_
} // namespace __atomic0

#endif // _GLIBCXX_ATOMIC_0_H