atomic_2.h

// -*- C++ -*- header.

// Copyright (C) 2008, 2009
// Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file bits/atomic_2.h
 *  This is an internal header file, included by other library headers.
 *  You should not attempt to use it directly.
 */

#ifndef _GLIBCXX_ATOMIC_2_H
#define _GLIBCXX_ATOMIC_2_H 1

#pragma GCC system_header

// _GLIBCXX_BEGIN_NAMESPACE(std)

// 2 == __atomic2 == Always lock-free
// Assumed:
// _GLIBCXX_ATOMIC_BUILTINS_1
// _GLIBCXX_ATOMIC_BUILTINS_2
// _GLIBCXX_ATOMIC_BUILTINS_4
// _GLIBCXX_ATOMIC_BUILTINS_8
namespace __atomic2
{
  /// atomic_flag
  struct atomic_flag : public __atomic_flag_base
  {
    atomic_flag() = default;
    ~atomic_flag() = default;
    atomic_flag(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) = delete;

    // Conversion to ATOMIC_FLAG_INIT.
    atomic_flag(bool __i): __atomic_flag_base({ __i }) { }

    bool
    test_and_set(memory_order __m = memory_order_seq_cst) volatile
    {
      // Redundant synchronize if built-in for lock is a full barrier.
      if (__m != memory_order_acquire && __m != memory_order_acq_rel)
        __sync_synchronize();
      return __sync_lock_test_and_set(&_M_i, 1);
    }

    void
    clear(memory_order __m = memory_order_seq_cst) volatile
    {
      __glibcxx_assert(__m != memory_order_consume);
      __glibcxx_assert(__m != memory_order_acquire);
      __glibcxx_assert(__m != memory_order_acq_rel);

      __sync_lock_release(&_M_i);
      if (__m != memory_order_acquire && __m != memory_order_acq_rel)
        __sync_synchronize();
    }
  };


  /// 29.4.2, address types
  struct atomic_address
  {
  private:
    void* _M_i;

  public:
    atomic_address() = default;
    ~atomic_address() = default;
    atomic_address(const atomic_address&) = delete;
    atomic_address& operator=(const atomic_address&) = delete;

    atomic_address(void* __v) { _M_i = __v; }

    bool
    is_lock_free() const volatile
    { return true; }

    void
    store(void* __v, memory_order __m = memory_order_seq_cst) volatile
    {
      __glibcxx_assert(__m != memory_order_acquire);
      __glibcxx_assert(__m != memory_order_acq_rel);
      __glibcxx_assert(__m != memory_order_consume);

      if (__m == memory_order_relaxed)
        _M_i = __v;
      else
        {
          // write_mem_barrier();
          _M_i = __v;
          if (__m == memory_order_seq_cst)
            __sync_synchronize();
        }
    }

    void*
    load(memory_order __m = memory_order_seq_cst) const volatile
    {
      __glibcxx_assert(__m != memory_order_release);
      __glibcxx_assert(__m != memory_order_acq_rel);

      __sync_synchronize();
      void* __ret = _M_i;
      __sync_synchronize();
      return __ret;
    }

    void*
    exchange(void* __v, memory_order __m = memory_order_seq_cst) volatile
    {
      // XXX built-in assumes memory_order_acquire.
      return __sync_lock_test_and_set(&_M_i, __v);
    }

    bool
    compare_exchange_weak(void*& __v1, void* __v2, memory_order __m1,
                          memory_order __m2) volatile
    { return compare_exchange_strong(__v1, __v2, __m1, __m2); }

    bool
    compare_exchange_weak(void*& __v1, void* __v2,
                          memory_order __m = memory_order_seq_cst) volatile
    {
      return compare_exchange_weak(__v1, __v2, __m,
                                   __calculate_memory_order(__m));
    }

    bool
    compare_exchange_strong(void*& __v1, void* __v2, memory_order __m1,
                            memory_order __m2) volatile
    {
      __glibcxx_assert(__m2 != memory_order_release);
      __glibcxx_assert(__m2 != memory_order_acq_rel);
      __glibcxx_assert(__m2 <= __m1);

      void* __v1o = __v1;
      void* __v1n = __sync_val_compare_and_swap(&_M_i, __v1o, __v2);

      // Assume extra stores (of same value) allowed in true case.
      __v1 = __v1n;
      return __v1o == __v1n;
    }

    bool
    compare_exchange_strong(void*& __v1, void* __v2,
                            memory_order __m = memory_order_seq_cst) volatile
    {
      return compare_exchange_strong(__v1, __v2, __m,
                                     __calculate_memory_order(__m));
    }

    void*
    fetch_add(ptrdiff_t __d, memory_order __m = memory_order_seq_cst) volatile
    { return __sync_fetch_and_add(&_M_i, __d); }

    void*
    fetch_sub(ptrdiff_t __d, memory_order __m = memory_order_seq_cst) volatile
    { return __sync_fetch_and_sub(&_M_i, __d); }

    operator void*() const volatile
    { return load(); }

    void*
    operator=(void* __v) // XXX volatile
    {
      store(__v);
      return __v;
    }

    void*
    operator+=(ptrdiff_t __d) volatile
    { return __sync_add_and_fetch(&_M_i, __d); }

    void*
    operator-=(ptrdiff_t __d) volatile
    { return __sync_sub_and_fetch(&_M_i, __d); }
  };

  // 29.3.1 atomic integral types
  // For each of the integral types, define atomic_[integral type] struct
  //
  // atomic_bool     bool
  // atomic_char     char
  // atomic_schar    signed char
  // atomic_uchar    unsigned char
  // atomic_short    short
  // atomic_ushort   unsigned short
  // atomic_int      int
  // atomic_uint     unsigned int
  // atomic_long     long
  // atomic_ulong    unsigned long
  // atomic_llong    long long
  // atomic_ullong   unsigned long long
  // atomic_char16_t char16_t
  // atomic_char32_t char32_t
  // atomic_wchar_t  wchar_t

  // Base type.
  // NB: Assuming _ITp is an integral scalar type that is 1, 2, 4, or 8 bytes,
  // since that is what GCC built-in functions for atomic memory access work on.
  template<typename _ITp>
    struct __atomic_base
    {
    private:
      typedef _ITp __integral_type;

      __integral_type _M_i;

    public:
      __atomic_base() = default;
      ~__atomic_base() = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;

      // Requires __integral_type convertible to _M_base._M_i.
      __atomic_base(__integral_type __i) { _M_i = __i; }

      operator __integral_type() const volatile
      { return load(); }

      __integral_type
      operator=(__integral_type __i) // XXX volatile
      {
        store(__i);
        return __i;
      }

      __integral_type
      operator++(int) volatile
      { return fetch_add(1); }

      __integral_type
      operator--(int) volatile
      { return fetch_sub(1); }

      __integral_type
      operator++() volatile
      { return __sync_add_and_fetch(&_M_i, 1); }

      __integral_type
      operator--() volatile
      { return __sync_sub_and_fetch(&_M_i, 1); }

      __integral_type
      operator+=(__integral_type __i) volatile
      { return __sync_add_and_fetch(&_M_i, __i); }

      __integral_type
      operator-=(__integral_type __i) volatile
      { return __sync_sub_and_fetch(&_M_i, __i); }

      __integral_type
      operator&=(__integral_type __i) volatile
      { return __sync_and_and_fetch(&_M_i, __i); }

      __integral_type
      operator|=(__integral_type __i) volatile
      { return __sync_or_and_fetch(&_M_i, __i); }

      __integral_type
      operator^=(__integral_type __i) volatile
      { return __sync_xor_and_fetch(&_M_i, __i); }

      bool
      is_lock_free() const volatile
      { return true; }

      void
      store(__integral_type __i,
            memory_order __m = memory_order_seq_cst) volatile
      {
        __glibcxx_assert(__m != memory_order_acquire);
        __glibcxx_assert(__m != memory_order_acq_rel);
        __glibcxx_assert(__m != memory_order_consume);

        if (__m == memory_order_relaxed)
          _M_i = __i;
        else
          {
            // write_mem_barrier();
            _M_i = __i;
            if (__m == memory_order_seq_cst)
              __sync_synchronize();
          }
      }

      __integral_type
      load(memory_order __m = memory_order_seq_cst) const volatile
      {
        __glibcxx_assert(__m != memory_order_release);
        __glibcxx_assert(__m != memory_order_acq_rel);

        __sync_synchronize();
        __integral_type __ret = _M_i;
        __sync_synchronize();
        return __ret;
      }

      __integral_type
      exchange(__integral_type __i,
               memory_order __m = memory_order_seq_cst) volatile
      {
        // XXX built-in assumes memory_order_acquire.
        return __sync_lock_test_and_set(&_M_i, __i);
      }

      bool
      compare_exchange_weak(__integral_type& __i1, __integral_type __i2,
                            memory_order __m1, memory_order __m2) volatile
      { return compare_exchange_strong(__i1, __i2, __m1, __m2); }

      bool
      compare_exchange_weak(__integral_type& __i1, __integral_type __i2,
                            memory_order __m = memory_order_seq_cst) volatile
      {
        return compare_exchange_weak(__i1, __i2, __m,
                                     __calculate_memory_order(__m));
      }

      bool
      compare_exchange_strong(__integral_type& __i1, __integral_type __i2,
                              memory_order __m1, memory_order __m2) volatile
      {
        __glibcxx_assert(__m2 != memory_order_release);
        __glibcxx_assert(__m2 != memory_order_acq_rel);
        __glibcxx_assert(__m2 <= __m1);

        __integral_type __i1o = __i1;
        __integral_type __i1n = __sync_val_compare_and_swap(&_M_i, __i1o, __i2);

        // Assume extra stores (of same value) allowed in true case.
        __i1 = __i1n;
        return __i1o == __i1n;
      }

      bool
      compare_exchange_strong(__integral_type& __i1, __integral_type __i2,
                              memory_order __m = memory_order_seq_cst) volatile
      {
        return compare_exchange_strong(__i1, __i2, __m,
                                       __calculate_memory_order(__m));
      }

      __integral_type
      fetch_add(__integral_type __i,
                memory_order __m = memory_order_seq_cst) volatile
      { return __sync_fetch_and_add(&_M_i, __i); }

      __integral_type
      fetch_sub(__integral_type __i,
                memory_order __m = memory_order_seq_cst) volatile
      { return __sync_fetch_and_sub(&_M_i, __i); }

      __integral_type
      fetch_and(__integral_type __i,
                memory_order __m = memory_order_seq_cst) volatile
      { return __sync_fetch_and_and(&_M_i, __i); }

      __integral_type
      fetch_or(__integral_type __i,
               memory_order __m = memory_order_seq_cst) volatile
      { return __sync_fetch_and_or(&_M_i, __i); }

      __integral_type
      fetch_xor(__integral_type __i,
                memory_order __m = memory_order_seq_cst) volatile
      { return __sync_fetch_and_xor(&_M_i, __i); }
    };


  /// atomic_bool
  // NB: No operators or fetch-operations for this type.
  struct atomic_bool
  {
  private:
    __atomic_base<bool> _M_base;

  public:
    atomic_bool() = default;
    ~atomic_bool() = default;
    atomic_bool(const atomic_bool&) = delete;
    atomic_bool& operator=(const atomic_bool&) = delete;

    atomic_bool(bool __i) : _M_base(__i) { }

    bool
    operator=(bool __i) // XXX volatile
    { return _M_base.operator=(__i); }

    operator bool() const volatile
    { return _M_base.load(); }

    bool
    is_lock_free() const volatile
    { return _M_base.is_lock_free(); }

    void
    store(bool __i, memory_order __m = memory_order_seq_cst) volatile
    { _M_base.store(__i, __m); }

    bool
    load(memory_order __m = memory_order_seq_cst) const volatile
    { return _M_base.load(__m); }

    bool
    exchange(bool __i, memory_order __m = memory_order_seq_cst) volatile
    { return _M_base.exchange(__i, __m); }

    bool
    compare_exchange_weak(bool& __i1, bool __i2, memory_order __m1,
                          memory_order __m2) volatile
    { return _M_base.compare_exchange_weak(__i1, __i2, __m1, __m2); }

    bool
    compare_exchange_weak(bool& __i1, bool __i2,
                          memory_order __m = memory_order_seq_cst) volatile
    { return _M_base.compare_exchange_weak(__i1, __i2, __m); }

    bool
    compare_exchange_strong(bool& __i1, bool __i2, memory_order __m1,
                            memory_order __m2) volatile
    { return _M_base.compare_exchange_strong(__i1, __i2, __m1, __m2); }

    bool
    compare_exchange_strong(bool& __i1, bool __i2,
                            memory_order __m = memory_order_seq_cst) volatile
    { return _M_base.compare_exchange_strong(__i1, __i2, __m); }
  };
} // namespace __atomic2

// _GLIBCXX_END_NAMESPACE

#endif
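
The listing above is an internal lock-free implementation; user code reaches these operations through the public <atomic> header rather than by including bits/atomic_2.h directly. The sketch below is illustrative only (the names spin_lock and fetch_max are not part of the library): it shows the two usage patterns the members above implement, test_and_set/clear driving a spinlock, and a compare-exchange retry loop in which the expected value is refreshed on failure, mirroring the way compare_exchange_strong above writes the observed value back into its first argument.

#include <atomic>

// Spinlock built on the flag operations shown above.
class spin_lock
{
  std::atomic_flag flag_ = ATOMIC_FLAG_INIT;

public:
  void
  lock()
  {
    // Loop until the previous value was "clear"; acquire ordering pairs
    // with the release in unlock().
    while (flag_.test_and_set(std::memory_order_acquire))
      { /* busy-wait */ }
  }

  void
  unlock()
  { flag_.clear(std::memory_order_release); }
};

// Classic compare-exchange retry loop: on failure, cur is updated to the
// value actually observed, just as the CAS members above update __i1/__v1.
inline void
fetch_max(std::atomic<int>& a, int v)
{
  int cur = a.load();
  while (cur < v && !a.compare_exchange_weak(cur, v))
    { /* cur now holds the freshly observed value; retry */ }
}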
