libstdc++
shared_mutex
Source listing of the <shared_mutex> standard library header.
00001 // <shared_mutex> -*- C++ -*-
00002 
00003 // Copyright (C) 2013-2019 Free Software Foundation, Inc.
00004 //
00005 // This file is part of the GNU ISO C++ Library.  This library is free
00006 // software; you can redistribute it and/or modify it under the
00007 // terms of the GNU General Public License as published by the
00008 // Free Software Foundation; either version 3, or (at your option)
00009 // any later version.
00010 
00011 // This library is distributed in the hope that it will be useful,
00012 // but WITHOUT ANY WARRANTY; without even the implied warranty of
00013 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
00014 // GNU General Public License for more details.
00015 
00016 // Under Section 7 of GPL version 3, you are granted additional
00017 // permissions described in the GCC Runtime Library Exception, version
00018 // 3.1, as published by the Free Software Foundation.
00019 
00020 // You should have received a copy of the GNU General Public License and
00021 // a copy of the GCC Runtime Library Exception along with this program;
00022 // see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
00023 // <http://www.gnu.org/licenses/>.
00024 
00025 /** @file include/shared_mutex
00026  *  This is a Standard C++ Library header.
00027  */
00028 
00029 #ifndef _GLIBCXX_SHARED_MUTEX
00030 #define _GLIBCXX_SHARED_MUTEX 1
00031 
00032 #pragma GCC system_header
00033 
00034 #if __cplusplus >= 201402L
00035 
00036 #include <bits/c++config.h>
00037 #include <condition_variable>
00038 #include <bits/functexcept.h>
00039 
00040 namespace std _GLIBCXX_VISIBILITY(default)
00041 {
00042 _GLIBCXX_BEGIN_NAMESPACE_VERSION
00043 
00044   /**
00045    * @ingroup mutexes
00046    * @{
00047    */
00048 
00049 #ifdef _GLIBCXX_HAS_GTHREADS
00050 
00051 #if __cplusplus >= 201703L
00052 #define __cpp_lib_shared_mutex 201505
00053   class shared_mutex;
00054 #endif
00055 
00056 #define __cpp_lib_shared_timed_mutex 201402
00057   class shared_timed_mutex;
00058 
00059 #if _GLIBCXX_USE_PTHREAD_RWLOCK_T
00060 #ifdef __gthrw
      // __gthrw support is available: declare weak references to the
      // pthread_rwlock_* functions and define __glibcxx_* wrappers that call
      // them only when __gthread_active_p() reports the process is actually
      // multi-threaded, and otherwise succeed trivially by returning 0.
00061 #define _GLIBCXX_GTHRW(name) \
00062   __gthrw(pthread_ ## name); \
00063   static inline int \
00064   __glibcxx_ ## name (pthread_rwlock_t *__rwlock) \
00065   { \
00066     if (__gthread_active_p ()) \
00067       return __gthrw_(pthread_ ## name) (__rwlock); \
00068     else \
00069       return 0; \
00070   }
00071   _GLIBCXX_GTHRW(rwlock_rdlock)
00072   _GLIBCXX_GTHRW(rwlock_tryrdlock)
00073   _GLIBCXX_GTHRW(rwlock_wrlock)
00074   _GLIBCXX_GTHRW(rwlock_trywrlock)
00075   _GLIBCXX_GTHRW(rwlock_unlock)
      // init/destroy wrappers are only needed when there is no static
      // initializer macro.  pthread_rwlock_init takes an extra attributes
      // argument (passed as NULL below), so its wrapper is written out by
      // hand instead of via _GLIBCXX_GTHRW.
00076 # ifndef PTHREAD_RWLOCK_INITIALIZER
00077   _GLIBCXX_GTHRW(rwlock_destroy)
00078   __gthrw(pthread_rwlock_init);
00079   static inline int
00080   __glibcxx_rwlock_init (pthread_rwlock_t *__rwlock)
00081   {
00082     if (__gthread_active_p ())
00083       return __gthrw_(pthread_rwlock_init) (__rwlock, NULL);
00084     else
00085       return 0;
00086   }
00087 # endif
      // Timed-lock wrappers, only when the thread model supports timed
      // mutex operations.  These take the extra timespec argument, so they
      // are also written out by hand.
00088 # if _GTHREAD_USE_MUTEX_TIMEDLOCK
00089    __gthrw(pthread_rwlock_timedrdlock);
00090   static inline int
00091   __glibcxx_rwlock_timedrdlock (pthread_rwlock_t *__rwlock,
00092                                 const timespec *__ts)
00093   {
00094     if (__gthread_active_p ())
00095       return __gthrw_(pthread_rwlock_timedrdlock) (__rwlock, __ts);
00096     else
00097       return 0;
00098   }
00099    __gthrw(pthread_rwlock_timedwrlock);
00100   static inline int
00101   __glibcxx_rwlock_timedwrlock (pthread_rwlock_t *__rwlock,
00102                                 const timespec *__ts)
00103   {
00104     if (__gthread_active_p ())
00105       return __gthrw_(pthread_rwlock_timedwrlock) (__rwlock, __ts);
00106     else
00107       return 0;
00108   }
00109 # endif
00110 #else
      // No __gthrw support: forward unconditionally to the pthread functions.
00111   static inline int
00112   __glibcxx_rwlock_rdlock (pthread_rwlock_t *__rwlock)
00113   { return pthread_rwlock_rdlock (__rwlock); }
00114   static inline int
00115   __glibcxx_rwlock_tryrdlock (pthread_rwlock_t *__rwlock)
00116   { return pthread_rwlock_tryrdlock (__rwlock); }
00117   static inline int
00118   __glibcxx_rwlock_wrlock (pthread_rwlock_t *__rwlock)
00119   { return pthread_rwlock_wrlock (__rwlock); }
00120   static inline int
00121   __glibcxx_rwlock_trywrlock (pthread_rwlock_t *__rwlock)
00122   { return pthread_rwlock_trywrlock (__rwlock); }
00123   static inline int
00124   __glibcxx_rwlock_unlock (pthread_rwlock_t *__rwlock)
00125   { return pthread_rwlock_unlock (__rwlock); }
00126   static inline int
00127   __glibcxx_rwlock_destroy(pthread_rwlock_t *__rwlock)
00128   { return pthread_rwlock_destroy (__rwlock); }
00129   static inline int
00130   __glibcxx_rwlock_init(pthread_rwlock_t *__rwlock)
00131   { return pthread_rwlock_init (__rwlock, NULL); }
00132 # if _GTHREAD_USE_MUTEX_TIMEDLOCK
00133   static inline int
00134   __glibcxx_rwlock_timedrdlock (pthread_rwlock_t *__rwlock,
00135                                 const timespec *__ts)
00136   { return pthread_rwlock_timedrdlock (__rwlock, __ts); }
00137   static inline int
00138   __glibcxx_rwlock_timedwrlock (pthread_rwlock_t *__rwlock,
00139                                 const timespec *__ts)
00140   { return pthread_rwlock_timedwrlock (__rwlock, __ts); }
00141 # endif
00142 #endif
00143 
00144   /// A shared mutex type implemented using pthread_rwlock_t.
00145   class __shared_mutex_pthread
00146   {
00147     friend class shared_timed_mutex;
00148 
00149 #ifdef PTHREAD_RWLOCK_INITIALIZER
      // The rwlock can be statically initialized, so construction and
      // destruction are trivial.
00150     pthread_rwlock_t    _M_rwlock = PTHREAD_RWLOCK_INITIALIZER;
00151 
00152   public:
00153     __shared_mutex_pthread() = default;
00154     ~__shared_mutex_pthread() = default;
00155 #else
00156     pthread_rwlock_t    _M_rwlock;
00157 
00158   public:
      // Initialize the rwlock at run time, mapping the documented
      // pthread_rwlock_init failures onto C++ exceptions.
00159     __shared_mutex_pthread()
00160     {
      // Fixed: __glibcxx_rwlock_init takes only the rwlock pointer; the
      // wrapper itself supplies the NULL attributes argument to
      // pthread_rwlock_init.  The previous two-argument call
      // (__glibcxx_rwlock_init(&_M_rwlock, NULL)) does not match either
      // wrapper definition above and would fail to compile on targets
      // without PTHREAD_RWLOCK_INITIALIZER.
00161       int __ret = __glibcxx_rwlock_init(&_M_rwlock);
00162       if (__ret == ENOMEM)
00163         __throw_bad_alloc();
00164       else if (__ret == EAGAIN)
00165         __throw_system_error(int(errc::resource_unavailable_try_again));
00166       else if (__ret == EPERM)
00167         __throw_system_error(int(errc::operation_not_permitted));
00168       // Errors not handled: EBUSY, EINVAL
00169       __glibcxx_assert(__ret == 0);
00170     }
00171 
      // Destroy the rwlock; must not be locked by any thread.
00172     ~__shared_mutex_pthread()
00173     {
00174       int __ret __attribute((__unused__)) = __glibcxx_rwlock_destroy(&_M_rwlock);
00175       // Errors not handled: EBUSY, EINVAL
00176       __glibcxx_assert(__ret == 0);
00177     }
00178 #endif
00179 
00180     __shared_mutex_pthread(const __shared_mutex_pthread&) = delete;
00181     __shared_mutex_pthread& operator=(const __shared_mutex_pthread&) = delete;
00182 
      // Exclusive ownership

      // Acquire the write lock, blocking.  Throws system_error
      // (resource_deadlock_would_occur) if the implementation reports
      // self-deadlock (EDEADLK).
00183     void
00184     lock()
00185     {
00186       int __ret = __glibcxx_rwlock_wrlock(&_M_rwlock);
00187       if (__ret == EDEADLK)
00188         __throw_system_error(int(errc::resource_deadlock_would_occur));
00189       // Errors not handled: EINVAL
00190       __glibcxx_assert(__ret == 0);
00191     }
00192 
      // Try to acquire the write lock without blocking; returns false if the
      // lock is busy (EBUSY).
00193     bool
00194     try_lock()
00195     {
00196       int __ret = __glibcxx_rwlock_trywrlock(&_M_rwlock);
00197       if (__ret == EBUSY) return false;
00198       // Errors not handled: EINVAL
00199       __glibcxx_assert(__ret == 0);
00200       return true;
00201     }
00202 
      // Release the lock (exclusive or shared; pthread_rwlock_unlock handles
      // both).
00203     void
00204     unlock()
00205     {
00206       int __ret __attribute((__unused__)) = __glibcxx_rwlock_unlock(&_M_rwlock);
00207       // Errors not handled: EPERM, EBUSY, EINVAL
00208       __glibcxx_assert(__ret == 0);
00209     }
00210 
00211     // Shared ownership
00212 
      // Acquire a read (shared) lock, blocking; retries on EAGAIN (reader
      // count exhausted) and throws on self-deadlock, mirroring lock().
00213     void
00214     lock_shared()
00215     {
00216       int __ret;
00217       // We retry if we exceeded the maximum number of read locks supported by
00218       // the POSIX implementation; this can result in busy-waiting, but this
00219       // is okay based on the current specification of forward progress
00220       // guarantees by the standard.
00221       do
00222         __ret = __glibcxx_rwlock_rdlock(&_M_rwlock);
00223       while (__ret == EAGAIN);
00224       if (__ret == EDEADLK)
00225         __throw_system_error(int(errc::resource_deadlock_would_occur));
00226       // Errors not handled: EINVAL
00227       __glibcxx_assert(__ret == 0);
00228     }
00229 
      // Try to acquire a read lock without blocking; returns false if the
      // lock is busy or the reader count is exhausted.
00230     bool
00231     try_lock_shared()
00232     {
00233       int __ret = __glibcxx_rwlock_tryrdlock(&_M_rwlock);
00234       // If the maximum number of read locks has been exceeded, we just fail
00235       // to acquire the lock.  Unlike for lock(), we are not allowed to throw
00236       // an exception.
00237       if (__ret == EBUSY || __ret == EAGAIN) return false;
00238       // Errors not handled: EINVAL
00239       __glibcxx_assert(__ret == 0);
00240       return true;
00241     }
00242 
      // Release a shared lock; same underlying call as unlock().
00243     void
00244     unlock_shared()
00245     {
00246       unlock();
00247     }
00248 
      // Expose the underlying pthread_rwlock_t for native_handle().
00249     void* native_handle() { return &_M_rwlock; }
00250   };
00251 #endif
00252 
00253 #if ! (_GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK)
00254   /// A shared mutex type implemented using std::condition_variable.
00255   class __shared_mutex_cv
00256   {
00257     friend class shared_timed_mutex;
00258 
00259     // Based on Howard Hinnant's reference implementation from N2406.
00260 
00261     // The high bit of _M_state is the write-entered flag which is set to
00262     // indicate a writer has taken the lock or is queuing to take the lock.
00263     // The remaining bits are the count of reader locks.
00264     //
00265     // To take a reader lock, block on gate1 while the write-entered flag is
00266     // set or the maximum number of reader locks is held, then increment the
00267     // reader lock count.
00268     // To release, decrement the count, then if the write-entered flag is set
00269     // and the count is zero then signal gate2 to wake a queued writer,
00270     // otherwise if the maximum number of reader locks was held signal gate1
00271     // to wake a reader.
00272     //
00273     // To take a writer lock, block on gate1 while the write-entered flag is
00274     // set, then set the write-entered flag to start queueing, then block on
00275     // gate2 while the number of reader locks is non-zero.
00276     // To release, unset the write-entered flag and signal gate1 to wake all
00277     // blocked readers and writers.
00278     //
00279     // This means that when no reader locks are held readers and writers get
00280     // equal priority. When one or more reader locks is held a writer gets
00281     // priority and no more reader locks can be taken while the writer is
00282     // queued.
00283 
00284     // Only locked when accessing _M_state or waiting on condition variables.
00285     mutex               _M_mut;
00286     // Used to block while write-entered is set or reader count at maximum.
00287     condition_variable  _M_gate1;
00288     // Used to block queued writers while reader count is non-zero.
00289     condition_variable  _M_gate2;
00290     // The write-entered flag and reader count.
00291     unsigned            _M_state;
00292 
      // High bit of _M_state; all lower bits count readers.
00293     static constexpr unsigned _S_write_entered
00294       = 1U << (sizeof(unsigned)*__CHAR_BIT__ - 1);
00295     static constexpr unsigned _S_max_readers = ~_S_write_entered;
00296 
00297     // Test whether the write-entered flag is set. _M_mut must be locked.
00298     bool _M_write_entered() const { return _M_state & _S_write_entered; }
00299 
00300     // The number of reader locks currently held. _M_mut must be locked.
00301     unsigned _M_readers() const { return _M_state & _S_max_readers; }
00302 
00303   public:
00304     __shared_mutex_cv() : _M_state(0) {}
00305 
      // The mutex must not be held, by readers or a writer, when destroyed.
00306     ~__shared_mutex_cv()
00307     {
00308       __glibcxx_assert( _M_state == 0 );
00309     }
00310 
00311     __shared_mutex_cv(const __shared_mutex_cv&) = delete;
00312     __shared_mutex_cv& operator=(const __shared_mutex_cv&) = delete;
00313 
00314     // Exclusive ownership
00315 
      // Block until the write lock is acquired: first claim the
      // write-entered flag, then wait for existing readers to drain.
00316     void
00317     lock()
00318     {
00319       unique_lock<mutex> __lk(_M_mut);
00320       // Wait until we can set the write-entered flag.
00321       _M_gate1.wait(__lk, [=]{ return !_M_write_entered(); });
00322       _M_state |= _S_write_entered;
00323       // Then wait until there are no more readers.
00324       _M_gate2.wait(__lk, [=]{ return _M_readers() == 0; });
00325     }
00326 
      // Succeeds only if the internal mutex can be taken without blocking
      // and no reader or writer currently holds the lock (_M_state == 0).
00327     bool
00328     try_lock()
00329     {
00330       unique_lock<mutex> __lk(_M_mut, try_to_lock);
00331       if (__lk.owns_lock() && _M_state == 0)
00332         {
00333           _M_state = _S_write_entered;
00334           return true;
00335         }
00336       return false;
00337     }
00338 
      // Release the write lock and wake everyone blocked on gate1.
00339     void
00340     unlock()
00341     {
00342       lock_guard<mutex> __lk(_M_mut);
00343       __glibcxx_assert( _M_write_entered() );
00344       _M_state = 0;
00345       // call notify_all() while mutex is held so that another thread can't
00346       // lock and unlock the mutex then destroy *this before we make the call.
00347       _M_gate1.notify_all();
00348     }
00349 
00350     // Shared ownership
00351 
      // Block until neither a writer is active/queued nor the reader count
      // is at its maximum, then register one more reader.
00352     void
00353     lock_shared()
00354     {
00355       unique_lock<mutex> __lk(_M_mut);
00356       _M_gate1.wait(__lk, [=]{ return _M_state < _S_max_readers; });
00357       ++_M_state;
00358     }
00359 
      // Non-blocking variant of lock_shared(); fails if the internal mutex
      // is busy, a writer is active/queued, or the reader count is full.
00360     bool
00361     try_lock_shared()
00362     {
00363       unique_lock<mutex> __lk(_M_mut, try_to_lock);
00364       if (!__lk.owns_lock())
00365         return false;
00366       if (_M_state < _S_max_readers)
00367         {
00368           ++_M_state;
00369           return true;
00370         }
00371       return false;
00372     }
00373 
      // Drop one reader; hand off to a queued writer or a blocked reader as
      // described in the algorithm comment above.
00374     void
00375     unlock_shared()
00376     {
00377       lock_guard<mutex> __lk(_M_mut);
00378       __glibcxx_assert( _M_readers() > 0 );
00379       auto __prev = _M_state--;
00380       if (_M_write_entered())
00381         {
00382           // Wake the queued writer if there are no more readers.
00383           if (_M_readers() == 0)
00384             _M_gate2.notify_one();
00385           // No need to notify gate1 because we give priority to the queued
00386           // writer, and that writer will eventually notify gate1 after it
00387           // clears the write-entered flag.
00388         }
00389       else
00390         {
00391           // Wake any thread that was blocked on reader overflow.
00392           if (__prev == _S_max_readers)
00393             _M_gate1.notify_one();
00394         }
00395     }
00396   };
00397 #endif
00398 
00399 #if __cplusplus > 201402L
00400   /// The standard shared mutex type.
      // Every operation delegates to the configuration-selected
      // implementation: pthread_rwlock_t when available, otherwise the
      // condition-variable based __shared_mutex_cv.
00401   class shared_mutex
00402   {
00403   public:
00404     shared_mutex() = default;
00405     ~shared_mutex() = default;
00406 
00407     shared_mutex(const shared_mutex&) = delete;
00408     shared_mutex& operator=(const shared_mutex&) = delete;
00409 
00410     // Exclusive ownership
00411 
00412     void lock() { _M_impl.lock(); }
00413     bool try_lock() { return _M_impl.try_lock(); }
00414     void unlock() { _M_impl.unlock(); }
00415 
00416     // Shared ownership
00417 
00418     void lock_shared() { _M_impl.lock_shared(); }
00419     bool try_lock_shared() { return _M_impl.try_lock_shared(); }
00420     void unlock_shared() { _M_impl.unlock_shared(); }
00421 
00422 #if _GLIBCXX_USE_PTHREAD_RWLOCK_T
      // native_handle() is only provided by the pthread-based
      // implementation; it exposes the underlying pthread_rwlock_t.
00423     typedef void* native_handle_type;
00424     native_handle_type native_handle() { return _M_impl.native_handle(); }
00425 
00426   private:
00427     __shared_mutex_pthread _M_impl;
00428 #else
00429   private:
00430     __shared_mutex_cv _M_impl;
00431 #endif
00432   };
00433 #endif // C++17
00434 
      // Use the pthread implementation only if it also provides the timed
      // lock operations; otherwise fall back to the condition-variable one.
00435 #if _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
00436   using __shared_timed_mutex_base = __shared_mutex_pthread;
00437 #else
00438   using __shared_timed_mutex_base = __shared_mutex_cv;
00439 #endif
00440 
00441   /// The standard shared timed mutex type.
00442   class shared_timed_mutex
00443   : private __shared_timed_mutex_base
00444   {
00445     using _Base = __shared_timed_mutex_base;
00446 
00447     // Must use the same clock as condition_variable for __shared_mutex_cv.
00448     typedef chrono::system_clock        __clock_t;
00449 
00450   public:
00451     shared_timed_mutex() = default;
00452     ~shared_timed_mutex() = default;
00453 
00454     shared_timed_mutex(const shared_timed_mutex&) = delete;
00455     shared_timed_mutex& operator=(const shared_timed_mutex&) = delete;
00456 
00457     // Exclusive ownership
00458 
00459     void lock() { _Base::lock(); }
00460     bool try_lock() { return _Base::try_lock(); }
00461     void unlock() { _Base::unlock(); }
00462 
      // Convert the relative timeout to an absolute __clock_t time point and
      // delegate to try_lock_until.
00463     template<typename _Rep, typename _Period>
00464       bool
00465       try_lock_for(const chrono::duration<_Rep, _Period>& __rel_time)
00466       {
00467         return try_lock_until(__clock_t::now() + __rel_time);
00468       }
00469 
00470     // Shared ownership
00471 
00472     void lock_shared() { _Base::lock_shared(); }
00473     bool try_lock_shared() { return _Base::try_lock_shared(); }
00474     void unlock_shared() { _Base::unlock_shared(); }
00475 
      // Relative-timeout shared lock; same conversion as try_lock_for.
00476     template<typename _Rep, typename _Period>
00477       bool
00478       try_lock_shared_for(const chrono::duration<_Rep, _Period>& __rel_time)
00479       {
00480         return try_lock_shared_until(__clock_t::now() + __rel_time);
00481       }
00482 
00483 #if _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
00484 
00485     // Exclusive ownership
00486 
      // Timed write lock via pthread_rwlock_timedwrlock.  The absolute time
      // is split into whole seconds and nanoseconds to build the timespec.
00487     template<typename _Duration>
00488       bool
00489       try_lock_until(const chrono::time_point<__clock_t, _Duration>& __atime)
00490       {
00491         auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
00492         auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
00493 
00494         __gthread_time_t __ts =
00495           {
00496             static_cast<std::time_t>(__s.time_since_epoch().count()),
00497             static_cast<long>(__ns.count())
00498           };
00499 
00500         int __ret = __glibcxx_rwlock_timedwrlock(&_M_rwlock, &__ts);
00501         // On self-deadlock, we just fail to acquire the lock.  Technically,
00502         // the program violated the precondition.
00503         if (__ret == ETIMEDOUT || __ret == EDEADLK)
00504           return false;
00505         // Errors not handled: EINVAL
00506         __glibcxx_assert(__ret == 0);
00507         return true;
00508       }
00509 
      // Generic-clock overload: translate to __clock_t and delegate.
00510     template<typename _Clock, typename _Duration>
00511       bool
00512       try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
00513       {
00514         // DR 887 - Sync unknown clock to known clock.
00515         const typename _Clock::time_point __c_entry = _Clock::now();
00516         const __clock_t::time_point __s_entry = __clock_t::now();
00517         const auto __delta = __abs_time - __c_entry;
00518         const auto __s_atime = __s_entry + __delta;
00519         return try_lock_until(__s_atime);
00520       }
00521 
00522     // Shared ownership
00523 
      // Timed read lock via pthread_rwlock_timedrdlock; see the loop comment
      // below for the EAGAIN/EDEADLK retry policy.
00524     template<typename _Duration>
00525       bool
00526       try_lock_shared_until(const chrono::time_point<__clock_t,
00527                             _Duration>& __atime)
00528       {
00529         auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
00530         auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
00531 
00532         __gthread_time_t __ts =
00533           {
00534             static_cast<std::time_t>(__s.time_since_epoch().count()),
00535             static_cast<long>(__ns.count())
00536           };
00537 
00538         int __ret;
00539         // Unlike for lock(), we are not allowed to throw an exception so if
00540         // the maximum number of read locks has been exceeded, or we would
00541         // deadlock, we just try to acquire the lock again (and will time out
00542         // eventually).
00543         // In cases where we would exceed the maximum number of read locks
00544         // throughout the whole time until the timeout, we will fail to
00545         // acquire the lock even if it would be logically free; however, this
00546         // is allowed by the standard, and we made a "strong effort"
00547         // (see C++14 30.4.1.4p26).
00548         // For cases where the implementation detects a deadlock we
00549         // intentionally block and timeout so that an early return isn't
00550         // mistaken for a spurious failure, which might help users realise
00551         // there is a deadlock.
00552         do
00553           __ret = __glibcxx_rwlock_timedrdlock(&_M_rwlock, &__ts);
00554         while (__ret == EAGAIN || __ret == EDEADLK);
00555         if (__ret == ETIMEDOUT)
00556           return false;
00557         // Errors not handled: EINVAL
00558         __glibcxx_assert(__ret == 0);
00559         return true;
00560       }
00561 
      // Generic-clock overload: translate to __clock_t and delegate.
00562     template<typename _Clock, typename _Duration>
00563       bool
00564       try_lock_shared_until(const chrono::time_point<_Clock,
00565                                                      _Duration>& __abs_time)
00566       {
00567         // DR 887 - Sync unknown clock to known clock.
00568         const typename _Clock::time_point __c_entry = _Clock::now();
00569         const __clock_t::time_point __s_entry = __clock_t::now();
00570         const auto __delta = __abs_time - __c_entry;
00571         const auto __s_atime = __s_entry + __delta;
00572         return try_lock_shared_until(__s_atime);
00573       }
00574 
00575 #else // ! (_GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK)
00576 
00577     // Exclusive ownership
00578 
      // Condition-variable based timed write lock: the two waits mirror
      // __shared_mutex_cv::lock(), but each can time out.
00579     template<typename _Clock, typename _Duration>
00580       bool
00581       try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
00582       {
00583         unique_lock<mutex> __lk(_M_mut);
00584         if (!_M_gate1.wait_until(__lk, __abs_time,
00585                                  [=]{ return !_M_write_entered(); }))
00586           {
00587             return false;
00588           }
00589         _M_state |= _S_write_entered;
00590         if (!_M_gate2.wait_until(__lk, __abs_time,
00591                                  [=]{ return _M_readers() == 0; }))
00592           {
      // Timed out while queued: withdraw the write-entered claim so other
      // threads can make progress again.
00593             _M_state ^= _S_write_entered;
00594             // Wake all threads blocked while the write-entered flag was set.
00595             _M_gate1.notify_all();
00596             return false;
00597           }
00598         return true;
00599       }
00600 
00601     // Shared ownership
00602 
      // Condition-variable based timed read lock: wait (with deadline) until
      // a reader slot is available, then register one more reader.
00603     template <typename _Clock, typename _Duration>
00604       bool
00605       try_lock_shared_until(const chrono::time_point<_Clock,
00606                                                      _Duration>& __abs_time)
00607       {
00608         unique_lock<mutex> __lk(_M_mut);
00609         if (!_M_gate1.wait_until(__lk, __abs_time,
00610                                  [=]{ return _M_state < _S_max_readers; }))
00611           {
00612             return false;
00613           }
00614         ++_M_state;
00615         return true;
00616       }
00617 
00618 #endif // _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
00619   };
00620 #endif // _GLIBCXX_HAS_GTHREADS
00621 
00622   /// shared_lock
      // RAII wrapper that manages *shared* ownership of a mutex: all of its
      // lock operations call the lock_shared family on the wrapped mutex.
00623   template<typename _Mutex>
00624     class shared_lock
00625     {
00626     public:
00627       typedef _Mutex mutex_type;
00628 
00629       // Shared locking
00630 
      // Default constructor: no associated mutex, no lock owned.
00631       shared_lock() noexcept : _M_pm(nullptr), _M_owns(false) { }
00632 
      // Acquire a shared lock on __m, blocking until granted.
00633       explicit
00634       shared_lock(mutex_type& __m)
00635       : _M_pm(std::__addressof(__m)), _M_owns(true)
00636       { __m.lock_shared(); }
00637 
      // Associate with __m without locking it.
00638       shared_lock(mutex_type& __m, defer_lock_t) noexcept
00639       : _M_pm(std::__addressof(__m)), _M_owns(false) { }
00640 
      // Attempt to acquire a shared lock without blocking.
00641       shared_lock(mutex_type& __m, try_to_lock_t)
00642       : _M_pm(std::__addressof(__m)), _M_owns(__m.try_lock_shared()) { }
00643 
      // Adopt a shared lock the caller already holds on __m.
00644       shared_lock(mutex_type& __m, adopt_lock_t)
00645       : _M_pm(std::__addressof(__m)), _M_owns(true) { }
00646 
      // Try to acquire a shared lock until the given absolute time.
00647       template<typename _Clock, typename _Duration>
00648         shared_lock(mutex_type& __m,
00649                     const chrono::time_point<_Clock, _Duration>& __abs_time)
00650       : _M_pm(std::__addressof(__m)),
00651         _M_owns(__m.try_lock_shared_until(__abs_time)) { }
00652 
      // Try to acquire a shared lock within the given relative time.
00653       template<typename _Rep, typename _Period>
00654         shared_lock(mutex_type& __m,
00655                     const chrono::duration<_Rep, _Period>& __rel_time)
00656       : _M_pm(std::__addressof(__m)),
00657         _M_owns(__m.try_lock_shared_for(__rel_time)) { }
00658 
      // Release the shared lock, if owned, on destruction.
00659       ~shared_lock()
00660       {
00661         if (_M_owns)
00662           _M_pm->unlock_shared();
00663       }
00664 
00665       shared_lock(shared_lock const&) = delete;
00666       shared_lock& operator=(shared_lock const&) = delete;
00667 
      // Move operations transfer the mutex pointer and ownership flag,
      // leaving the source in the default (empty) state.
00668       shared_lock(shared_lock&& __sl) noexcept : shared_lock()
00669       { swap(__sl); }
00670 
00671       shared_lock&
00672       operator=(shared_lock&& __sl) noexcept
00673       {
00674         shared_lock(std::move(__sl)).swap(*this);
00675         return *this;
00676       }
00677 
      // (Re)acquire the shared lock, blocking.  _M_lockable() throws if
      // there is no associated mutex or the lock is already owned.
00678       void
00679       lock()
00680       {
00681         _M_lockable();
00682         _M_pm->lock_shared();
00683         _M_owns = true;
00684       }
00685 
      // Non-blocking acquire; returns (and records) whether it succeeded.
00686       bool
00687       try_lock()
00688       {
00689         _M_lockable();
00690         return _M_owns = _M_pm->try_lock_shared();
00691       }
00692 
      // Timed acquire with a relative timeout.
00693       template<typename _Rep, typename _Period>
00694         bool
00695         try_lock_for(const chrono::duration<_Rep, _Period>& __rel_time)
00696         {
00697           _M_lockable();
00698           return _M_owns = _M_pm->try_lock_shared_for(__rel_time);
00699         }
00700 
      // Timed acquire with an absolute deadline.
00701       template<typename _Clock, typename _Duration>
00702         bool
00703         try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
00704         {
00705           _M_lockable();
00706           return _M_owns = _M_pm->try_lock_shared_until(__abs_time);
00707         }
00708 
      // Release the shared lock; throws resource_deadlock_would_occur if it
      // is not currently owned.
00709       void
00710       unlock()
00711       {
00712         if (!_M_owns)
00713           __throw_system_error(int(errc::resource_deadlock_would_occur));
00714         _M_pm->unlock_shared();
00715         _M_owns = false;
00716       }
00717 
00718       // Setters
00719 
      // Exchange state with another shared_lock.
00720       void
00721       swap(shared_lock& __u) noexcept
00722       {
00723         std::swap(_M_pm, __u._M_pm);
00724         std::swap(_M_owns, __u._M_owns);
00725       }
00726 
      // Disassociate from the mutex without unlocking it; returns the mutex
      // (caller becomes responsible for any owned lock).
00727       mutex_type*
00728       release() noexcept
00729       {
00730         _M_owns = false;
00731         return std::exchange(_M_pm, nullptr);
00732       }
00733 
00734       // Getters
00735 
00736       bool owns_lock() const noexcept { return _M_owns; }
00737 
00738       explicit operator bool() const noexcept { return _M_owns; }
00739 
00740       mutex_type* mutex() const noexcept { return _M_pm; }
00741 
00742     private:
      // Precondition check for the lock functions: throws
      // operation_not_permitted when there is no associated mutex, and
      // resource_deadlock_would_occur when the lock is already owned.
00743       void
00744       _M_lockable() const
00745       {
00746         if (_M_pm == nullptr)
00747           __throw_system_error(int(errc::operation_not_permitted));
00748         if (_M_owns)
00749           __throw_system_error(int(errc::resource_deadlock_would_occur));
00750       }
00751 
      // Associated mutex (may be null) and whether a shared lock is owned.
00752       mutex_type*       _M_pm;
00753       bool              _M_owns;
00754     };
00755 
00756   /// Swap specialization for shared_lock
      // Non-member swap delegating to the member swap, which exchanges the
      // stored mutex pointer and ownership flag.
00757   template<typename _Mutex>
00758     void
00759     swap(shared_lock<_Mutex>& __x, shared_lock<_Mutex>& __y) noexcept
00760     { __x.swap(__y); }
00761 
00762   // @} group mutexes
00763 _GLIBCXX_END_NAMESPACE_VERSION
00764 } // namespace
00765 
00766 #endif // C++14
00767 
00768 #endif // _GLIBCXX_SHARED_MUTEX