libstdc++
atomic_futex.h
Go to the documentation of this file.
1 // -*- C++ -*- header.
2 
3 // Copyright (C) 2015 Free Software Foundation, Inc.
4 //
5 // This file is part of the GNU ISO C++ Library. This library is free
6 // software; you can redistribute it and/or modify it under the
7 // terms of the GNU General Public License as published by the
8 // Free Software Foundation; either version 3, or (at your option)
9 // any later version.
10 
11 // This library is distributed in the hope that it will be useful,
12 // but WITHOUT ANY WARRANTY; without even the implied warranty of
13 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 // GNU General Public License for more details.
15 
16 // Under Section 7 of GPL version 3, you are granted additional
17 // permissions described in the GCC Runtime Library Exception, version
18 // 3.1, as published by the Free Software Foundation.
19 
20 // You should have received a copy of the GNU General Public License and
21 // a copy of the GCC Runtime Library Exception along with this program;
22 // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23 // <http://www.gnu.org/licenses/>.
24 
25 /** @file bits/atomic_futex.h
26  * This is an internal header file, included by other library headers.
27  * Do not attempt to use it directly.
28  */
29 
30 #ifndef _GLIBCXX_ATOMIC_FUTEX_H
31 #define _GLIBCXX_ATOMIC_FUTEX_H 1
32 
33 #pragma GCC system_header
34 
35 #include <bits/c++config.h>
36 #include <atomic>
37 #include <chrono>
38 #if ! (defined(_GLIBCXX_HAVE_LINUX_FUTEX) && ATOMIC_INT_LOCK_FREE > 1)
39 #include <mutex>
40 #include <condition_variable>
41 #endif
42 
43 #ifndef _GLIBCXX_ALWAYS_INLINE
44 #define _GLIBCXX_ALWAYS_INLINE inline __attribute__((__always_inline__))
45 #endif
46 
47 namespace std _GLIBCXX_VISIBILITY(default)
48 {
49 _GLIBCXX_BEGIN_NAMESPACE_VERSION
50 
51 #if defined(_GLIBCXX_HAS_GTHREADS) && defined(_GLIBCXX_USE_C99_STDINT_TR1)
52 #if defined(_GLIBCXX_HAVE_LINUX_FUTEX) && ATOMIC_INT_LOCK_FREE > 1
  // Thin wrapper around the Linux futex syscall; the out-of-line definitions
  // live elsewhere in the library (not visible in this header).
  struct __atomic_futex_unsigned_base
  {
    // Returns false iff a timeout occurred.
    // __addr/__val presumably follow FUTEX_WAIT semantics (block while
    // *__addr == __val); __s/__ns give the absolute timeout, used only when
    // __has_timeout is true — TODO confirm against the .cc definition.
    bool
    _M_futex_wait_until(unsigned *__addr, unsigned __val, bool __has_timeout,
	chrono::seconds __s, chrono::nanoseconds __ns);

    // Wake all waiters blocked on __addr.
    // This can be executed after the object has been destroyed.
    static void _M_futex_notify_all(unsigned* __addr);
  };
63 
64  template <unsigned _Waiter_bit = 0x80000000>
65  class __atomic_futex_unsigned : __atomic_futex_unsigned_base
66  {
67  typedef chrono::system_clock __clock_t;
68 
69  // This must be lock-free and at offset 0.
70  atomic<unsigned> _M_data;
71 
72  public:
73  explicit
74  __atomic_futex_unsigned(unsigned __data) : _M_data(__data)
75  { }
76 
77  _GLIBCXX_ALWAYS_INLINE unsigned
78  _M_load(memory_order __mo)
79  {
80  return _M_data.load(__mo) & ~_Waiter_bit;
81  }
82 
83  private:
84  // If a timeout occurs, returns a current value after the timeout;
85  // otherwise, returns the operand's value if equal is true or a different
86  // value if equal is false.
87  // The assumed value is the caller's assumption about the current value
88  // when making the call.
89  unsigned
90  _M_load_and_test_until(unsigned __assumed, unsigned __operand,
91  bool __equal, memory_order __mo, bool __has_timeout,
92  chrono::seconds __s, chrono::nanoseconds __ns)
93  {
94  for (;;)
95  {
96  // Don't bother checking the value again because we expect the caller to
97  // have done it recently.
98  // memory_order_relaxed is sufficient because we can rely on just the
99  // modification order (store_notify uses an atomic RMW operation too),
100  // and the futex syscalls synchronize between themselves.
101  _M_data.fetch_or(_Waiter_bit, memory_order_relaxed);
102  bool __ret;
103  __ret = _M_futex_wait_until((unsigned*)(void*)&_M_data,
104  __assumed | _Waiter_bit, __has_timeout, __s, __ns);
105  // Fetch the current value after waiting (clears _Waiter_bit).
106  __assumed = _M_load(__mo);
107  if (!__ret || ((__operand == __assumed) == __equal))
108  return __assumed;
109  // TODO adapt wait time
110  }
111  }
112 
113  // Returns the operand's value if equal is true or a different value if
114  // equal is false.
115  // The assumed value is the caller's assumption about the current value
116  // when making the call.
117  unsigned
118  _M_load_and_test(unsigned __assumed, unsigned __operand,
119  bool __equal, memory_order __mo)
120  {
121  return _M_load_and_test_until(__assumed, __operand, __equal, __mo,
122  false, chrono::seconds(0), chrono::nanoseconds(0));
123  }
124 
125  // If a timeout occurs, returns a current value after the timeout;
126  // otherwise, returns the operand's value if equal is true or a different
127  // value if equal is false.
128  // The assumed value is the caller's assumption about the current value
129  // when making the call.
130  template<typename _Dur>
131  unsigned
132  _M_load_and_test_until_impl(unsigned __assumed, unsigned __operand,
133  bool __equal, memory_order __mo,
134  const chrono::time_point<__clock_t, _Dur>& __atime)
135  {
136  auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
137  auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
138  // XXX correct?
139  return _M_load_and_test_until(__assumed, __operand, __equal, __mo,
140  true, __s.time_since_epoch(), __ns);
141  }
142 
143  public:
144 
145  _GLIBCXX_ALWAYS_INLINE unsigned
146  _M_load_when_not_equal(unsigned __val, memory_order __mo)
147  {
148  unsigned __i = _M_load(__mo);
149  if ((__i & ~_Waiter_bit) != __val) return;
150  // TODO Spin-wait first.
151  return _M_load_and_test(__i, __val, false, __mo);
152  }
153 
154  _GLIBCXX_ALWAYS_INLINE void
155  _M_load_when_equal(unsigned __val, memory_order __mo)
156  {
157  unsigned __i = _M_load(__mo);
158  if ((__i & ~_Waiter_bit) == __val)
159  return;
160  // TODO Spin-wait first.
161  _M_load_and_test(__i, __val, true, __mo);
162  }
163 
164  // Returns false iff a timeout occurred.
165  template<typename _Rep, typename _Period>
166  _GLIBCXX_ALWAYS_INLINE bool
167  _M_load_when_equal_for(unsigned __val, memory_order __mo,
168  const chrono::duration<_Rep, _Period>& __rtime)
169  {
170  return _M_load_when_equal_until(__val, __mo,
171  __clock_t::now() + __rtime);
172  }
173 
174  // Returns false iff a timeout occurred.
175  template<typename _Clock, typename _Duration>
176  _GLIBCXX_ALWAYS_INLINE bool
177  _M_load_when_equal_until(unsigned __val, memory_order __mo,
178  const chrono::time_point<_Clock, _Duration>& __atime)
179  {
180  // DR 887 - Sync unknown clock to known clock.
181  const typename _Clock::time_point __c_entry = _Clock::now();
182  const __clock_t::time_point __s_entry = __clock_t::now();
183  const auto __delta = __atime - __c_entry;
184  const auto __s_atime = __s_entry + __delta;
185  return _M_load_when_equal_until(__val, __mo, __s_atime);
186  }
187 
188  // Returns false iff a timeout occurred.
189  template<typename _Duration>
190  _GLIBCXX_ALWAYS_INLINE bool
191  _M_load_when_equal_until(unsigned __val, memory_order __mo,
192  const chrono::time_point<__clock_t, _Duration>& __atime)
193  {
194  unsigned __i = _M_load(__mo);
195  if ((__i & ~_Waiter_bit) == __val)
196  return true;
197  // TODO Spin-wait first. Ignore effect on timeout.
198  __i = _M_load_and_test_until_impl(__i, __val, true, __mo, __atime);
199  return (__i & ~_Waiter_bit) == __val;
200  }
201 
202  _GLIBCXX_ALWAYS_INLINE void
203  _M_store_notify_all(unsigned __val, memory_order __mo)
204  {
205  unsigned* __futex = (unsigned *)(void *)&_M_data;
206  if (_M_data.exchange(__val, __mo) & _Waiter_bit)
207  _M_futex_notify_all(__futex);
208  }
209  };
210 
211 #else // ! (_GLIBCXX_HAVE_LINUX_FUTEX && ATOMIC_INT_LOCK_FREE > 1)
212 
213  // If futexes are not available, use a mutex and a condvar to wait.
214  // Because we access the data only within critical sections, all accesses
215  // are sequentially consistent; thus, we satisfy any provided memory_order.
216  template <unsigned _Waiter_bit = 0x80000000>
217  class __atomic_futex_unsigned
218  {
219  typedef chrono::system_clock __clock_t;
220 
221  unsigned _M_data;
222  mutex _M_mutex;
223  condition_variable _M_condvar;
224 
225  public:
226  explicit
227  __atomic_futex_unsigned(unsigned __data) : _M_data(__data)
228  { }
229 
230  _GLIBCXX_ALWAYS_INLINE unsigned
231  _M_load(memory_order __mo)
232  {
233  unique_lock<mutex> __lock(_M_mutex);
234  return _M_data;
235  }
236 
237  _GLIBCXX_ALWAYS_INLINE unsigned
238  _M_load_when_not_equal(unsigned __val, memory_order __mo)
239  {
240  unique_lock<mutex> __lock(_M_mutex);
241  while (_M_data == __val)
242  _M_condvar.wait(__lock);
243  return _M_data;
244  }
245 
246  _GLIBCXX_ALWAYS_INLINE void
247  _M_load_when_equal(unsigned __val, memory_order __mo)
248  {
249  unique_lock<mutex> __lock(_M_mutex);
250  while (_M_data != __val)
251  _M_condvar.wait(__lock);
252  }
253 
254  template<typename _Rep, typename _Period>
255  _GLIBCXX_ALWAYS_INLINE bool
256  _M_load_when_equal_for(unsigned __val, memory_order __mo,
257  const chrono::duration<_Rep, _Period>& __rtime)
258  {
259  unique_lock<mutex> __lock(_M_mutex);
260  return _M_condvar.wait_for(__lock, __rtime,
261  [&] { return _M_data == __val;});
262  }
263 
264  template<typename _Clock, typename _Duration>
265  _GLIBCXX_ALWAYS_INLINE bool
266  _M_load_when_equal_until(unsigned __val, memory_order __mo,
267  const chrono::time_point<_Clock, _Duration>& __atime)
268  {
269  unique_lock<mutex> __lock(_M_mutex);
270  return _M_condvar.wait_until(__lock, __atime,
271  [&] { return _M_data == __val;});
272  }
273 
274  _GLIBCXX_ALWAYS_INLINE void
275  _M_store_notify_all(unsigned __val, memory_order __mo)
276  {
277  unique_lock<mutex> __lock(_M_mutex);
278  _M_data = __val;
279  _M_condvar.notify_all();
280  }
281  };
282 
283 #endif // _GLIBCXX_HAVE_LINUX_FUTEX && ATOMIC_INT_LOCK_FREE > 1
284 #endif // _GLIBCXX_HAS_GTHREADS && _GLIBCXX_USE_C99_STDINT_TR1
285 
286 _GLIBCXX_END_NAMESPACE_VERSION
287 } // namespace std
288 
289 #endif
memory_order
Enumeration for memory_order.
Definition: atomic_base.h:55
The top-level namespace for all ISO C++ library entities is std.