Intel(R) Threading Building Blocks Doxygen Documentation  version 4.2.3
concurrent_monitor.h
/*
    Copyright (c) 2005-2019 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#ifndef __TBB_concurrent_monitor_H
#define __TBB_concurrent_monitor_H

#include "tbb/tbb_stddef.h"
#include "tbb/atomic.h"
#include "tbb/spin_mutex.h"
#include "tbb/tbb_exception.h"
#include "tbb/aligned_space.h"

#include "semaphore.h"

namespace tbb {
namespace internal {

//! Circular doubly-linked list with sentinel
/** head.next points to the front and head.prev points to the back */
class circular_doubly_linked_list_with_sentinel : no_copy {
public:
    struct node_t {
        node_t* next;
        node_t* prev;
        explicit node_t() : next((node_t*)(uintptr_t)0xcdcdcdcd), prev((node_t*)(uintptr_t)0xcdcdcdcd) {}
    };

    // ctor
    circular_doubly_linked_list_with_sentinel() {clear();}
    // dtor
    ~circular_doubly_linked_list_with_sentinel() {__TBB_ASSERT( head.next==&head && head.prev==&head, "the list is not empty" );}

    inline size_t  size()  const {return count;}
    inline bool    empty() const {return size()==0;}
    inline node_t* front() const {return head.next;}
    inline node_t* last()  const {return head.prev;}
    inline node_t* begin() const {return front();}
    inline const node_t* end() const {return &head;}

    //! add to the back of the list
    inline void add( node_t* n ) {
        __TBB_store_relaxed(count, __TBB_load_relaxed(count) + 1);
        n->prev = head.prev;
        n->next = &head;
        head.prev->next = n;
        head.prev = n;
    }

    //! remove node 'n'
    inline void remove( node_t& n ) {
        __TBB_ASSERT( count > 0, "attempt to remove an item from an empty list" );
        __TBB_store_relaxed(count, __TBB_load_relaxed(count) - 1);
        n.prev->next = n.next;
        n.next->prev = n.prev;
    }

    //! move all elements to 'lst' and initialize the 'this' list
    inline void flush_to( circular_doubly_linked_list_with_sentinel& lst ) {
        if( const size_t l_count = __TBB_load_relaxed(count) ) {
            __TBB_store_relaxed(lst.count, l_count);
            lst.head.next = head.next;
            lst.head.prev = head.prev;
            head.next->prev = &lst.head;
            head.prev->next = &lst.head;
            clear();
        }
    }

    void clear() {head.next = head.prev = &head; __TBB_store_relaxed(count, 0);}
private:
    __TBB_atomic size_t count;
    node_t head;
};
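
// Illustrative usage sketch (not part of the original header; names such as
// `my_list`, `a`, and `b` are hypothetical). The sentinel makes add/remove
// branch-free: an empty list is one whose head points at itself, and a
// traversal runs from front() until the sentinel end() is reached, mirroring
// the loops in notify_relaxed() below.
//
//     circular_doubly_linked_list_with_sentinel my_list;
//     circular_doubly_linked_list_with_sentinel::node_t a, b;
//     my_list.add( &a );                    // list: a
//     my_list.add( &b );                    // list: a, b
//     for( circular_doubly_linked_list_with_sentinel::node_t* n = my_list.front();
//          n != my_list.end(); n = n->next )
//         { /* visit n */ }
//     my_list.remove( a );                  // list: b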

typedef circular_doubly_linked_list_with_sentinel waitset_t;
typedef circular_doubly_linked_list_with_sentinel::node_t waitset_node_t;

//! concurrent_monitor
/** fine-grained concurrent_monitor implementation */
class concurrent_monitor : no_copy {
public:
    //! per-thread descriptor for concurrent_monitor
    class thread_context : waitset_node_t, no_copy {
        friend class concurrent_monitor;
    public:
        thread_context() : skipped_wakeup(false), aborted(false), ready(false), context(0) {
            epoch = 0;
            in_waitset = false;
        }
        ~thread_context() {
            if (ready) {
                if( skipped_wakeup ) semaphore().P();
                semaphore().~binary_semaphore();
            }
        }
        binary_semaphore& semaphore() { return *sema.begin(); }
    private:
        //! The method for lazy initialization of the thread_context's semaphore.
        //  Inlining of the method is undesirable, due to extra instructions for
        //  exception support added at caller side.
        __TBB_NOINLINE( void init() );
        tbb::aligned_space<binary_semaphore> sema;
        __TBB_atomic unsigned epoch;
        tbb::atomic<bool> in_waitset;
        bool skipped_wakeup;
        bool aborted;
        bool ready;
        uintptr_t context;
    };

    //! ctor
    concurrent_monitor() {__TBB_store_relaxed(epoch, 0);}

    //! dtor
    ~concurrent_monitor();

    //! prepare wait by inserting 'thr' into the wait queue
    void prepare_wait( thread_context& thr, uintptr_t ctx = 0 );

    //! Commit wait if event count has not changed; otherwise, cancel wait.
    /** Returns true if committed, false if canceled. */
    inline bool commit_wait( thread_context& thr ) {
        const bool do_it = thr.epoch == __TBB_load_relaxed(epoch);
        // this check is just an optimization
        if( do_it ) {
            __TBB_ASSERT( thr.ready, "use of commit_wait() without prior prepare_wait()" );
            thr.semaphore().P();
            __TBB_ASSERT( !thr.in_waitset, "still in the queue?" );
            if( thr.aborted )
                throw_exception( eid_user_abort );
        } else {
            cancel_wait( thr );
        }
        return do_it;
    }

    //! Cancel the wait. Removes the thread from the wait queue if not removed yet.
    void cancel_wait( thread_context& thr );

    //! Wait for a condition to be satisfied with waiting-on context
    template<typename WaitUntil, typename Context>
    void wait( WaitUntil until, Context on );

    //! Notify one thread about the event
    void notify_one() {atomic_fence(); notify_one_relaxed();}

    //! Notify one thread about the event. Relaxed version.
    void notify_one_relaxed();

    //! Notify all waiting threads of the event
    void notify_all() {atomic_fence(); notify_all_relaxed();}

    //! Notify all waiting threads of the event; Relaxed version
    void notify_all_relaxed();

    //! Notify waiting threads of the event that satisfies the given predicate
    template<typename P> void notify( const P& predicate ) {atomic_fence(); notify_relaxed( predicate );}

    //! Notify waiting threads of the event that satisfies the given predicate; Relaxed version
    template<typename P> void notify_relaxed( const P& predicate );

    //! Abort any sleeping threads at the time of the call
    void abort_all() {atomic_fence(); abort_all_relaxed();}

    //! Abort any sleeping threads at the time of the call; Relaxed version
    void abort_all_relaxed();

private:
    tbb::spin_mutex mutex_ec;
    waitset_t       waitset_ec;
    __TBB_atomic unsigned epoch;
    thread_context* to_thread_context( waitset_node_t* n ) { return static_cast<thread_context*>(n); }
};
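
// Illustrative usage sketch (not part of the original header; `my_monitor`
// and the flag `my_condition` are hypothetical). A waiter follows a
// two-phase protocol: it registers with prepare_wait(), re-checks the
// condition, and only then blocks in commit_wait(); commit_wait() itself
// cancels the wait (returning false) when a notification has already bumped
// the epoch, so a wakeup published between the two phases is never lost.
//
//     concurrent_monitor::thread_context ctx;
//     my_monitor.prepare_wait( ctx );       // phase 1: join the waitset
//     if( my_condition )
//         my_monitor.cancel_wait( ctx );    // condition already true; leave
//     else
//         my_monitor.commit_wait( ctx );    // phase 2: sleep on the semaphore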

template<typename WaitUntil, typename Context>
void concurrent_monitor::wait( WaitUntil until, Context on )
{
    bool slept = false;
    thread_context thr_ctx;
    prepare_wait( thr_ctx, on() );
    while( !until() ) {
        if( (slept = commit_wait( thr_ctx ) )==true )
            if( until() ) break;
        slept = false;
        prepare_wait( thr_ctx, on() );
    }
    if( !slept )
        cancel_wait( thr_ctx );
}
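
// Illustrative usage sketch (not part of the original header; `my_monitor`
// and `done` are hypothetical, and C++11 lambdas are assumed purely for
// brevity). `until` is polled before each sleep; `on` supplies the uintptr_t
// context recorded by prepare_wait(), which notify( predicate ) can inspect.
//
//     tbb::atomic<bool> done;
//     done = false;
//     // waiting side: returns once `done` is observed true
//     my_monitor.wait( [&]() -> bool      { return done; },
//                      [ ]() -> uintptr_t { return 0; } );
//     // notifying side: make the condition true, then wake the waiters
//     done = true;
//     my_monitor.notify_all();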

template<typename P>
void concurrent_monitor::notify_relaxed( const P& predicate ) {
    if( waitset_ec.empty() )
        return;
    waitset_t temp;
    waitset_node_t* nxt;
    const waitset_node_t* end = waitset_ec.end();
    {
        tbb::spin_mutex::scoped_lock l( mutex_ec );
        __TBB_store_relaxed(epoch, __TBB_load_relaxed(epoch) + 1);
        for( waitset_node_t* n=waitset_ec.last(); n!=end; n=nxt ) {
            nxt = n->prev;
            thread_context* thr = to_thread_context( n );
            if( predicate( thr->context ) ) {
                waitset_ec.remove( *n );
                thr->in_waitset = false;
                temp.add( n );
            }
        }
    }

    end = temp.end();
    for( waitset_node_t* n=temp.front(); n!=end; n=nxt ) {
        nxt = n->next;
        to_thread_context(n)->semaphore().V();
    }
#if TBB_USE_ASSERT
    temp.clear();
#endif
}
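
// Illustrative usage sketch (not part of the original header; `my_monitor`
// and the tag value are hypothetical). The predicate receives the uintptr_t
// context each waiter passed to prepare_wait(), so notify() can wake a
// selected subset of the waitset.
//
//     struct tag_equals {
//         uintptr_t tag;
//         bool operator()( uintptr_t ctx ) const { return ctx == tag; }
//     };
//     tag_equals p = { 42u };
//     my_monitor.notify( p );   // wakes only waiters registered with ctx 42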

} // namespace internal
} // namespace tbb

#endif /* __TBB_concurrent_monitor_H */