MEDIUM: threads: Use the new _HA_ATOMIC_* macros.
author    Olivier Houchard <ohouchard@haproxy.com>
          Fri, 8 Mar 2019 17:51:17 +0000 (18:51 +0100)
committer Olivier Houchard <cognet@ci0.org>
          Mon, 11 Mar 2019 16:02:38 +0000 (17:02 +0100)
Use the new _HA_ATOMIC_* macros and add barriers where needed.
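
For readers unfamiliar with the two macro families: the _HA_ATOMIC_* variants
perform the operation with relaxed memory ordering, while the older
HA_ATOMIC_* ones are sequentially consistent, which is why explicit
__ha_barrier_atomic_store() calls are added wherever the stronger ordering was
load-bearing. A simplified sketch of the definitions, assuming the gcc
__atomic builtin path (the real hathreads.h carries per-compiler and per-arch
variants, so take this as illustrative only):

/* Sketch only; simplified assumption of the gcc __atomic builtin path. */
#define HA_ATOMIC_OR(val, flags)    __atomic_or_fetch(val, flags, __ATOMIC_SEQ_CST)
#define _HA_ATOMIC_OR(val, flags)   __atomic_or_fetch(val, flags, __ATOMIC_RELAXED)
#define _HA_ATOMIC_AND(val, flags)  __atomic_and_fetch(val, flags, __ATOMIC_RELAXED)
#define _HA_ATOMIC_CAS(val, old, new) \
	__atomic_compare_exchange_n(val, old, new, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED)

/* Orders prior atomic stores before later atomic operations; on x86, where
 * lock-prefixed RMW instructions already serialize, this can be as cheap as
 * a compiler barrier rather than a full fence.
 */
#define __ha_barrier_atomic_store() __atomic_thread_fence(__ATOMIC_RELEASE)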

src/haproxy.c
src/hathreads.c

diff --git a/src/haproxy.c b/src/haproxy.c
index b98d1f1..781d962 100644
@@ -2739,11 +2739,11 @@ static void run_poll_loop()
                else if (signal_queue_len && tid == 0)
                        activity[tid].wake_signal++;
                else {
-                       HA_ATOMIC_OR(&sleeping_thread_mask, tid_bit);
-                       __ha_barrier_store();
+                       _HA_ATOMIC_OR(&sleeping_thread_mask, tid_bit);
+                       __ha_barrier_atomic_store();
                        if (active_tasks_mask & tid_bit) {
                                activity[tid].wake_tasks++;
-                               HA_ATOMIC_AND(&sleeping_thread_mask, ~tid_bit);
+                               _HA_ATOMIC_AND(&sleeping_thread_mask, ~tid_bit);
                        } else
                                exp = next;
                }
@@ -2751,7 +2751,7 @@ static void run_poll_loop()
                /* The poller will ensure it returns around <next> */
                cur_poller.poll(&cur_poller, exp);
                if (sleeping_thread_mask & tid_bit)
-                       HA_ATOMIC_AND(&sleeping_thread_mask, ~tid_bit);
+                       _HA_ATOMIC_AND(&sleeping_thread_mask, ~tid_bit);
                fd_process_cached_events();
 
                activity[tid].loops++;
@@ -2787,7 +2787,7 @@ static void *run_thread_poll_loop(void *data)
                ptdf->fct();
 
 #ifdef USE_THREAD
-       HA_ATOMIC_AND(&all_threads_mask, ~tid_bit);
+       _HA_ATOMIC_AND(&all_threads_mask, ~tid_bit);
        if (tid > 0)
                pthread_exit(NULL);
 #endif
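
The barrier change in run_poll_loop() above is the sleeper half of a classic
lost-wakeup avoidance handshake: publish the sleeping bit first, make it
globally visible, then re-check for work. A hedged sketch of the whole
pattern; the waker side below is inferred from the idiom and is not part of
this diff:

/* Sleeper (as in run_poll_loop above): */
_HA_ATOMIC_OR(&sleeping_thread_mask, tid_bit);  /* 1. announce "about to sleep"  */
__ha_barrier_atomic_store();                    /* 2. publish before re-checking */
if (active_tasks_mask & tid_bit)                /* 3. did work arrive meanwhile? */
	_HA_ATOMIC_AND(&sleeping_thread_mask, ~tid_bit); /* yes: stay awake      */
else
	exp = next;                             /* no: poll until the next timer */

/* Waker (assumed mirror image, not shown in this commit): set the thread's
 * bit in active_tasks_mask first, then read sleeping_thread_mask and wake
 * any sleeper found. Without step 2, the relaxed OR in step 1 could be
 * reordered past the read in step 3 and the wakeup lost.
 */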
diff --git a/src/hathreads.c b/src/hathreads.c
index 3077e49..1826f92 100644
@@ -46,7 +46,7 @@ struct lock_stat lock_stats[LOCK_LABELS];
  */
 void thread_harmless_till_end()
 {
-               HA_ATOMIC_OR(&threads_harmless_mask, tid_bit);
+               _HA_ATOMIC_OR(&threads_harmless_mask, tid_bit);
                while (threads_want_rdv_mask & all_threads_mask) {
 #if _POSIX_PRIORITY_SCHEDULING
                        sched_yield();
@@ -65,16 +65,16 @@ void thread_isolate()
 {
        unsigned long old;
 
-       HA_ATOMIC_OR(&threads_harmless_mask, tid_bit);
-       __ha_barrier_store();
-       HA_ATOMIC_OR(&threads_want_rdv_mask, tid_bit);
+       _HA_ATOMIC_OR(&threads_harmless_mask, tid_bit);
+       __ha_barrier_atomic_store();
+       _HA_ATOMIC_OR(&threads_want_rdv_mask, tid_bit);
 
        /* wait for all threads to become harmless */
        old = threads_harmless_mask;
        while (1) {
                if (unlikely((old & all_threads_mask) != all_threads_mask))
                        old = threads_harmless_mask;
-               else if (HA_ATOMIC_CAS(&threads_harmless_mask, &old, old & ~tid_bit))
+               else if (_HA_ATOMIC_CAS(&threads_harmless_mask, &old, old & ~tid_bit))
                        break;
 
 #if _POSIX_PRIORITY_SCHEDULING
@@ -95,7 +95,7 @@ void thread_isolate()
  */
 void thread_release()
 {
-       HA_ATOMIC_AND(&threads_want_rdv_mask, ~tid_bit);
+       _HA_ATOMIC_AND(&threads_want_rdv_mask, ~tid_bit);
        thread_harmless_end();
 }
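
For completeness, the intended pairing of these rendezvous primitives from a
caller's perspective: thread_isolate() returns only once every other thread
has declared itself harmless, and thread_release() drops the rendezvous bit so
they may resume. A hypothetical usage sketch (resize_shared_table is an
invented name, not from this commit):

/* Hypothetical caller needing exclusive access to shared state: */
static void resize_shared_table(void)
{
	thread_isolate();   /* wait until all other threads are harmless */
	/* ... mutate structures no other thread may touch concurrently ... */
	thread_release();   /* _HA_ATOMIC_AND clears our rendezvous bit */
}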