[3/6] nptl: Handle robust PI mutexes for !__ASSUME_SET_ROBUST_LIST

Message ID 20210610193639.3650754-4-adhemerval.zanella@linaro.org
State New
Series
  • More NPTL fixes

Commit Message

Adhemerval Zanella via Libc-alpha June 10, 2021, 7:36 p.m.
Robust PI mutexes are signaled by setting the LSB of the robust list
entry to 1, so the code needs to take this into account and mask the
bit off before accessing the __pthread_mutex_s.
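
As a rough, self-contained illustration (not part of the patch, with a
stand-in mutex type instead of the glibc internals): a robust list entry
is the address of the mutex's embedded __next field, with bit 0 carrying
the PI flag, so it has to be masked off before the pointer is used.

  #include <stdint.h>
  #include <stddef.h>

  struct robust_list { struct robust_list *next; };

  /* Stand-in for struct __pthread_mutex_s with only the fields the exit
     path looks at.  */
  struct mutex_s
  {
    int lock;
    unsigned int nusers;
    struct robust_list list;
  };

  /* Decode one robust list entry: clear the PI bit first, then step back
     from the embedded list member to the enclosing mutex.  */
  static struct mutex_s *
  entry_to_mutex (void *entry, int *is_pi)
  {
    *is_pi = (uintptr_t) entry & 1;
    void *next_field = (void *) ((uintptr_t) entry & ~(uintptr_t) 1);
    return (struct mutex_s *) ((char *) next_field
                               - offsetof (struct mutex_s, list.next));
  }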

The code is also simplified: the initialization code is not really
required, since PD->robust_head.list and PD->robust_list.__next are
essentially the same regardless of __PTHREAD_MUTEX_HAVE_PREV; the futex
wake is now issued only when required; and the futex shared bit is set
only when required.
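
A minimal sketch of the "wake only when required" part, assuming a plain
Linux futex word and raw syscalls rather than the internal futex_wake /
futex_unlock_pi helpers (the PI case, which uses FUTEX_UNLOCK_PI, is left
out); the name mark_owner_died is made up for the example.

  #include <stddef.h>
  #include <stdatomic.h>
  #include <unistd.h>
  #include <sys/syscall.h>
  #include <linux/futex.h>

  /* After the dying owner stores FUTEX_OWNER_DIED, issue the futex wake
     only if the old lock value or the user count suggests that someone
     may be blocked, and use the private flag unless the mutex is
     process-shared.  */
  static void
  mark_owner_died (_Atomic unsigned int *lock_word, unsigned int nusers,
                   int pshared)
  {
    unsigned int old = atomic_exchange_explicit (lock_word, FUTEX_OWNER_DIED,
                                                 memory_order_relaxed);
    if (old > 1 || nusers != 0)
      syscall (SYS_futex, lock_word,
               pshared ? FUTEX_WAKE : FUTEX_WAKE_PRIVATE,
               1, NULL, NULL, 0);
  }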

Checked on a build for m68k-linux-gnu.  I also checked on
x86_64-linux-gnu by removing the check for !__ASSUME_SET_ROBUST_LIST.
---
 nptl/pthread_create.c | 53 ++++++++++++++++++++++---------------------
 1 file changed, 27 insertions(+), 26 deletions(-)

-- 
2.30.2

Patch

diff --git a/nptl/pthread_create.c b/nptl/pthread_create.c
index 3f017f1e26..8f1a77b5a8 100644
--- a/nptl/pthread_create.c
+++ b/nptl/pthread_create.c
@@ -478,35 +478,36 @@  start_thread (void *arg)
     exit (0);
 
 #ifndef __ASSUME_SET_ROBUST_LIST
-  /* If this thread has any robust mutexes locked, handle them now.  */
-# if __PTHREAD_MUTEX_HAVE_PREV
-  void *robust = pd->robust_head.list;
-# else
-  __pthread_slist_t *robust = pd->robust_list.__next;
-# endif
-  /* We let the kernel do the notification if it is able to do so.
-     If we have to do it here there for sure are no PI mutexes involved
-     since the kernel support for them is even more recent.  */
-  if (!__nptl_set_robust_list_avail
-      && __builtin_expect (robust != (void *) &pd->robust_head, 0))
+  /* We let the kernel do the notification if it is able to do so on the exit
+     syscall.  Otherwise we need to handle it before the thread terminates.  */
+  void **robust;
+  while ((robust = pd->robust_head.list)
+	 && robust != (void *) &pd->robust_head)
     {
-      do
+      /* Note: robust PI futexes are signaled by setting bit 0.  */
+      void **robustp = (void **) ((uintptr_t) robust & ~1UL);
+
+      struct __pthread_mutex_s *mtx = (struct __pthread_mutex_s *)
+	((char *) robustp - offsetof (struct __pthread_mutex_s,
+				      __list.__next));
+      unsigned int nusers = mtx->__nusers;
+      int shared = mtx->__kind & 128;
+
+      pd->robust_head.list_op_pending = robust;
+      pd->robust_head.list = *robustp;
+      /* Although the list will not be changed at this point, it follows the
+         expected kernel ABI.  */
+      __asm ("" ::: "memory");
+
+      int lock = atomic_exchange_relaxed (&mtx->__lock, FUTEX_OWNER_DIED);
+      /* Wake any users if mutex is acquired with potential users.  */
+      if (lock > 1 || nusers != 0)
 	{
-	  struct __pthread_mutex_s *this = (struct __pthread_mutex_s *)
-	    ((char *) robust - offsetof (struct __pthread_mutex_s,
-					 __list.__next));
-	  robust = *((void **) robust);
-
-# if __PTHREAD_MUTEX_HAVE_PREV
-	  this->__list.__prev = NULL;
-# endif
-	  this->__list.__next = NULL;
-
-	  atomic_or (&this->__lock, FUTEX_OWNER_DIED);
-	  futex_wake ((unsigned int *) &this->__lock, 1,
-		      /* XYZ */ FUTEX_SHARED);
+	  if ((uintptr_t) robust & 1)
+	    futex_unlock_pi ((unsigned int *) &mtx->__lock, shared);
+	  else
+	    futex_wake ((unsigned int *) &mtx->__lock, 1, shared);
 	}
-      while (robust != (void *) &pd->robust_head);
     }
 #endif