[v2,2/2] elf, nptl: Resolve recursive lock implementation early

Message ID 688b28fc77f34d39195dd102f01ece89711e3ac6.1620141231.git.fweimer@redhat.com
State New
Headers show
Series
  • nptl: Remove delayed rtld lock initialization
Related show

Commit Message

Adhemerval Zanella via Libc-alpha May 4, 2021, 3:15 p.m.
If libpthread is included in libc, it is not necessary to delay
initialization of the lock/unlock function pointers until libpthread
is loaded.  This eliminates two unprotected function pointers
from _rtld_global and removes some initialization code from
libpthread.
---
 elf/Makefile               |  3 +-
 elf/dl-lock.c              | 56 ++++++++++++++++++++++++++++++++++++++
 elf/rtld.c                 | 18 ++++++++++++
 nptl/nptl-init.c           |  9 ------
 sysdeps/generic/ldsodefs.h | 25 ++++++++++++++++-
 sysdeps/nptl/libc-lockP.h  | 17 +++---------
 6 files changed, 104 insertions(+), 24 deletions(-)
 create mode 100644 elf/dl-lock.c

-- 
2.30.2

Patch

diff --git a/elf/Makefile b/elf/Makefile
index f09988f7d2..08cfa75ffe 100644
--- a/elf/Makefile
+++ b/elf/Makefile
@@ -66,7 +66,8 @@  elide-routines.os = $(all-dl-routines) dl-support enbl-secure dl-origin \
 # interpreter and operating independent of libc.
 rtld-routines	= rtld $(all-dl-routines) dl-sysdep dl-environ dl-minimal \
   dl-error-minimal dl-conflict dl-hwcaps dl-hwcaps_split dl-hwcaps-subdirs \
-  dl-usage dl-diagnostics dl-diagnostics-kernel dl-diagnostics-cpu
+  dl-usage dl-diagnostics dl-diagnostics-kernel dl-diagnostics-cpu \
+  dl-lock
 all-rtld-routines = $(rtld-routines) $(sysdep-rtld-routines)
 
 CFLAGS-dl-runtime.c += -fexceptions -fasynchronous-unwind-tables
diff --git a/elf/dl-lock.c b/elf/dl-lock.c
new file mode 100644
index 0000000000..2db15e3a2f
--- /dev/null
+++ b/elf/dl-lock.c
@@ -0,0 +1,56 @@ 
+/* Recursive locking implementation for the dynamic loader.
+   Copyright (C) 2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+/* If libpthread is not part of libc, a legacy mechanism using
+   late-initialized function pointers in _rtld_global is used, and not
+   the code below.  */
+#if PTHREAD_IN_LIBC
+# include <assert.h>
+# include <first-versions.h>
+# include <ldsodefs.h>
+
+__typeof (pthread_mutex_lock) *___rtld_mutex_lock attribute_relro;
+__typeof (pthread_mutex_unlock) *___rtld_mutex_unlock attribute_relro;
+
+void
+__rtld_mutex_init (void)
+{
+  /* There is an implicit assumption here that the lock counters are
+     zero and this function is called while nothing is locked.  For
+     early initialization of the mutex functions this is true because
+     it happens directly in dl_main in elf/rtld.c, and not some ELF
+     constructor while holding loader locks.  */
+
+  struct link_map *libc_map = GL (dl_ns)[LM_ID_BASE].libc_map;
+
+  const ElfW(Sym) *sym
+    = _dl_lookup_direct (libc_map, "pthread_mutex_lock",
+                         0x4f152227, /* dl_new_hash output.  */
+                         FIRST_VERSION_libc_pthread_mutex_lock_STRING,
+                         FIRST_VERSION_libc_pthread_mutex_lock_HASH);
+  assert (sym != NULL);
+  ___rtld_mutex_lock = DL_SYMBOL_ADDRESS (libc_map, sym);
+
+  sym = _dl_lookup_direct (libc_map, "pthread_mutex_unlock",
+                           0x7dd7aaaa, /* dl_new_hash output.  */
+                           FIRST_VERSION_libc_pthread_mutex_unlock_STRING,
+                           FIRST_VERSION_libc_pthread_mutex_unlock_HASH);
+  assert (sym != NULL);
+  ___rtld_mutex_unlock = DL_SYMBOL_ADDRESS (libc_map, sym);
+}
+#endif
diff --git a/elf/rtld.c b/elf/rtld.c
index 34879016ad..a18439251f 100644
--- a/elf/rtld.c
+++ b/elf/rtld.c
@@ -857,6 +857,14 @@  rtld_lock_default_unlock_recursive (void *lock)
   __rtld_lock_default_unlock_recursive (lock);
 }
 #endif
+#if PTHREAD_IN_LIBC
+/* Dummy implementation.  See __rtld_mutex_init.  */
+static int
+rtld_mutex_dummy (pthread_mutex_t *lock)
+{
+  return 0;
+}
+#endif
 
 
 static void
@@ -1146,6 +1154,10 @@  dl_main (const ElfW(Phdr) *phdr,
   GL(dl_rtld_lock_recursive) = rtld_lock_default_lock_recursive;
   GL(dl_rtld_unlock_recursive) = rtld_lock_default_unlock_recursive;
 #endif
+#if PTHREAD_IN_LIBC
+  ___rtld_mutex_lock = rtld_mutex_dummy;
+  ___rtld_mutex_unlock = rtld_mutex_dummy;
+#endif
 
   /* The explicit initialization here is cheaper than processing the reloc
      in the _rtld_local definition's initializer.  */
@@ -2361,6 +2373,9 @@  dl_main (const ElfW(Phdr) *phdr,
 	 loader.  */
       __rtld_malloc_init_real (main_map);
 
+      /* Likewise for the locking implementation.  */
+      __rtld_mutex_init ();
+
       /* Mark all the objects so we know they have been already relocated.  */
       for (struct link_map *l = main_map; l != NULL; l = l->l_next)
 	{
@@ -2466,6 +2481,9 @@  dl_main (const ElfW(Phdr) *phdr,
 	 at this point.  */
       __rtld_malloc_init_real (main_map);
 
+      /* Likewise for the locking implementation.  */
+      __rtld_mutex_init ();
+
       RTLD_TIMING_VAR (start);
       rtld_timer_start (&start);
 
diff --git a/nptl/nptl-init.c b/nptl/nptl-init.c
index b0879bd87e..f7d1c2c21e 100644
--- a/nptl/nptl-init.c
+++ b/nptl/nptl-init.c
@@ -179,15 +179,6 @@  __pthread_initialize_minimal_internal (void)
   lll_unlock (__default_pthread_attr_lock, LLL_PRIVATE);
 
 #ifdef SHARED
-  /* Make __rtld_lock_{,un}lock_recursive use pthread_mutex_{,un}lock,
-     keep the lock count from the ld.so implementation.  */
-  GL(dl_rtld_lock_recursive) = (void *) __pthread_mutex_lock;
-  GL(dl_rtld_unlock_recursive) = (void *) __pthread_mutex_unlock;
-  unsigned int rtld_lock_count = GL(dl_load_lock).mutex.__data.__count;
-  GL(dl_load_lock).mutex.__data.__count = 0;
-  while (rtld_lock_count-- > 0)
-    __pthread_mutex_lock (&GL(dl_load_lock).mutex);
-
   GL(dl_make_stack_executable_hook) = &__make_stacks_executable;
 #endif
 
diff --git a/sysdeps/generic/ldsodefs.h b/sysdeps/generic/ldsodefs.h
index 67c6686015..61326e8e5b 100644
--- a/sysdeps/generic/ldsodefs.h
+++ b/sysdeps/generic/ldsodefs.h
@@ -403,7 +403,7 @@  struct rtld_global
   struct auditstate _dl_rtld_auditstate[DL_NNS];
 #endif
 
-#if defined SHARED && defined _LIBC_REENTRANT \
+#if !PTHREAD_IN_LIBC && defined SHARED \
     && defined __rtld_lock_default_lock_recursive
   EXTERN void (*_dl_rtld_lock_recursive) (void *);
   EXTERN void (*_dl_rtld_unlock_recursive) (void *);
@@ -1299,6 +1299,29 @@  link_map_audit_state (struct link_map *l, size_t index)
 }
 #endif /* SHARED */
 
+#if PTHREAD_IN_LIBC && defined SHARED
+/* Recursive locking implementation for use within the dynamic loader.
+   Used to define the __rtld_lock_lock_recursive and
+   __rtld_lock_unlock_recursive via <libc-lock.h>.  Initialized to a
+   no-op dummy implementation early.  Similar
+   to GL (dl_rtld_lock_recursive) and GL (dl_rtld_unlock_recursive)
+   in !PTHREAD_IN_LIBC builds.  */
+extern int (*___rtld_mutex_lock) (pthread_mutex_t *) attribute_hidden;
+extern int (*___rtld_mutex_unlock) (pthread_mutex_t *lock) attribute_hidden;
+
+/* Called after libc has been loaded, but before RELRO is activated.
+   Used to initialize the function pointers to the actual
+   implementations.  */
+void __rtld_mutex_init (void) attribute_hidden;
+#else /* !PTHREAD_IN_LIBC */
+static inline void
+__rtld_mutex_init (void)
+{
+  /* The initialization happens later (!PTHREAD_IN_LIBC) or is not
+     needed at all (!SHARED).  */
+}
+#endif /* !PTHREAD_IN_LIBC */
+
 #if THREAD_GSCOPE_IN_TCB
 void __thread_gscope_wait (void) attribute_hidden;
 # define THREAD_GSCOPE_WAIT() __thread_gscope_wait ()
diff --git a/sysdeps/nptl/libc-lockP.h b/sysdeps/nptl/libc-lockP.h
index ae9691d40e..ec7b02bbdd 100644
--- a/sysdeps/nptl/libc-lockP.h
+++ b/sysdeps/nptl/libc-lockP.h
@@ -151,9 +151,6 @@  _Static_assert (LLL_LOCK_INITIALIZER == 0, "LLL_LOCK_INITIALIZER != 0");
   __libc_maybe_call (__pthread_mutex_trylock, (&(NAME)), 0)
 #endif
 
-#define __rtld_lock_trylock_recursive(NAME) \
-  __libc_maybe_call (__pthread_mutex_trylock, (&(NAME).mutex), 0)
-
 /* Unlock the named lock variable.  */
 #if IS_IN (libc) || IS_IN (libpthread)
 # define __libc_lock_unlock(NAME) \
@@ -163,19 +160,13 @@  _Static_assert (LLL_LOCK_INITIALIZER == 0, "LLL_LOCK_INITIALIZER != 0");
 #endif
 #define __libc_rwlock_unlock(NAME) __pthread_rwlock_unlock (&(NAME))
 
-#ifdef SHARED
-# define __rtld_lock_default_lock_recursive(lock) \
-  ++((pthread_mutex_t *)(lock))->__data.__count;
-
-# define __rtld_lock_default_unlock_recursive(lock) \
-  --((pthread_mutex_t *)(lock))->__data.__count;
-
+#if IS_IN (rtld)
 # define __rtld_lock_lock_recursive(NAME) \
-  GL(dl_rtld_lock_recursive) (&(NAME).mutex)
+  ___rtld_mutex_lock (&(NAME).mutex)
 
 # define __rtld_lock_unlock_recursive(NAME) \
-  GL(dl_rtld_unlock_recursive) (&(NAME).mutex)
-#else
+  ___rtld_mutex_unlock (&(NAME).mutex)
+#else /* Not in the dynamic loader.  */
 # define __rtld_lock_lock_recursive(NAME) \
   __pthread_mutex_lock (&(NAME).mutex)