-rw-r--r--  main/musl/0001-reorder-thread-list-unlink-in-pthread_exit-after-all.patch |  56
-rw-r--r--  main/musl/0002-don-t-use-libc.threads_minus_1-as-relaxed-atomic-for.patch |  78
-rw-r--r--  main/musl/0003-cut-down-size-of-some-libc-struct-members.patch            |  30
-rw-r--r--  main/musl/0004-restore-lock-skipping-for-processes-that-return-to-s.patch | 101
-rw-r--r--  main/musl/APKBUILD                                                        |  13
-rw-r--r--  main/musl/dont-use-threads-minus-1-for-skipping-locks.patch               |  70
6 files changed, 275 insertions(+), 73 deletions(-)
diff --git a/main/musl/0001-reorder-thread-list-unlink-in-pthread_exit-after-all.patch b/main/musl/0001-reorder-thread-list-unlink-in-pthread_exit-after-all.patch
new file mode 100644
index 00000000000..2706d1516ab
--- /dev/null
+++ b/main/musl/0001-reorder-thread-list-unlink-in-pthread_exit-after-all.patch
@@ -0,0 +1,56 @@
+From 4d5aa20a94a2d3fae3e69289dc23ecafbd0c16c4 Mon Sep 17 00:00:00 2001
+From: Rich Felker <dalias@aerifal.cx>
+Date: Fri, 22 May 2020 17:35:14 -0400
+Subject: [PATCH 1/4] reorder thread list unlink in pthread_exit after all
+ locks
+
+since the backend for LOCK() skips locking if single-threaded, it's
+unsafe to make the process appear single-threaded before the last use
+of lock.
+
+this fixes potential unsynchronized access to a linked list via
+__dl_thread_cleanup.
+---
+ src/thread/pthread_create.c | 19 +++++++++++--------
+ 1 file changed, 11 insertions(+), 8 deletions(-)
+
+diff --git a/src/thread/pthread_create.c b/src/thread/pthread_create.c
+index 5f491092..6a3b0c21 100644
+--- a/src/thread/pthread_create.c
++++ b/src/thread/pthread_create.c
+@@ -90,14 +90,7 @@ _Noreturn void __pthread_exit(void *result)
+ exit(0);
+ }
+
+- /* At this point we are committed to thread termination. Unlink
+- * the thread from the list. This change will not be visible
+- * until the lock is released, which only happens after SYS_exit
+- * has been called, via the exit futex address pointing at the lock. */
+- libc.threads_minus_1--;
+- self->next->prev = self->prev;
+- self->prev->next = self->next;
+- self->prev = self->next = self;
++ /* At this point we are committed to thread termination. */
+
+ /* Process robust list in userspace to handle non-pshared mutexes
+ * and the detached thread case where the robust list head will
+@@ -121,6 +114,16 @@ _Noreturn void __pthread_exit(void *result)
+ __do_orphaned_stdio_locks();
+ __dl_thread_cleanup();
+
++ /* Last, unlink thread from the list. This change will not be visible
++ * until the lock is released, which only happens after SYS_exit
++ * has been called, via the exit futex address pointing at the lock.
++ * This needs to happen after any possible calls to LOCK() that might
++ * skip locking if libc.threads_minus_1 is zero. */
++ libc.threads_minus_1--;
++ self->next->prev = self->prev;
++ self->prev->next = self->next;
++ self->prev = self->next = self;
++
+ /* This atomic potentially competes with a concurrent pthread_detach
+ * call; the loser is responsible for freeing thread resources. */
+ int state = a_cas(&self->detach_state, DT_JOINABLE, DT_EXITING);
+--
+2.21.0
+
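To see the hazard this first patch closes, note that the LOCK() backend skips its atomic entirely once the process looks single-threaded. If the exiting thread drops the count before its last locked operation, that operation is silently elided while a surviving thread may still hold the lock. A minimal sketch of the broken ordering, with hypothetical names standing in for the musl internals (the real code paths are LOCK() and __dl_thread_cleanup):

#include <stdatomic.h>

/* Hypothetical stand-ins; a sketch of the ordering bug only,
 * not musl's real code. */
int threads_minus_1 = 1;            /* two threads alive */
atomic_int list_lock;

void sketch_lock(atomic_int *lk)
{
	if (!threads_minus_1) return;   /* skip when "single-threaded" */
	while (atomic_exchange(lk, 1))  /* real musl futex-waits instead */
		;
}

void sketch_unlock(atomic_int *lk)
{
	if (threads_minus_1) atomic_store(lk, 0);
}

void broken_exit_path(void)
{
	threads_minus_1--;              /* too early: process now "looks"
	                                   single-threaded */
	sketch_lock(&list_lock);        /* silently skipped, even though the
	                                   surviving thread may hold list_lock */
	/* ... unsynchronized mutation of shared state ... */
	sketch_unlock(&list_lock);
}

Patch 0001 simply moves the decrement and the list unlink after the last such LOCK() user (__dl_thread_cleanup), so the skip can only take effect once no locked work remains.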
diff --git a/main/musl/0002-don-t-use-libc.threads_minus_1-as-relaxed-atomic-for.patch b/main/musl/0002-don-t-use-libc.threads_minus_1-as-relaxed-atomic-for.patch
new file mode 100644
index 00000000000..370667fe521
--- /dev/null
+++ b/main/musl/0002-don-t-use-libc.threads_minus_1-as-relaxed-atomic-for.patch
@@ -0,0 +1,78 @@
+From e01b5939b38aea5ecbe41670643199825874b26c Mon Sep 17 00:00:00 2001
+From: Rich Felker <dalias@aerifal.cx>
+Date: Thu, 21 May 2020 23:32:45 -0400
+Subject: [PATCH 2/4] don't use libc.threads_minus_1 as relaxed atomic for
+ skipping locks
+
+after all but the last thread exits, the next thread to observe
+libc.threads_minus_1==0 and conclude that it can skip locking fails to
+synchronize with any changes to memory that were made by the
+last-exiting thread. this can produce data races.
+
+on some archs, at least x86, memory synchronization is unlikely to be
+a problem; however, with the inline locks in malloc, skipping the lock
+also eliminated the compiler barrier, and caused code that needed to
+re-check chunk in-use bits after obtaining the lock to reuse a stale
+value, possibly from before the process became single-threaded. this
+in turn produced corruption of the heap state.
+
+some uses of libc.threads_minus_1 remain, especially for allocation of
+new TLS in the dynamic linker; otherwise, it could be removed
+entirely. it's made non-volatile to reflect that the remaining
+accesses are only made under lock on the thread list.
+
+instead of libc.threads_minus_1, libc.threaded is now used for
+skipping locks. the difference is that libc.threaded is permanently
+true once an additional thread has been created. this will produce
+some performance regression in processes that are mostly
+single-threaded but occasionally creating threads. in the future it
+may be possible to bring back the full lock-skipping, but more care
+needs to be taken to produce a safe design.
+---
+ src/internal/libc.h | 2 +-
+ src/malloc/malloc.c | 2 +-
+ src/thread/__lock.c | 2 +-
+ 3 files changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/src/internal/libc.h b/src/internal/libc.h
+index ac97dc7e..c0614852 100644
+--- a/src/internal/libc.h
++++ b/src/internal/libc.h
+@@ -21,7 +21,7 @@ struct __libc {
+ int can_do_threads;
+ int threaded;
+ int secure;
+- volatile int threads_minus_1;
++ int threads_minus_1;
+ size_t *auxv;
+ struct tls_module *tls_head;
+ size_t tls_size, tls_align, tls_cnt;
+diff --git a/src/malloc/malloc.c b/src/malloc/malloc.c
+index 96982596..2553a62e 100644
+--- a/src/malloc/malloc.c
++++ b/src/malloc/malloc.c
+@@ -26,7 +26,7 @@ int __malloc_replaced;
+
+ static inline void lock(volatile int *lk)
+ {
+- if (libc.threads_minus_1)
++ if (libc.threaded)
+ while(a_swap(lk, 1)) __wait(lk, lk+1, 1, 1);
+ }
+
+diff --git a/src/thread/__lock.c b/src/thread/__lock.c
+index 45557c88..5b9b144e 100644
+--- a/src/thread/__lock.c
++++ b/src/thread/__lock.c
+@@ -18,7 +18,7 @@
+
+ void __lock(volatile int *l)
+ {
+- if (!libc.threads_minus_1) return;
++ if (!libc.threaded) return;
+ /* fast path: INT_MIN for the lock, +1 for the congestion */
+ int current = a_cas(l, 0, INT_MIN + 1);
+ if (!current) return;
+--
+2.21.0
+
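The heap-corruption mechanism this commit message describes deserves spelling out: when the inline lock in malloc compiles away, the call also stops acting as a compiler barrier, so a shared value loaded before the lock may legally be reused after it. A hedged sketch of the pattern (the chunk layout and names are hypothetical; musl's real victim was the in-use-bit recheck in src/malloc/malloc.c):

#include <stdatomic.h>

struct chunk { int flags; };        /* hypothetical; bit 0 = in use */

int threads_minus_1;                /* relaxed, non-atomic read below */
atomic_int heap_lock;

static inline void lock(atomic_int *lk)
{
	if (threads_minus_1)        /* 0 => lock AND barrier both vanish */
		while (atomic_exchange(lk, 1))
			;
}

static inline void unlock(atomic_int *lk)
{
	if (threads_minus_1) atomic_store(lk, 0);
}

int try_claim(struct chunk *c)
{
	if (c->flags & 1) return 0;     /* optimistic pre-check */
	lock(&heap_lock);
	/* when lock() compiles to nothing there is no barrier here, so
	 * the compiler may reuse the load above instead of re-reading
	 * c->flags -- a stale value, possibly from before the process
	 * went single-threaded */
	int in_use = c->flags & 1;
	if (!in_use) c->flags |= 1;
	unlock(&heap_lock);
	return !in_use;
}

Testing libc.threaded instead trades performance for safety: the flag latches to true at the first pthread_create and never clears, so a formerly multi-threaded process keeps paying for the lock — exactly the regression patch 0004 then removes.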
diff --git a/main/musl/0003-cut-down-size-of-some-libc-struct-members.patch b/main/musl/0003-cut-down-size-of-some-libc-struct-members.patch
new file mode 100644
index 00000000000..b9e32db8f7c
--- /dev/null
+++ b/main/musl/0003-cut-down-size-of-some-libc-struct-members.patch
@@ -0,0 +1,30 @@
+From f12888e9eb9eed60cc266b899dcafecb4752964a Mon Sep 17 00:00:00 2001
+From: Rich Felker <dalias@aerifal.cx>
+Date: Fri, 22 May 2020 17:25:38 -0400
+Subject: [PATCH 3/4] cut down size of some libc struct members
+
+these are all flags that can be single-byte values.
+---
+ src/internal/libc.h | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/src/internal/libc.h b/src/internal/libc.h
+index c0614852..d47f58e0 100644
+--- a/src/internal/libc.h
++++ b/src/internal/libc.h
+@@ -18,9 +18,9 @@ struct tls_module {
+ };
+
+ struct __libc {
+- int can_do_threads;
+- int threaded;
+- int secure;
++ char can_do_threads;
++ char threaded;
++ char secure;
+ int threads_minus_1;
+ size_t *auxv;
+ struct tls_module *tls_head;
+--
+2.21.0
+
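The narrowing is not just tidying: on typical ABIs three ints become three chars plus one byte of padding, and patch 0004 then claims exactly that freed byte for the new need_locks field, so struct __libc never grows. A quick illustration with hypothetical struct names:

#include <stdio.h>

struct wide   { int  can_do_threads, threaded, secure; int threads_minus_1; };
struct narrow { char can_do_threads, threaded, secure; int threads_minus_1; };

int main(void)
{
	/* typically prints 16 then 8: the three flags plus one pad
	 * byte now fit in the space previously used by one int */
	printf("%zu %zu\n", sizeof(struct wide), sizeof(struct narrow));
	return 0;
}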
diff --git a/main/musl/0004-restore-lock-skipping-for-processes-that-return-to-s.patch b/main/musl/0004-restore-lock-skipping-for-processes-that-return-to-s.patch
new file mode 100644
index 00000000000..b30bd497724
--- /dev/null
+++ b/main/musl/0004-restore-lock-skipping-for-processes-that-return-to-s.patch
@@ -0,0 +1,101 @@
+From 8d81ba8c0bc6fe31136cb15c9c82ef4c24965040 Mon Sep 17 00:00:00 2001
+From: Rich Felker <dalias@aerifal.cx>
+Date: Fri, 22 May 2020 17:45:47 -0400
+Subject: [PATCH 4/4] restore lock-skipping for processes that return to
+ single-threaded state
+
+the design used here relies on the barrier provided by the first lock
+operation after the process returns to single-threaded state to
+synchronize with actions by the last thread that exited. by storing
+the intent to change modes in the same object used to detect whether
+locking is needed, it's possible to avoid an extra (possibly costly)
+memory load after the lock is taken.
+---
+ src/internal/libc.h | 1 +
+ src/malloc/malloc.c | 5 ++++-
+ src/thread/__lock.c | 4 +++-
+ src/thread/pthread_create.c | 8 ++++----
+ 4 files changed, 12 insertions(+), 6 deletions(-)
+
+diff --git a/src/internal/libc.h b/src/internal/libc.h
+index d47f58e0..619bba86 100644
+--- a/src/internal/libc.h
++++ b/src/internal/libc.h
+@@ -21,6 +21,7 @@ struct __libc {
+ char can_do_threads;
+ char threaded;
+ char secure;
++ volatile signed char need_locks;
+ int threads_minus_1;
+ size_t *auxv;
+ struct tls_module *tls_head;
+diff --git a/src/malloc/malloc.c b/src/malloc/malloc.c
+index 2553a62e..a803d4c9 100644
+--- a/src/malloc/malloc.c
++++ b/src/malloc/malloc.c
+@@ -26,8 +26,11 @@ int __malloc_replaced;
+
+ static inline void lock(volatile int *lk)
+ {
+- if (libc.threaded)
++ int need_locks = libc.need_locks;
++ if (need_locks) {
+ while(a_swap(lk, 1)) __wait(lk, lk+1, 1, 1);
++ if (need_locks < 0) libc.need_locks = 0;
++ }
+ }
+
+ static inline void unlock(volatile int *lk)
+diff --git a/src/thread/__lock.c b/src/thread/__lock.c
+index 5b9b144e..60eece49 100644
+--- a/src/thread/__lock.c
++++ b/src/thread/__lock.c
+@@ -18,9 +18,11 @@
+
+ void __lock(volatile int *l)
+ {
+- if (!libc.threaded) return;
++ int need_locks = libc.need_locks;
++ if (!need_locks) return;
+ /* fast path: INT_MIN for the lock, +1 for the congestion */
+ int current = a_cas(l, 0, INT_MIN + 1);
++ if (need_locks < 0) libc.need_locks = 0;
+ if (!current) return;
+ /* A first spin loop, for medium congestion. */
+ for (unsigned i = 0; i < 10; ++i) {
+diff --git a/src/thread/pthread_create.c b/src/thread/pthread_create.c
+index 6a3b0c21..6bdfb44f 100644
+--- a/src/thread/pthread_create.c
++++ b/src/thread/pthread_create.c
+@@ -118,8 +118,8 @@ _Noreturn void __pthread_exit(void *result)
+ * until the lock is released, which only happens after SYS_exit
+ * has been called, via the exit futex address pointing at the lock.
+ * This needs to happen after any possible calls to LOCK() that might
+- * skip locking if libc.threads_minus_1 is zero. */
+- libc.threads_minus_1--;
++ * skip locking if process appears single-threaded. */
++ if (!--libc.threads_minus_1) libc.need_locks = -1;
+ self->next->prev = self->prev;
+ self->prev->next = self->next;
+ self->prev = self->next = self;
+@@ -339,7 +339,7 @@ int __pthread_create(pthread_t *restrict res, const pthread_attr_t *restrict att
+ ~(1UL<<((SIGCANCEL-1)%(8*sizeof(long))));
+
+ __tl_lock();
+- libc.threads_minus_1++;
++ if (!libc.threads_minus_1++) libc.need_locks = 1;
+ ret = __clone((c11 ? start_c11 : start), stack, flags, args, &new->tid, TP_ADJ(new), &__thread_list_lock);
+
+ /* All clone failures translate to EAGAIN. If explicit scheduling
+@@ -363,7 +363,7 @@ int __pthread_create(pthread_t *restrict res, const pthread_attr_t *restrict att
+ new->next->prev = new;
+ new->prev->next = new;
+ } else {
+- libc.threads_minus_1--;
++ if (!--libc.threads_minus_1) libc.need_locks = 0;
+ }
+ __tl_unlock();
+ __restore_sigs(&set);
+--
+2.21.0
+
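The whole protocol this last patch builds fits in a few lines, so here is a self-contained sketch of the three-state flag, using <stdatomic.h> in place of musl's internal atomics (the names and the bare spin loop are simplifications, not the real implementation):

#include <stdatomic.h>

/* need_locks ==  1 : multi-threaded, take locks
 * need_locks ==  0 : stably single-threaded, skip locks
 * need_locks == -1 : just returned to single-threaded state; the
 *                    next lock acquisition supplies the barrier
 *                    that synchronizes with the exited thread,
 *                    then downgrades the flag to 0               */
volatile signed char need_locks;
int threads_minus_1;               /* only written under the thread-list lock */
atomic_int lk;

void sketch_lock(void)
{
	signed char nl = need_locks;   /* one load: the same value decides
	                                  both whether to lock and whether
	                                  to reset afterwards */
	if (!nl) return;
	while (atomic_exchange(&lk, 1))
		;                      /* acquire = the synchronizing barrier */
	if (nl < 0) need_locks = 0;    /* from now on, skipping is safe */
}

/* elsewhere, under the thread-list lock:
 *   creation:   if (!threads_minus_1++) need_locks = 1;
 *   last exit:  if (!--threads_minus_1) need_locks = -1;       */

Reading need_locks once into a local is what avoids the extra post-lock memory load the commit message mentions: the decision to downgrade reuses the value that already decided to take the lock.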
diff --git a/main/musl/APKBUILD b/main/musl/APKBUILD
index c68fe9eff75..e00c18db4cb 100644
--- a/main/musl/APKBUILD
+++ b/main/musl/APKBUILD
@@ -2,7 +2,7 @@
# Maintainer: Timo Teräs <timo.teras@iki.fi>
pkgname=musl
pkgver=1.1.24
-pkgrel=8
+pkgrel=9
pkgdesc="the musl c library (libc) implementation"
url="https://musl.libc.org/"
arch="all"
@@ -31,7 +31,11 @@ source="https://musl.libc.org/releases/musl-$pkgver.tar.gz
ppc64-fpregset_t.patch
fix-remaining-direct-use-of-stat-syscalls-outside.patch
set-AD-bit-in-dns-queries-suppress-for-internal-use.patch
- dont-use-threads-minus-1-for-skipping-locks.patch
+
+ 0001-reorder-thread-list-unlink-in-pthread_exit-after-all.patch
+ 0002-don-t-use-libc.threads_minus_1-as-relaxed-atomic-for.patch
+ 0003-cut-down-size-of-some-libc-struct-members.patch
+ 0004-restore-lock-skipping-for-processes-that-return-to-s.patch
ldconfig
__stack_chk_fail_local.c
@@ -187,7 +191,10 @@ e7133ce2f0b172e2da03567e41f03bef58b6ff8eaac614494751228bcc102c39cbe312f0beb567c5
3de8e50519e33a55d2cc737b56011a7178d1c83230477d46e11e67195e08e74bff735c6013de6ff170bb2390e89bfca9919cc8728c064b3eeaab2035dd7e5c08 ppc64-fpregset_t.patch
d277e45af78ed2bd9f556ae30636783fc401b4d0a339d6e37b77f18ed1486f48fc631e3455f8764ddbc7d9aba41a270ab345ec3495955dfd22b27a306441ba2a fix-remaining-direct-use-of-stat-syscalls-outside.patch
dd46ef77b71d34b6207611be59dd4555b4e53fd7169732b9e5ee9a66f1e8da69fcca6634f895b9d34d8861d37ac0eaa86618f5f3f3a81cf9c47321d1c5d37ee5 set-AD-bit-in-dns-queries-suppress-for-internal-use.patch
-1372f9ddcfd48df8929ea5af76dd5e806f89c9d7b70b8a19b8480987b7fab5632e939370f4160cd2c132549cfcda0f4470ad5b2d46f74fd615ef1e9f2f77a72c dont-use-threads-minus-1-for-skipping-locks.patch
+0bb30198e92d26058f98f28be31bb8cee89a699e9fa9377315c9a86f504708ddb432d579bebfd4429da3fd5e6832e4166aa80d4d3f08658702206474fd1a8ffe 0001-reorder-thread-list-unlink-in-pthread_exit-after-all.patch
+c6cd692359961c382b7382c6eb819bf93ca8d687b7bd71e4d992acabc7003bd8388b22898059b6ac5abdf08c07020c8cbd2a5f908802258418079e096cdbc058 0002-don-t-use-libc.threads_minus_1-as-relaxed-atomic-for.patch
+416aa17d4211ebbca97b4ce47eea8d8375ac5e0b602a4806175bc172be21b8291ac08fdaf39203e3a083a067bbbfa999cfb875f739d5fa07871885ac02eba75a 0003-cut-down-size-of-some-libc-struct-members.patch
+f0e5ab79a793152c5e6c1c560b409ef062c8f9232c893b36c58b9667e5e0e86969555d8e2e9e17b59835ec7ed3c926a6ccce453824561ad82e47364c44374549 0004-restore-lock-skipping-for-processes-that-return-to-s.patch
8d3a2d5315fc56fee7da9abb8b89bb38c6046c33d154c10d168fb35bfde6b0cf9f13042a3bceee34daf091bc409d699223735dcf19f382eeee1f6be34154f26f ldconfig
062bb49fa54839010acd4af113e20f7263dde1c8a2ca359b5fb2661ef9ed9d84a0f7c3bc10c25dcfa10bb3c5a4874588dff636ac43d5dbb3d748d75400756d0b __stack_chk_fail_local.c
0d80f37b34a35e3d14b012257c50862dfeb9d2c81139ea2dfa101d981d093b009b9fa450ba27a708ac59377a48626971dfc58e20a3799084a65777a0c32cbc7d getconf.c
diff --git a/main/musl/dont-use-threads-minus-1-for-skipping-locks.patch b/main/musl/dont-use-threads-minus-1-for-skipping-locks.patch
deleted file mode 100644
index 6d40ebacf41..00000000000
--- a/main/musl/dont-use-threads-minus-1-for-skipping-locks.patch
+++ /dev/null
@@ -1,70 +0,0 @@
-commit cbcb082e045e28abce2b3f72e922c6ccdcb96c06
-Author: Rich Felker <dalias@aerifal.cx>
-Date: Thu May 21 23:32:45 2020 -0400
-
- don't use libc.threads_minus_1 as relaxed atomic for skipping locks
-
- after all but the last thread exits, the next thread to observe
- libc.threads_minus_1==0 and conclude that it can skip locking fails to
- synchronize with any changes to memory that were made by the
- last-exiting thread. this can produce data races.
-
- on some archs, at least x86, memory synchronization is unlikely to be
- a problem; however, with the inline locks in malloc, skipping the lock
- also eliminated the compiler barrier, and caused code that needed to
- re-check chunk in-use bits after obtaining the lock to reuse a stale
- value, possibly from before the process became single-threaded. this
- in turn produced corruption of the heap state.
-
- some uses of libc.threads_minus_1 remain, especially for allocation of
- new TLS in the dynamic linker; otherwise, it could be removed
- entirely. it's made non-volatile to reflect that the remaining
- accesses are only made under lock on the thread list.
-
- instead of libc.threads_minus_1, libc.threaded is now used for
- skipping locks. the difference is that libc.threaded is permanently
- true once an additional thread has been created. this will produce
- some performance regression in processes that are mostly
- single-threaded but occasionally creating threads. in the future it
- may be possible to bring back the full lock-skipping, but more care
- needs to be taken to produce a safe design.
-
-diff --git a/src/internal/libc.h b/src/internal/libc.h
-index ac97dc7e..c0614852 100644
---- a/src/internal/libc.h
-+++ b/src/internal/libc.h
-@@ -21,7 +21,7 @@ struct __libc {
- int can_do_threads;
- int threaded;
- int secure;
-- volatile int threads_minus_1;
-+ int threads_minus_1;
- size_t *auxv;
- struct tls_module *tls_head;
- size_t tls_size, tls_align, tls_cnt;
-diff --git a/src/malloc/malloc.c b/src/malloc/malloc.c
-index 96982596..2553a62e 100644
---- a/src/malloc/malloc.c
-+++ b/src/malloc/malloc.c
-@@ -26,7 +26,7 @@ int __malloc_replaced;
-
- static inline void lock(volatile int *lk)
- {
-- if (libc.threads_minus_1)
-+ if (libc.threaded)
- while(a_swap(lk, 1)) __wait(lk, lk+1, 1, 1);
- }
-
-diff --git a/src/thread/__lock.c b/src/thread/__lock.c
-index 45557c88..5b9b144e 100644
---- a/src/thread/__lock.c
-+++ b/src/thread/__lock.c
-@@ -18,7 +18,7 @@
-
- void __lock(volatile int *l)
- {
-- if (!libc.threads_minus_1) return;
-+ if (!libc.threaded) return;
- /* fast path: INT_MIN for the lock, +1 for the congestion */
- int current = a_cas(l, 0, INT_MIN + 1);
- if (!current) return;