author | Natanael Copa <ncopa@alpinelinux.org> | 2021-02-23 12:46:44 +0000 |
---|---|---|
committer | Natanael Copa <ncopa@alpinelinux.org> | 2021-02-23 13:49:18 +0100 |
commit | 4c7d801557e9862fe0b8c444dbacb1792130cf24 (patch) | |
tree | 4834fa8d471bd14dad2147f891f7ae00a11b6ae3 | |
parent | 23c8f9b26a2c9dc5503f23898dbd2af11b4242be (diff) | |
main/linux-vanilla: build fix for aarch64
upstream discussion:
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org/thread/RIP5GVME6S7BNF26SG4KBT2ZPW5P3DBY/
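In short, the two backported patches below fix a GCC/assembler interaction in the arm64 atomics: the LL/SC implementations passed sub and logical operands with the 'I' machine constraint, which describes add immediates, so with CONFIG_ARM64_LSE_ATOMICS disabled the assembler could be handed a constant it cannot encode and the build fails. Roughly, the corrected ATOMIC_OP expansion for atomic_sub() looks like the sketch below — a hand-expanded illustration of the macro shown in the diff; the standalone function and its name are mine, not kernel identifiers:

```c
/* Hand-expanded sketch of ATOMIC_OP(sub, sub, J) after the fix; the real
 * code is generated by the macro in arch/arm64/include/asm/atomic_ll_sc.h. */
static inline void atomic_sub_sketch(int i, int *counter)
{
	int result;
	unsigned long tmp;

	asm volatile(
	"// atomic_sub\n"
	"	prfm	pstl1strm, %2\n"	/* prefetch the counter for store    */
	"1:	ldxr	%w0, %2\n"		/* load-exclusive the counter        */
	"	sub	%w0, %w0, %w3\n"	/* subtract i                        */
	"	stxr	%w1, %w0, %2\n"		/* store-exclusive, %w1 = failure    */
	"	cbnz	%w1, 1b"		/* retry if we lost the reservation  */
	: "=&r" (result), "=&r" (tmp), "+Q" (*counter)
	: "Jr" (i));	/* was "Ir": 'I' is the ADD immediate range, 'J' the SUB
			 * range, so an add-only constant could reach the
			 * assembler as an unencodable sub immediate */
}
```

With 'J' (or a plain register operand), constants that only fit the add range are forced into a register instead of being emitted as an immediate the sub instruction cannot encode.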
5 files changed, 622 insertions, 7 deletions
diff --git a/main/linux-vanilla/0001-arm64-Avoid-redundant-type-conversions-in-xchg-and-c.patch b/main/linux-vanilla/0001-arm64-Avoid-redundant-type-conversions-in-xchg-and-c.patch new file mode 100644 index 00000000000..2441864dfb7 --- /dev/null +++ b/main/linux-vanilla/0001-arm64-Avoid-redundant-type-conversions-in-xchg-and-c.patch @@ -0,0 +1,355 @@ +From 7b7b95eca1c3c2d6e5302b813b2b8470d004dedb Mon Sep 17 00:00:00 2001 +From: Will Deacon <will.deacon@arm.com> +Date: Thu, 13 Sep 2018 13:30:45 +0100 +Subject: [PATCH 1/2] arm64: Avoid redundant type conversions in xchg() and + cmpxchg() + +Our atomic instructions (either LSE atomics of LDXR/STXR sequences) +natively support byte, half-word, word and double-word memory accesses +so there is no need to mask the data register prior to being stored. + +Signed-off-by: Will Deacon <will.deacon@arm.com> +(cherry picked from commit 5ef3fe4cecdf82fdd71ce78988403963d01444d4) +--- + arch/arm64/include/asm/atomic_ll_sc.h | 53 ++++++------ + arch/arm64/include/asm/atomic_lse.h | 46 +++++----- + arch/arm64/include/asm/cmpxchg.h | 116 +++++++++++++------------- + 3 files changed, 108 insertions(+), 107 deletions(-) + +diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h +index f5a2d09afb38..f02d3bf7b9e6 100644 +--- a/arch/arm64/include/asm/atomic_ll_sc.h ++++ b/arch/arm64/include/asm/atomic_ll_sc.h +@@ -248,48 +248,49 @@ __LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v)) + } + __LL_SC_EXPORT(atomic64_dec_if_positive); + +-#define __CMPXCHG_CASE(w, sz, name, mb, acq, rel, cl) \ +-__LL_SC_INLINE unsigned long \ +-__LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr, \ +- unsigned long old, \ +- unsigned long new)) \ ++#define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl) \ ++__LL_SC_INLINE u##sz \ ++__LL_SC_PREFIX(__cmpxchg_case_##name##sz(volatile void *ptr, \ ++ unsigned long old, \ ++ u##sz new)) \ + { \ +- unsigned long tmp, oldval; \ ++ unsigned long tmp; \ ++ u##sz oldval; \ + \ + asm volatile( \ + " prfm pstl1strm, %[v]\n" \ +- "1: ld" #acq "xr" #sz "\t%" #w "[oldval], %[v]\n" \ ++ "1: ld" #acq "xr" #sfx "\t%" #w "[oldval], %[v]\n" \ + " eor %" #w "[tmp], %" #w "[oldval], %" #w "[old]\n" \ + " cbnz %" #w "[tmp], 2f\n" \ +- " st" #rel "xr" #sz "\t%w[tmp], %" #w "[new], %[v]\n" \ ++ " st" #rel "xr" #sfx "\t%w[tmp], %" #w "[new], %[v]\n" \ + " cbnz %w[tmp], 1b\n" \ + " " #mb "\n" \ + "2:" \ + : [tmp] "=&r" (tmp), [oldval] "=&r" (oldval), \ +- [v] "+Q" (*(unsigned long *)ptr) \ ++ [v] "+Q" (*(u##sz *)ptr) \ + : [old] "Lr" (old), [new] "r" (new) \ + : cl); \ + \ + return oldval; \ + } \ +-__LL_SC_EXPORT(__cmpxchg_case_##name); ++__LL_SC_EXPORT(__cmpxchg_case_##name##sz); + +-__CMPXCHG_CASE(w, b, 1, , , , ) +-__CMPXCHG_CASE(w, h, 2, , , , ) +-__CMPXCHG_CASE(w, , 4, , , , ) +-__CMPXCHG_CASE( , , 8, , , , ) +-__CMPXCHG_CASE(w, b, acq_1, , a, , "memory") +-__CMPXCHG_CASE(w, h, acq_2, , a, , "memory") +-__CMPXCHG_CASE(w, , acq_4, , a, , "memory") +-__CMPXCHG_CASE( , , acq_8, , a, , "memory") +-__CMPXCHG_CASE(w, b, rel_1, , , l, "memory") +-__CMPXCHG_CASE(w, h, rel_2, , , l, "memory") +-__CMPXCHG_CASE(w, , rel_4, , , l, "memory") +-__CMPXCHG_CASE( , , rel_8, , , l, "memory") +-__CMPXCHG_CASE(w, b, mb_1, dmb ish, , l, "memory") +-__CMPXCHG_CASE(w, h, mb_2, dmb ish, , l, "memory") +-__CMPXCHG_CASE(w, , mb_4, dmb ish, , l, "memory") +-__CMPXCHG_CASE( , , mb_8, dmb ish, , l, "memory") ++__CMPXCHG_CASE(w, b, , 8, , , , ) ++__CMPXCHG_CASE(w, h, , 16, , , , ) ++__CMPXCHG_CASE(w, , , 32, , , , ) ++__CMPXCHG_CASE( , , 
, 64, , , , ) ++__CMPXCHG_CASE(w, b, acq_, 8, , a, , "memory") ++__CMPXCHG_CASE(w, h, acq_, 16, , a, , "memory") ++__CMPXCHG_CASE(w, , acq_, 32, , a, , "memory") ++__CMPXCHG_CASE( , , acq_, 64, , a, , "memory") ++__CMPXCHG_CASE(w, b, rel_, 8, , , l, "memory") ++__CMPXCHG_CASE(w, h, rel_, 16, , , l, "memory") ++__CMPXCHG_CASE(w, , rel_, 32, , , l, "memory") ++__CMPXCHG_CASE( , , rel_, 64, , , l, "memory") ++__CMPXCHG_CASE(w, b, mb_, 8, dmb ish, , l, "memory") ++__CMPXCHG_CASE(w, h, mb_, 16, dmb ish, , l, "memory") ++__CMPXCHG_CASE(w, , mb_, 32, dmb ish, , l, "memory") ++__CMPXCHG_CASE( , , mb_, 64, dmb ish, , l, "memory") + + #undef __CMPXCHG_CASE + +diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h +index eab3de4f2ad2..80cadc789f1a 100644 +--- a/arch/arm64/include/asm/atomic_lse.h ++++ b/arch/arm64/include/asm/atomic_lse.h +@@ -480,24 +480,24 @@ static inline long atomic64_dec_if_positive(atomic64_t *v) + + #define __LL_SC_CMPXCHG(op) __LL_SC_CALL(__cmpxchg_case_##op) + +-#define __CMPXCHG_CASE(w, sz, name, mb, cl...) \ +-static inline unsigned long __cmpxchg_case_##name(volatile void *ptr, \ +- unsigned long old, \ +- unsigned long new) \ ++#define __CMPXCHG_CASE(w, sfx, name, sz, mb, cl...) \ ++static inline u##sz __cmpxchg_case_##name##sz(volatile void *ptr, \ ++ unsigned long old, \ ++ u##sz new) \ + { \ + register unsigned long x0 asm ("x0") = (unsigned long)ptr; \ + register unsigned long x1 asm ("x1") = old; \ +- register unsigned long x2 asm ("x2") = new; \ ++ register u##sz x2 asm ("x2") = new; \ + \ + asm volatile( \ + __LSE_PREAMBLE \ + ARM64_LSE_ATOMIC_INSN( \ + /* LL/SC */ \ +- __LL_SC_CMPXCHG(name) \ ++ __LL_SC_CMPXCHG(name##sz) \ + __nops(2), \ + /* LSE atomics */ \ + " mov " #w "30, %" #w "[old]\n" \ +- " cas" #mb #sz "\t" #w "30, %" #w "[new], %[v]\n" \ ++ " cas" #mb #sfx "\t" #w "30, %" #w "[new], %[v]\n" \ + " mov %" #w "[ret], " #w "30") \ + : [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr) \ + : [old] "r" (x1), [new] "r" (x2) \ +@@ -506,22 +506,22 @@ static inline unsigned long __cmpxchg_case_##name(volatile void *ptr, \ + return x0; \ + } + +-__CMPXCHG_CASE(w, b, 1, ) +-__CMPXCHG_CASE(w, h, 2, ) +-__CMPXCHG_CASE(w, , 4, ) +-__CMPXCHG_CASE(x, , 8, ) +-__CMPXCHG_CASE(w, b, acq_1, a, "memory") +-__CMPXCHG_CASE(w, h, acq_2, a, "memory") +-__CMPXCHG_CASE(w, , acq_4, a, "memory") +-__CMPXCHG_CASE(x, , acq_8, a, "memory") +-__CMPXCHG_CASE(w, b, rel_1, l, "memory") +-__CMPXCHG_CASE(w, h, rel_2, l, "memory") +-__CMPXCHG_CASE(w, , rel_4, l, "memory") +-__CMPXCHG_CASE(x, , rel_8, l, "memory") +-__CMPXCHG_CASE(w, b, mb_1, al, "memory") +-__CMPXCHG_CASE(w, h, mb_2, al, "memory") +-__CMPXCHG_CASE(w, , mb_4, al, "memory") +-__CMPXCHG_CASE(x, , mb_8, al, "memory") ++__CMPXCHG_CASE(w, b, , 8, ) ++__CMPXCHG_CASE(w, h, , 16, ) ++__CMPXCHG_CASE(w, , , 32, ) ++__CMPXCHG_CASE(x, , , 64, ) ++__CMPXCHG_CASE(w, b, acq_, 8, a, "memory") ++__CMPXCHG_CASE(w, h, acq_, 16, a, "memory") ++__CMPXCHG_CASE(w, , acq_, 32, a, "memory") ++__CMPXCHG_CASE(x, , acq_, 64, a, "memory") ++__CMPXCHG_CASE(w, b, rel_, 8, l, "memory") ++__CMPXCHG_CASE(w, h, rel_, 16, l, "memory") ++__CMPXCHG_CASE(w, , rel_, 32, l, "memory") ++__CMPXCHG_CASE(x, , rel_, 64, l, "memory") ++__CMPXCHG_CASE(w, b, mb_, 8, al, "memory") ++__CMPXCHG_CASE(w, h, mb_, 16, al, "memory") ++__CMPXCHG_CASE(w, , mb_, 32, al, "memory") ++__CMPXCHG_CASE(x, , mb_, 64, al, "memory") + + #undef __LL_SC_CMPXCHG + #undef __CMPXCHG_CASE +diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h 
+index d8b01c7c9cd3..94ccb3bfbd61 100644 +--- a/arch/arm64/include/asm/cmpxchg.h ++++ b/arch/arm64/include/asm/cmpxchg.h +@@ -30,46 +30,46 @@ + * barrier case is generated as release+dmb for the former and + * acquire+release for the latter. + */ +-#define __XCHG_CASE(w, sz, name, mb, nop_lse, acq, acq_lse, rel, cl) \ +-static inline unsigned long __xchg_case_##name(unsigned long x, \ +- volatile void *ptr) \ +-{ \ +- unsigned long ret, tmp; \ +- \ +- asm volatile(ARM64_LSE_ATOMIC_INSN( \ +- /* LL/SC */ \ +- " prfm pstl1strm, %2\n" \ +- "1: ld" #acq "xr" #sz "\t%" #w "0, %2\n" \ +- " st" #rel "xr" #sz "\t%w1, %" #w "3, %2\n" \ +- " cbnz %w1, 1b\n" \ +- " " #mb, \ +- /* LSE atomics */ \ +- " swp" #acq_lse #rel #sz "\t%" #w "3, %" #w "0, %2\n" \ +- __nops(3) \ +- " " #nop_lse) \ +- : "=&r" (ret), "=&r" (tmp), "+Q" (*(unsigned long *)ptr) \ +- : "r" (x) \ +- : cl); \ +- \ +- return ret; \ ++#define __XCHG_CASE(w, sfx, name, sz, mb, nop_lse, acq, acq_lse, rel, cl) \ ++static inline u##sz __xchg_case_##name##sz(u##sz x, volatile void *ptr) \ ++{ \ ++ u##sz ret; \ ++ unsigned long tmp; \ ++ \ ++ asm volatile(ARM64_LSE_ATOMIC_INSN( \ ++ /* LL/SC */ \ ++ " prfm pstl1strm, %2\n" \ ++ "1: ld" #acq "xr" #sfx "\t%" #w "0, %2\n" \ ++ " st" #rel "xr" #sfx "\t%w1, %" #w "3, %2\n" \ ++ " cbnz %w1, 1b\n" \ ++ " " #mb, \ ++ /* LSE atomics */ \ ++ " swp" #acq_lse #rel #sfx "\t%" #w "3, %" #w "0, %2\n" \ ++ __nops(3) \ ++ " " #nop_lse) \ ++ : "=&r" (ret), "=&r" (tmp), "+Q" (*(u##sz *)ptr) \ ++ : "r" (x) \ ++ : cl); \ ++ \ ++ return ret; \ + } + +-__XCHG_CASE(w, b, 1, , , , , , ) +-__XCHG_CASE(w, h, 2, , , , , , ) +-__XCHG_CASE(w, , 4, , , , , , ) +-__XCHG_CASE( , , 8, , , , , , ) +-__XCHG_CASE(w, b, acq_1, , , a, a, , "memory") +-__XCHG_CASE(w, h, acq_2, , , a, a, , "memory") +-__XCHG_CASE(w, , acq_4, , , a, a, , "memory") +-__XCHG_CASE( , , acq_8, , , a, a, , "memory") +-__XCHG_CASE(w, b, rel_1, , , , , l, "memory") +-__XCHG_CASE(w, h, rel_2, , , , , l, "memory") +-__XCHG_CASE(w, , rel_4, , , , , l, "memory") +-__XCHG_CASE( , , rel_8, , , , , l, "memory") +-__XCHG_CASE(w, b, mb_1, dmb ish, nop, , a, l, "memory") +-__XCHG_CASE(w, h, mb_2, dmb ish, nop, , a, l, "memory") +-__XCHG_CASE(w, , mb_4, dmb ish, nop, , a, l, "memory") +-__XCHG_CASE( , , mb_8, dmb ish, nop, , a, l, "memory") ++__XCHG_CASE(w, b, , 8, , , , , , ) ++__XCHG_CASE(w, h, , 16, , , , , , ) ++__XCHG_CASE(w, , , 32, , , , , , ) ++__XCHG_CASE( , , , 64, , , , , , ) ++__XCHG_CASE(w, b, acq_, 8, , , a, a, , "memory") ++__XCHG_CASE(w, h, acq_, 16, , , a, a, , "memory") ++__XCHG_CASE(w, , acq_, 32, , , a, a, , "memory") ++__XCHG_CASE( , , acq_, 64, , , a, a, , "memory") ++__XCHG_CASE(w, b, rel_, 8, , , , , l, "memory") ++__XCHG_CASE(w, h, rel_, 16, , , , , l, "memory") ++__XCHG_CASE(w, , rel_, 32, , , , , l, "memory") ++__XCHG_CASE( , , rel_, 64, , , , , l, "memory") ++__XCHG_CASE(w, b, mb_, 8, dmb ish, nop, , a, l, "memory") ++__XCHG_CASE(w, h, mb_, 16, dmb ish, nop, , a, l, "memory") ++__XCHG_CASE(w, , mb_, 32, dmb ish, nop, , a, l, "memory") ++__XCHG_CASE( , , mb_, 64, dmb ish, nop, , a, l, "memory") + + #undef __XCHG_CASE + +@@ -80,13 +80,13 @@ static __always_inline unsigned long __xchg##sfx(unsigned long x, \ + { \ + switch (size) { \ + case 1: \ +- return __xchg_case##sfx##_1(x, ptr); \ ++ return __xchg_case##sfx##_8(x, ptr); \ + case 2: \ +- return __xchg_case##sfx##_2(x, ptr); \ ++ return __xchg_case##sfx##_16(x, ptr); \ + case 4: \ +- return __xchg_case##sfx##_4(x, ptr); \ ++ return __xchg_case##sfx##_32(x, ptr); \ + case 8: \ +- return 
__xchg_case##sfx##_8(x, ptr); \ ++ return __xchg_case##sfx##_64(x, ptr); \ + default: \ + BUILD_BUG(); \ + } \ +@@ -123,13 +123,13 @@ static __always_inline unsigned long __cmpxchg##sfx(volatile void *ptr, \ + { \ + switch (size) { \ + case 1: \ +- return __cmpxchg_case##sfx##_1(ptr, (u8)old, new); \ ++ return __cmpxchg_case##sfx##_8(ptr, (u8)old, new); \ + case 2: \ +- return __cmpxchg_case##sfx##_2(ptr, (u16)old, new); \ ++ return __cmpxchg_case##sfx##_16(ptr, (u16)old, new); \ + case 4: \ +- return __cmpxchg_case##sfx##_4(ptr, old, new); \ ++ return __cmpxchg_case##sfx##_32(ptr, old, new); \ + case 8: \ +- return __cmpxchg_case##sfx##_8(ptr, old, new); \ ++ return __cmpxchg_case##sfx##_64(ptr, old, new); \ + default: \ + BUILD_BUG(); \ + } \ +@@ -197,16 +197,16 @@ __CMPXCHG_GEN(_mb) + __ret; \ + }) + +-#define __CMPWAIT_CASE(w, sz, name) \ +-static inline void __cmpwait_case_##name(volatile void *ptr, \ +- unsigned long val) \ ++#define __CMPWAIT_CASE(w, sfx, sz) \ ++static inline void __cmpwait_case_##sz(volatile void *ptr, \ ++ unsigned long val) \ + { \ + unsigned long tmp; \ + \ + asm volatile( \ + " sevl\n" \ + " wfe\n" \ +- " ldxr" #sz "\t%" #w "[tmp], %[v]\n" \ ++ " ldxr" #sfx "\t%" #w "[tmp], %[v]\n" \ + " eor %" #w "[tmp], %" #w "[tmp], %" #w "[val]\n" \ + " cbnz %" #w "[tmp], 1f\n" \ + " wfe\n" \ +@@ -215,10 +215,10 @@ static inline void __cmpwait_case_##name(volatile void *ptr, \ + : [val] "r" (val)); \ + } + +-__CMPWAIT_CASE(w, b, 1); +-__CMPWAIT_CASE(w, h, 2); +-__CMPWAIT_CASE(w, , 4); +-__CMPWAIT_CASE( , , 8); ++__CMPWAIT_CASE(w, b, 8); ++__CMPWAIT_CASE(w, h, 16); ++__CMPWAIT_CASE(w, , 32); ++__CMPWAIT_CASE( , , 64); + + #undef __CMPWAIT_CASE + +@@ -229,13 +229,13 @@ static __always_inline void __cmpwait##sfx(volatile void *ptr, \ + { \ + switch (size) { \ + case 1: \ +- return __cmpwait_case##sfx##_1(ptr, (u8)val); \ ++ return __cmpwait_case##sfx##_8(ptr, (u8)val); \ + case 2: \ +- return __cmpwait_case##sfx##_2(ptr, (u16)val); \ ++ return __cmpwait_case##sfx##_16(ptr, (u16)val); \ + case 4: \ +- return __cmpwait_case##sfx##_4(ptr, val); \ ++ return __cmpwait_case##sfx##_32(ptr, val); \ + case 8: \ +- return __cmpwait_case##sfx##_8(ptr, val); \ ++ return __cmpwait_case##sfx##_64(ptr, val); \ + default: \ + BUILD_BUG(); \ + } \ +-- +2.30.1 + diff --git a/main/linux-vanilla/0002-arm64-Use-correct-ll-sc-atomic-constraints.patch b/main/linux-vanilla/0002-arm64-Use-correct-ll-sc-atomic-constraints.patch new file mode 100644 index 00000000000..2390c520d9b --- /dev/null +++ b/main/linux-vanilla/0002-arm64-Use-correct-ll-sc-atomic-constraints.patch @@ -0,0 +1,252 @@ +From 44f0d02f40ee3203fd3c6433be3407b826d94e42 Mon Sep 17 00:00:00 2001 +From: Andrew Murray <andrew.murray@arm.com> +Date: Wed, 28 Aug 2019 18:50:06 +0100 +Subject: [PATCH 2/2] arm64: Use correct ll/sc atomic constraints + +The A64 ISA accepts distinct (but overlapping) ranges of immediates for: + + * add arithmetic instructions ('I' machine constraint) + * sub arithmetic instructions ('J' machine constraint) + * 32-bit logical instructions ('K' machine constraint) + * 64-bit logical instructions ('L' machine constraint) + +... but we currently use the 'I' constraint for many atomic operations +using sub or logical instructions, which is not always valid. + +When CONFIG_ARM64_LSE_ATOMICS is not set, this allows invalid immediates +to be passed to instructions, potentially resulting in a build failure. 
+When CONFIG_ARM64_LSE_ATOMICS is selected the out-of-line ll/sc atomics +always use a register as they have no visibility of the value passed by +the caller. + +This patch adds a constraint parameter to the ATOMIC_xx and +__CMPXCHG_CASE macros so that we can pass appropriate constraints for +each case, with uses updated accordingly. + +Unfortunately prior to GCC 8.1.0 the 'K' constraint erroneously accepted +'4294967295', so we must instead force the use of a register. + +Signed-off-by: Andrew Murray <andrew.murray@arm.com> +Signed-off-by: Will Deacon <will@kernel.org> +(cherry picked from commit 580fa1b874711d633f9b145b7777b0e83ebf3787) +--- + arch/arm64/include/asm/atomic_ll_sc.h | 89 ++++++++++++++------------- + 1 file changed, 47 insertions(+), 42 deletions(-) + +diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h +index f02d3bf7b9e6..1cc42441bc67 100644 +--- a/arch/arm64/include/asm/atomic_ll_sc.h ++++ b/arch/arm64/include/asm/atomic_ll_sc.h +@@ -37,7 +37,7 @@ + * (the optimize attribute silently ignores these options). + */ + +-#define ATOMIC_OP(op, asm_op) \ ++#define ATOMIC_OP(op, asm_op, constraint) \ + __LL_SC_INLINE void \ + __LL_SC_PREFIX(atomic_##op(int i, atomic_t *v)) \ + { \ +@@ -51,11 +51,11 @@ __LL_SC_PREFIX(atomic_##op(int i, atomic_t *v)) \ + " stxr %w1, %w0, %2\n" \ + " cbnz %w1, 1b" \ + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ +- : "Ir" (i)); \ ++ : #constraint "r" (i)); \ + } \ + __LL_SC_EXPORT(atomic_##op); + +-#define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op) \ ++#define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\ + __LL_SC_INLINE int \ + __LL_SC_PREFIX(atomic_##op##_return##name(int i, atomic_t *v)) \ + { \ +@@ -70,14 +70,14 @@ __LL_SC_PREFIX(atomic_##op##_return##name(int i, atomic_t *v)) \ + " cbnz %w1, 1b\n" \ + " " #mb \ + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ +- : "Ir" (i) \ ++ : #constraint "r" (i) \ + : cl); \ + \ + return result; \ + } \ + __LL_SC_EXPORT(atomic_##op##_return##name); + +-#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op) \ ++#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint) \ + __LL_SC_INLINE int \ + __LL_SC_PREFIX(atomic_fetch_##op##name(int i, atomic_t *v)) \ + { \ +@@ -92,7 +92,7 @@ __LL_SC_PREFIX(atomic_fetch_##op##name(int i, atomic_t *v)) \ + " cbnz %w2, 1b\n" \ + " " #mb \ + : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \ +- : "Ir" (i) \ ++ : #constraint "r" (i) \ + : cl); \ + \ + return result; \ +@@ -110,8 +110,8 @@ __LL_SC_EXPORT(atomic_fetch_##op##name); + ATOMIC_FETCH_OP (_acquire, , a, , "memory", __VA_ARGS__)\ + ATOMIC_FETCH_OP (_release, , , l, "memory", __VA_ARGS__) + +-ATOMIC_OPS(add, add) +-ATOMIC_OPS(sub, sub) ++ATOMIC_OPS(add, add, I) ++ATOMIC_OPS(sub, sub, J) + + #undef ATOMIC_OPS + #define ATOMIC_OPS(...) 
\ +@@ -121,17 +121,17 @@ ATOMIC_OPS(sub, sub) + ATOMIC_FETCH_OP (_acquire, , a, , "memory", __VA_ARGS__)\ + ATOMIC_FETCH_OP (_release, , , l, "memory", __VA_ARGS__) + +-ATOMIC_OPS(and, and) +-ATOMIC_OPS(andnot, bic) +-ATOMIC_OPS(or, orr) +-ATOMIC_OPS(xor, eor) ++ATOMIC_OPS(and, and, ) ++ATOMIC_OPS(andnot, bic, ) ++ATOMIC_OPS(or, orr, ) ++ATOMIC_OPS(xor, eor, ) + + #undef ATOMIC_OPS + #undef ATOMIC_FETCH_OP + #undef ATOMIC_OP_RETURN + #undef ATOMIC_OP + +-#define ATOMIC64_OP(op, asm_op) \ ++#define ATOMIC64_OP(op, asm_op, constraint) \ + __LL_SC_INLINE void \ + __LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v)) \ + { \ +@@ -145,11 +145,11 @@ __LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v)) \ + " stxr %w1, %0, %2\n" \ + " cbnz %w1, 1b" \ + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ +- : "Ir" (i)); \ ++ : #constraint "r" (i)); \ + } \ + __LL_SC_EXPORT(atomic64_##op); + +-#define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op) \ ++#define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\ + __LL_SC_INLINE long \ + __LL_SC_PREFIX(atomic64_##op##_return##name(long i, atomic64_t *v)) \ + { \ +@@ -164,14 +164,14 @@ __LL_SC_PREFIX(atomic64_##op##_return##name(long i, atomic64_t *v)) \ + " cbnz %w1, 1b\n" \ + " " #mb \ + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ +- : "Ir" (i) \ ++ : #constraint "r" (i) \ + : cl); \ + \ + return result; \ + } \ + __LL_SC_EXPORT(atomic64_##op##_return##name); + +-#define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op) \ ++#define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint)\ + __LL_SC_INLINE long \ + __LL_SC_PREFIX(atomic64_fetch_##op##name(long i, atomic64_t *v)) \ + { \ +@@ -186,7 +186,7 @@ __LL_SC_PREFIX(atomic64_fetch_##op##name(long i, atomic64_t *v)) \ + " cbnz %w2, 1b\n" \ + " " #mb \ + : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \ +- : "Ir" (i) \ ++ : #constraint "r" (i) \ + : cl); \ + \ + return result; \ +@@ -204,8 +204,8 @@ __LL_SC_EXPORT(atomic64_fetch_##op##name); + ATOMIC64_FETCH_OP (_acquire,, a, , "memory", __VA_ARGS__) \ + ATOMIC64_FETCH_OP (_release,, , l, "memory", __VA_ARGS__) + +-ATOMIC64_OPS(add, add) +-ATOMIC64_OPS(sub, sub) ++ATOMIC64_OPS(add, add, I) ++ATOMIC64_OPS(sub, sub, J) + + #undef ATOMIC64_OPS + #define ATOMIC64_OPS(...) 
\ +@@ -215,10 +215,10 @@ ATOMIC64_OPS(sub, sub) + ATOMIC64_FETCH_OP (_acquire,, a, , "memory", __VA_ARGS__) \ + ATOMIC64_FETCH_OP (_release,, , l, "memory", __VA_ARGS__) + +-ATOMIC64_OPS(and, and) +-ATOMIC64_OPS(andnot, bic) +-ATOMIC64_OPS(or, orr) +-ATOMIC64_OPS(xor, eor) ++ATOMIC64_OPS(and, and, L) ++ATOMIC64_OPS(andnot, bic, ) ++ATOMIC64_OPS(or, orr, L) ++ATOMIC64_OPS(xor, eor, L) + + #undef ATOMIC64_OPS + #undef ATOMIC64_FETCH_OP +@@ -248,7 +248,7 @@ __LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v)) + } + __LL_SC_EXPORT(atomic64_dec_if_positive); + +-#define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl) \ ++#define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl, constraint) \ + __LL_SC_INLINE u##sz \ + __LL_SC_PREFIX(__cmpxchg_case_##name##sz(volatile void *ptr, \ + unsigned long old, \ +@@ -268,29 +268,34 @@ __LL_SC_PREFIX(__cmpxchg_case_##name##sz(volatile void *ptr, \ + "2:" \ + : [tmp] "=&r" (tmp), [oldval] "=&r" (oldval), \ + [v] "+Q" (*(u##sz *)ptr) \ +- : [old] "Lr" (old), [new] "r" (new) \ ++ : [old] #constraint "r" (old), [new] "r" (new) \ + : cl); \ + \ + return oldval; \ + } \ + __LL_SC_EXPORT(__cmpxchg_case_##name##sz); + +-__CMPXCHG_CASE(w, b, , 8, , , , ) +-__CMPXCHG_CASE(w, h, , 16, , , , ) +-__CMPXCHG_CASE(w, , , 32, , , , ) +-__CMPXCHG_CASE( , , , 64, , , , ) +-__CMPXCHG_CASE(w, b, acq_, 8, , a, , "memory") +-__CMPXCHG_CASE(w, h, acq_, 16, , a, , "memory") +-__CMPXCHG_CASE(w, , acq_, 32, , a, , "memory") +-__CMPXCHG_CASE( , , acq_, 64, , a, , "memory") +-__CMPXCHG_CASE(w, b, rel_, 8, , , l, "memory") +-__CMPXCHG_CASE(w, h, rel_, 16, , , l, "memory") +-__CMPXCHG_CASE(w, , rel_, 32, , , l, "memory") +-__CMPXCHG_CASE( , , rel_, 64, , , l, "memory") +-__CMPXCHG_CASE(w, b, mb_, 8, dmb ish, , l, "memory") +-__CMPXCHG_CASE(w, h, mb_, 16, dmb ish, , l, "memory") +-__CMPXCHG_CASE(w, , mb_, 32, dmb ish, , l, "memory") +-__CMPXCHG_CASE( , , mb_, 64, dmb ish, , l, "memory") ++/* ++ * Earlier versions of GCC (no later than 8.1.0) appear to incorrectly ++ * handle the 'K' constraint for the value 4294967295 - thus we use no ++ * constraint for 32 bit operations. 
++ */ ++__CMPXCHG_CASE(w, b, , 8, , , , , ) ++__CMPXCHG_CASE(w, h, , 16, , , , , ) ++__CMPXCHG_CASE(w, , , 32, , , , , ) ++__CMPXCHG_CASE( , , , 64, , , , , L) ++__CMPXCHG_CASE(w, b, acq_, 8, , a, , "memory", ) ++__CMPXCHG_CASE(w, h, acq_, 16, , a, , "memory", ) ++__CMPXCHG_CASE(w, , acq_, 32, , a, , "memory", ) ++__CMPXCHG_CASE( , , acq_, 64, , a, , "memory", L) ++__CMPXCHG_CASE(w, b, rel_, 8, , , l, "memory", ) ++__CMPXCHG_CASE(w, h, rel_, 16, , , l, "memory", ) ++__CMPXCHG_CASE(w, , rel_, 32, , , l, "memory", ) ++__CMPXCHG_CASE( , , rel_, 64, , , l, "memory", L) ++__CMPXCHG_CASE(w, b, mb_, 8, dmb ish, , l, "memory", ) ++__CMPXCHG_CASE(w, h, mb_, 16, dmb ish, , l, "memory", ) ++__CMPXCHG_CASE(w, , mb_, 32, dmb ish, , l, "memory", ) ++__CMPXCHG_CASE( , , mb_, 64, dmb ish, , l, "memory", L) + + #undef __CMPXCHG_CASE + +-- +2.30.1 + diff --git a/main/linux-vanilla/APKBUILD b/main/linux-vanilla/APKBUILD index 72a5a5443bc..c107d5510e5 100644 --- a/main/linux-vanilla/APKBUILD +++ b/main/linux-vanilla/APKBUILD @@ -17,6 +17,9 @@ options="!strip" _config=${config:-config-vanilla.${CARCH}} install= source="https://cdn.kernel.org/pub/linux/kernel/v${pkgver%%.*}.x/linux-$_kernver.tar.xz + 0001-arm64-Avoid-redundant-type-conversions-in-xchg-and-c.patch + 0002-arm64-Use-correct-ll-sc-atomic-constraints.patch + config-vanilla.aarch64 config-vanilla.armhf config-vanilla.armv7 @@ -228,14 +231,16 @@ _dev() { } sha512sums="ab67cc746b375a8b135e8b23e35e1d6787930d19b3c26b2679787d62951cbdbc3bb66f8ededeb9b890e5008b2459397f9018f1a6772fdef67780b06a4cb9f6f4 linux-4.19.tar.xz -865231541bc54858a1a37b8106701fa7efdf09d2c67a2a62395c19a22d321f9b491b8added3aad391f92b885533ab90415b803c6f21a89cfc3d1da9a95cf31f2 config-vanilla.aarch64 +b6ca08d280358402f39e184ca4670e7f0216a8129ad54128ca92b3a8b0c1ac3ef04fa1a0ddbf0aee5f9a94ec4607e1e1f0e14d7684416fd04b0552b2aa39f986 0001-arm64-Avoid-redundant-type-conversions-in-xchg-and-c.patch +2387d6abd2947a2aa8da51dce8b0eeb432b30ed6e7e26e43e6851011aa5a3a784d8a78cf09dfad9598cfd9608e1b722708787730f0714fa734acd87e0f0df82d 0002-arm64-Use-correct-ll-sc-atomic-constraints.patch +0371a31ff6af76824bc443a253ebfad7594121a2081c94029fa60db3ac34057da1bc5ea9c2be647fd71732a4303c8e20981091e9b88e518b50f6f14baef8f141 config-vanilla.aarch64 60d58456547437829df739d0a58e0ae4b716d877e5e0b6512a5e60d0a2fba8c5adf14ef8f89c0dcb371d66e32a90796926be1cf6dd32779084796e071e5c1fd0 config-vanilla.armhf 60d58456547437829df739d0a58e0ae4b716d877e5e0b6512a5e60d0a2fba8c5adf14ef8f89c0dcb371d66e32a90796926be1cf6dd32779084796e071e5c1fd0 config-vanilla.armv7 e835acb24d8b395cfd29a7f1af1510df097d8a2315558ddd6f7eba7490f9763afbc64d2a9a084a367d53bce911413d603e577e957bcbf4e4a1066e57a51e6d70 config-vanilla.x86 ecbc5b5e2cc4b81a881cd543bc57ea691fc8600dc52d465085912c31d271c9a0e39926c1a06843706ad8907c147b7dbcb3b5324aaf4b9139baa61e51f1e6930a config-vanilla.x86_64 a805810bab50a5850248ae15f01cdadcf227cc808af472bc58e0c18305d1659e2e6f3796710beb22388dc5ea293e3cf4293daafa869e807dfe021710d7828e42 config-vanilla.ppc64le cffd64189ec33ca8a93f81252d718a1f6699ca45e169315f91dba6ff3342d6c5fed20834a879f702f63afa76545a29bc27b1c4f368f1fbcc23d0ca7de0dc1b64 config-vanilla.s390x -d2951cf4a4557512c5a10c9f3a3b0b7405e18fbe86684024dc2e7a54658f3474d6a188d314bc582dd50310b7a1f7218bfde4771b22033cddb63983836e4788e4 config-virt.aarch64 +9709525ae51b3bade186c64897f93e2fe90d878e5d94605320a9cc747b8a0e3164ecf6143c929d5be161648476bb36bdd7a371f7221ca9bf96f8d3f4e79b872d config-virt.aarch64 
ee0dcae6e6f0db5342ce21fa4dd78acc417045a84d9758a7e6650d3bed5b4b304eea57b5d3e0a1109d04f79d02e9e7f4c9a69268d477ea6c4435df092cda8119 config-virt.x86 c2b17dbc82c3f995bb32428f42f63ab3f537289b6fd4d0395dc5112273636a1dbba9547f66a75e9e0773713e09c514148ddd3e1a216475b862295318861cfdf1 config-virt.x86_64 9bb51df1822242aee8340b8d54b5d1eb9bab8c0fff37a5b671f2ab7d10e5b3f1bd9f6a7e13af600434cc406a42b6638a5659cc056917c44a158bf243b5383146 patch-4.19.176.xz" diff --git a/main/linux-vanilla/config-vanilla.aarch64 b/main/linux-vanilla/config-vanilla.aarch64 index 5ef9347f8d7..df9042d3bdb 100644 --- a/main/linux-vanilla/config-vanilla.aarch64 +++ b/main/linux-vanilla/config-vanilla.aarch64 @@ -1,6 +1,6 @@ # # Automatically generated file; DO NOT EDIT. -# Linux/arm64 4.19.118 Kernel Configuration +# Linux/arm64 4.19.176 Kernel Configuration # # @@ -378,6 +378,7 @@ CONFIG_ARM64_ERRATUM_845719=y CONFIG_ARM64_ERRATUM_843419=y CONFIG_ARM64_ERRATUM_1024718=y CONFIG_ARM64_ERRATUM_1463225=y +CONFIG_ARM64_ERRATUM_1542419=y CONFIG_CAVIUM_ERRATUM_22375=y CONFIG_CAVIUM_ERRATUM_23144=y CONFIG_CAVIUM_ERRATUM_23154=y @@ -2153,7 +2154,6 @@ CONFIG_BLK_DEV_SD=m CONFIG_CHR_DEV_ST=m CONFIG_CHR_DEV_OSST=m CONFIG_BLK_DEV_SR=m -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_CHR_DEV_SCH=m CONFIG_SCSI_ENCLOSURE=m @@ -3830,6 +3830,7 @@ CONFIG_SPI_SPIDEV=m # CONFIG_SPI_LOOPBACK_TEST is not set CONFIG_SPI_TLE62X0=m # CONFIG_SPI_SLAVE is not set +CONFIG_SPI_DYNAMIC=y # CONFIG_SPMI is not set # CONFIG_HSI is not set CONFIG_PPS=y @@ -5990,7 +5991,6 @@ CONFIG_USB_IDMOUSE=m CONFIG_USB_FTDI_ELAN=m # CONFIG_USB_APPLEDISPLAY is not set CONFIG_USB_SISUSBVGA=m -CONFIG_USB_SISUSBVGA_CON=y CONFIG_USB_LD=m # CONFIG_USB_TRANCEVIBRATOR is not set CONFIG_USB_IOWARRIOR=m @@ -6599,6 +6599,8 @@ CONFIG_TIMER_OF=y CONFIG_TIMER_ACPI=y CONFIG_TIMER_PROBE=y CONFIG_CLKSRC_MMIO=y +CONFIG_DW_APB_TIMER=y +CONFIG_DW_APB_TIMER_OF=y CONFIG_ROCKCHIP_TIMER=y CONFIG_ARM_ARCH_TIMER=y CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y @@ -7740,6 +7742,7 @@ CONFIG_BRANCH_PROFILE_NONE=y # CONFIG_STACK_TRACER is not set # CONFIG_BLK_DEV_IO_TRACE is not set CONFIG_KPROBE_EVENTS=y +# CONFIG_KPROBE_EVENTS_ON_NOTRACE is not set CONFIG_UPROBE_EVENTS=y CONFIG_PROBE_EVENTS=y CONFIG_DYNAMIC_FTRACE=y diff --git a/main/linux-vanilla/config-virt.aarch64 b/main/linux-vanilla/config-virt.aarch64 index 8cb928b1f20..36ead24c36e 100644 --- a/main/linux-vanilla/config-virt.aarch64 +++ b/main/linux-vanilla/config-virt.aarch64 @@ -1,6 +1,6 @@ # # Automatically generated file; DO NOT EDIT. -# Linux/arm64 4.19.118 Kernel Configuration +# Linux/arm64 4.19.176 Kernel Configuration # # @@ -361,6 +361,7 @@ CONFIG_ARM64_ERRATUM_834220=y CONFIG_ARM64_ERRATUM_843419=y CONFIG_ARM64_ERRATUM_1024718=y CONFIG_ARM64_ERRATUM_1463225=y +CONFIG_ARM64_ERRATUM_1542419=y CONFIG_CAVIUM_ERRATUM_22375=y CONFIG_CAVIUM_ERRATUM_23144=y CONFIG_CAVIUM_ERRATUM_23154=y @@ -1849,7 +1850,6 @@ CONFIG_BLK_DEV_SD=m # CONFIG_CHR_DEV_ST is not set # CONFIG_CHR_DEV_OSST is not set CONFIG_BLK_DEV_SR=m -# CONFIG_BLK_DEV_SR_VENDOR is not set CONFIG_CHR_DEV_SG=m # CONFIG_CHR_DEV_SCH is not set # CONFIG_SCSI_CONSTANTS is not set |
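The first patch is the prerequisite re-typing: the xchg/cmpxchg helpers are renamed from byte-count suffixes (_1/_2/_4/_8) to bit-width suffixes (_8/_16/_32/_64) and take/return sized uN values, so byte, half-word, word and double-word accesses need no masking of the data register. The sketch below is a minimal stand-in for that shape, with plain C in place of the LL/SC or LSE asm; all names are illustrative, not the kernel's identifiers, and it only covers integer-sized values:

```c
#include <stdint.h>

typedef uint8_t  u8;
typedef uint16_t u16;
typedef uint32_t u32;
typedef uint64_t u64;

/* One helper per access width, named by bit count and typed with the
 * matching uN type; the sized parameter/return types mean the compiler
 * does the truncation, so nothing has to be masked before the store. */
#define DEFINE_CMPXCHG_CASE(sz)						\
static inline u##sz cmpxchg_case_##sz(volatile void *ptr,		\
				      unsigned long old, u##sz new)	\
{									\
	volatile u##sz *p = ptr;					\
	u##sz oldval = *p;	/* plain C standing in for the asm */	\
	if (oldval == (u##sz)old)					\
		*p = new;						\
	return oldval;							\
}

DEFINE_CMPXCHG_CASE(8)
DEFINE_CMPXCHG_CASE(16)
DEFINE_CMPXCHG_CASE(32)
DEFINE_CMPXCHG_CASE(64)

/* Dispatch on the pointee size, mirroring __cmpxchg() after the rename
 * from _1/_2/_4/_8 to _8/_16/_32/_64. */
#define cmpxchg_sketch(ptr, old, new)					\
({									\
	__typeof__(*(ptr)) __ret = 0;					\
	switch (sizeof(*(ptr))) {					\
	case 1: __ret = cmpxchg_case_8(ptr, (u8)(old), (new)); break;	\
	case 2: __ret = cmpxchg_case_16(ptr, (u16)(old), (new)); break;	\
	case 4: __ret = cmpxchg_case_32(ptr, (old), (new)); break;	\
	case 8: __ret = cmpxchg_case_64(ptr, (old), (new)); break;	\
	default: break;	/* the kernel calls BUILD_BUG() here */		\
	}								\
	__ret;								\
})
```

For example, `cmpxchg_sketch(&some_u32, 0, 1)` routes to `cmpxchg_case_32()` and neither operand is masked on the way in or out, which is the point of the upstream cleanup that the second patch then builds on.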