Diffstat (limited to 'main/linux-grsec/grsecurity-2.2.2-3.2.1-201201221501.patch')
-rw-r--r--  main/linux-grsec/grsecurity-2.2.2-3.2.1-201201221501.patch  78764
1 file changed, 78764 insertions(+), 0 deletions(-)
diff --git a/main/linux-grsec/grsecurity-2.2.2-3.2.1-201201221501.patch b/main/linux-grsec/grsecurity-2.2.2-3.2.1-201201221501.patch
new file mode 100644
index 0000000000..ff965370cf
--- /dev/null
+++ b/main/linux-grsec/grsecurity-2.2.2-3.2.1-201201221501.patch
@@ -0,0 +1,78764 @@
+diff --git a/Documentation/dontdiff b/Documentation/dontdiff
+index dfa6fc6..0095943 100644
+--- a/Documentation/dontdiff
++++ b/Documentation/dontdiff
+@@ -5,6 +5,7 @@
+ *.cis
+ *.cpio
+ *.csp
++*.dbg
+ *.dsp
+ *.dvi
+ *.elf
+@@ -14,6 +15,7 @@
+ *.gcov
+ *.gen.S
+ *.gif
++*.gmo
+ *.grep
+ *.grp
+ *.gz
+@@ -48,9 +50,11 @@
+ *.tab.h
+ *.tex
+ *.ver
++*.vim
+ *.xml
+ *.xz
+ *_MODULES
++*_reg_safe.h
+ *_vga16.c
+ *~
+ \#*#
+@@ -70,6 +74,7 @@ Kerntypes
+ Module.markers
+ Module.symvers
+ PENDING
++PERF*
+ SCCS
+ System.map*
+ TAGS
+@@ -93,19 +98,24 @@ bounds.h
+ bsetup
+ btfixupprep
+ build
++builtin-policy.h
+ bvmlinux
+ bzImage*
+ capability_names.h
+ capflags.c
+ classlist.h*
++clut_vga16.c
++common-cmds.h
+ comp*.log
+ compile.h*
+ conf
+ config
+ config-*
+ config_data.h*
++config.c
+ config.mak
+ config.mak.autogen
++config.tmp
+ conmakehash
+ consolemap_deftbl.c*
+ cpustr.h
+@@ -119,6 +129,7 @@ dslm
+ elf2ecoff
+ elfconfig.h*
+ evergreen_reg_safe.h
++exception_policy.conf
+ fixdep
+ flask.h
+ fore200e_mkfirm
+@@ -126,12 +137,15 @@ fore200e_pca_fw.c*
+ gconf
+ gconf.glade.h
+ gen-devlist
++gen-kdb_cmds.c
+ gen_crc32table
+ gen_init_cpio
+ generated
+ genheaders
+ genksyms
+ *_gray256.c
++hash
++hid-example
+ hpet_example
+ hugepage-mmap
+ hugepage-shm
+@@ -146,7 +160,7 @@ int32.c
+ int4.c
+ int8.c
+ kallsyms
+-kconfig
++kern_constants.h
+ keywords.c
+ ksym.c*
+ ksym.h*
+@@ -154,7 +168,6 @@ kxgettext
+ lkc_defs.h
+ lex.c
+ lex.*.c
+-linux
+ logo_*.c
+ logo_*_clut224.c
+ logo_*_mono.c
+@@ -166,14 +179,15 @@ machtypes.h
+ map
+ map_hugetlb
+ maui_boot.h
+-media
+ mconf
++mdp
+ miboot*
+ mk_elfconfig
+ mkboot
+ mkbugboot
+ mkcpustr
+ mkdep
++mkpiggy
+ mkprep
+ mkregtable
+ mktables
+@@ -209,6 +223,7 @@ r300_reg_safe.h
+ r420_reg_safe.h
+ r600_reg_safe.h
+ recordmcount
++regdb.c
+ relocs
+ rlim_names.h
+ rn50_reg_safe.h
+@@ -219,6 +234,7 @@ setup
+ setup.bin
+ setup.elf
+ sImage
++slabinfo
+ sm_tbl*
+ split-include
+ syscalltab.h
+@@ -229,6 +245,7 @@ tftpboot.img
+ timeconst.h
+ times.h*
+ trix_boot.h
++user_constants.h
+ utsrelease.h*
+ vdso-syms.lds
+ vdso.lds
+@@ -246,7 +263,9 @@ vmlinux
+ vmlinux-*
+ vmlinux.aout
+ vmlinux.bin.all
++vmlinux.bin.bz2
+ vmlinux.lds
++vmlinux.relocs
+ vmlinuz
+ voffset.h
+ vsyscall.lds
+@@ -254,9 +273,11 @@ vsyscall_32.lds
+ wanxlfw.inc
+ uImage
+ unifdef
++utsrelease.h
+ wakeup.bin
+ wakeup.elf
+ wakeup.lds
+ zImage*
+ zconf.hash.c
++zconf.lex.c
+ zoffset.h
+diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
+index 81c287f..d456d02 100644
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -1935,6 +1935,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+ the specified number of seconds. This is to be used if
+ your oopses keep scrolling off the screen.
+
++ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
++ virtualization environments that don't cope well with the
++ expand down segment used by UDEREF on X86-32 or the frequent
++ page table updates on X86-64.
++
++ pax_softmode= 0/1 to disable/enable PaX softmode at boot time.
++
+ pcbit= [HW,ISDN]
+
+ pcd. [PARIDE]
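
The pax_softmode= parameter documented above is wired up as an early __setup
handler elsewhere in this patch. A minimal sketch of that plumbing, assuming
the int pax_softmode flag the patch introduces (the handler shape follows the
standard kernel boot-parameter convention):

    /* Sketch only; the actual handler lives elsewhere in this patch. */
    #include <linux/init.h>
    #include <linux/kernel.h>

    extern int pax_softmode;                /* flag assumed from the patch */

    static int __init setup_pax_softmode(char *str)
    {
        get_option(&str, &pax_softmode);    /* "pax_softmode=1" -> 1 */
        return 1;
    }
    __setup("pax_softmode=", setup_pax_softmode);
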
+diff --git a/Makefile b/Makefile
+index c5edffa..26654d7 100644
+--- a/Makefile
++++ b/Makefile
+@@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
+
+ HOSTCC = gcc
+ HOSTCXX = g++
+-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
+-HOSTCXXFLAGS = -O2
++HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
++HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
+
+ # Decide whether to build built-in, modular, or both.
+ # Normally, just do built-in.
+@@ -407,8 +408,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
+ # Rules shared between *config targets and build targets
+
+ # Basic helpers built in scripts/
+-PHONY += scripts_basic
+-scripts_basic:
++PHONY += scripts_basic gcc-plugins
++scripts_basic: gcc-plugins
+ $(Q)$(MAKE) $(build)=scripts/basic
+ $(Q)rm -f .tmp_quiet_recordmcount
+
+@@ -564,6 +565,46 @@ else
+ KBUILD_CFLAGS += -O2
+ endif
+
++ifndef DISABLE_PAX_PLUGINS
++ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
++ifndef DISABLE_PAX_CONSTIFY_PLUGIN
++CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
++endif
++ifdef CONFIG_PAX_MEMORY_STACKLEAK
++STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
++STACKLEAK_PLUGIN += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
++endif
++ifdef CONFIG_KALLOCSTAT_PLUGIN
++KALLOCSTAT_PLUGIN := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
++endif
++ifdef CONFIG_PAX_KERNEXEC_PLUGIN
++KERNEXEC_PLUGIN := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
++KERNEXEC_PLUGIN += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD)
++endif
++ifdef CONFIG_CHECKER_PLUGIN
++ifeq ($(call cc-ifversion, -ge, 0406, y), y)
++CHECKER_PLUGIN := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
++endif
++endif
++GCC_PLUGINS := $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN) $(KALLOCSTAT_PLUGIN) $(KERNEXEC_PLUGIN) $(CHECKER_PLUGIN)
++export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN
++ifeq ($(KBUILD_EXTMOD),)
++gcc-plugins:
++ $(Q)$(MAKE) $(build)=tools/gcc
++else
++gcc-plugins: ;
++endif
++else
++gcc-plugins:
++ifeq ($(call cc-ifversion, -ge, 0405, y), y)
++ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.)
++else
++ $(Q)echo "warning: your gcc version does not support plugins; you should upgrade to gcc 4.5 or newer"
++endif
++ $(Q)echo "PAX_MEMORY_STACKLEAK and other features will be less secure"
++endif
++endif
++
+ include $(srctree)/arch/$(SRCARCH)/Makefile
+
+ ifneq ($(CONFIG_FRAME_WARN),0)
+@@ -708,7 +749,7 @@ export mod_strip_cmd
+
+
+ ifeq ($(KBUILD_EXTMOD),)
+-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
++core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
+
+ vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
+ $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
+@@ -932,6 +973,7 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
+
+ # The actual objects are generated when descending,
+ # make sure no implicit rule kicks in
++$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS)
+ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
+
+ # Handle descending into subdirectories listed in $(vmlinux-dirs)
+@@ -941,7 +983,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
+ # Error messages still appears in the original language
+
+ PHONY += $(vmlinux-dirs)
+-$(vmlinux-dirs): prepare scripts
++$(vmlinux-dirs): gcc-plugins prepare scripts
+ $(Q)$(MAKE) $(build)=$@
+
+ # Store (new) KERNELRELASE string in include/config/kernel.release
+@@ -985,6 +1027,7 @@ prepare0: archprepare FORCE
+ $(Q)$(MAKE) $(build)=.
+
+ # All the preparing..
++prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS),$(KBUILD_CFLAGS))
+ prepare: prepare0
+
+ # Generate some files
+@@ -1086,6 +1129,7 @@ all: modules
+ # using awk while concatenating to the final file.
+
+ PHONY += modules
++modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
+ modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
+ $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
+ @$(kecho) ' Building modules, stage 2.';
+@@ -1101,7 +1145,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
+
+ # Target to prepare building external modules
+ PHONY += modules_prepare
+-modules_prepare: prepare scripts
++modules_prepare: gcc-plugins prepare scripts
+
+ # Target to install modules
+ PHONY += modules_install
+@@ -1198,6 +1242,7 @@ distclean: mrproper
+ \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
+ -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
+ -o -name '.*.rej' \
++ -o -name '.*.rej' -o -name '*.so' \
+ -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
+ -type f -print | xargs rm -f
+
+@@ -1358,6 +1403,7 @@ PHONY += $(module-dirs) modules
+ $(module-dirs): crmodverdir $(objtree)/Module.symvers
+ $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
+
++modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
+ modules: $(module-dirs)
+ @$(kecho) ' Building modules, stage 2.';
+ $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
+@@ -1484,17 +1530,19 @@ else
+ target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
+ endif
+
+-%.s: %.c prepare scripts FORCE
++%.s: KBUILD_CFLAGS += $(GCC_PLUGINS)
++%.s: %.c gcc-plugins prepare scripts FORCE
+ $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+ %.i: %.c prepare scripts FORCE
+ $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+-%.o: %.c prepare scripts FORCE
++%.o: KBUILD_CFLAGS += $(GCC_PLUGINS)
++%.o: %.c gcc-plugins prepare scripts FORCE
+ $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+ %.lst: %.c prepare scripts FORCE
+ $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+-%.s: %.S prepare scripts FORCE
++%.s: %.S gcc-plugins prepare scripts FORCE
+ $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+-%.o: %.S prepare scripts FORCE
++%.o: %.S gcc-plugins prepare scripts FORCE
+ $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+ %.symtypes: %.c prepare scripts FORCE
+ $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+@@ -1504,11 +1552,13 @@ endif
+ $(cmd_crmodverdir)
+ $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
+ $(build)=$(build-dir)
+-%/: prepare scripts FORCE
++%/: KBUILD_CFLAGS += $(GCC_PLUGINS)
++%/: gcc-plugins prepare scripts FORCE
+ $(cmd_crmodverdir)
+ $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
+ $(build)=$(build-dir)
+-%.ko: prepare scripts FORCE
++%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS)
++%.ko: gcc-plugins prepare scripts FORCE
+ $(cmd_crmodverdir)
+ $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
+ $(build)=$(build-dir) $(@:.ko=.o)
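
The gcc-plugins target added above builds the PaX plugins under tools/gcc
against the stock GCC plugin ABI; when the headers are missing the build stops
with the $(error ...) shown, and make DISABLE_PAX_PLUGINS=y skips them. For
orientation, a minimal skeleton of the plugin shape those .so files follow
(illustrative, not one of the patch's plugins):

    #include "gcc-plugin.h"
    #include "plugin-version.h"

    int plugin_is_GPL_compatible;   /* required, or GCC refuses to load */

    int plugin_init(struct plugin_name_args *plugin_info,
                    struct plugin_gcc_version *version)
    {
        /* refuse to load into a GCC we were not built against */
        if (!plugin_default_version_check(version, &gcc_version))
            return 1;
        /* a real plugin registers its passes/attributes here, e.g. the
           constify or stackleak instrumentation */
        return 0;
    }
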
+diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
+index da5449e..7418343 100644
+--- a/arch/alpha/include/asm/elf.h
++++ b/arch/alpha/include/asm/elf.h
+@@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+
+ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
++
++#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
++#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
++#endif
++
+ /* $0 is set by ld.so to a pointer to a function which might be
+ registered using atexit. This provides a mean for the dynamic
+ linker to call DT_FINI functions for shared libraries that have
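
The PAX_DELTA_MMAP_LEN / PAX_DELTA_STACK_LEN values above are bit counts of
randomization. A sketch of how such a count typically becomes a page-aligned
offset (pax_get_random_long is the helper this patch adds; the wrapper itself
is illustrative):

    /* Sketch: N bits of entropy, shifted up to page granularity. */
    static unsigned long pax_delta(unsigned int len_bits)
    {
        return (pax_get_random_long() & ((1UL << len_bits) - 1)) << PAGE_SHIFT;
    }

So 28 bits on 64-bit alpha spreads the mmap base over a 2^(28+PAGE_SHIFT)-byte
span.
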
+diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
+index de98a73..bd4f1f8 100644
+--- a/arch/alpha/include/asm/pgtable.h
++++ b/arch/alpha/include/asm/pgtable.h
+@@ -101,6 +101,17 @@ struct vm_area_struct;
+ #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
+ #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
+ #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
++# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
++# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_COPY_NOEXEC PAGE_COPY
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++#endif
++
+ #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
+
+ #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
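
These *_NOEXEC variants give PAGEEXEC something to hand out when a mapping is
requested without PROT_EXEC; the base alpha protections never set _PAGE_FOE,
so every user page is otherwise executable. Roughly, the arch's protection
table ends up distinguishing the two cases (illustrative sketch, not the
patch's exact table):

    /* Illustrative: VM_READ/VM_WRITE used as indices, as protection_map
       does; only a few entries shown. */
    static const pgprot_t prot_example[] = {
        [VM_READ]            = PAGE_READONLY_NOEXEC, /* r-- : no fetch  */
        [VM_READ | VM_WRITE] = PAGE_COPY_NOEXEC,     /* rw- private     */
        [VM_READ | VM_EXEC]  = PAGE_READONLY,        /* r-x : unchanged */
    };
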
+diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
+index 2fd00b7..cfd5069 100644
+--- a/arch/alpha/kernel/module.c
++++ b/arch/alpha/kernel/module.c
+@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
+
+ /* The small sections were sorted to the end of the segment.
+ The following should definitely cover them. */
+- gp = (u64)me->module_core + me->core_size - 0x8000;
++ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
+ got = sechdrs[me->arch.gotsecindex].sh_addr;
+
+ for (i = 0; i < n; i++) {
+diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
+index 01e8715..be0e80f 100644
+--- a/arch/alpha/kernel/osf_sys.c
++++ b/arch/alpha/kernel/osf_sys.c
+@@ -1147,7 +1147,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
+ /* At this point: (!vma || addr < vma->vm_end). */
+ if (limit - len < addr)
+ return -ENOMEM;
+- if (!vma || addr + len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr, len))
+ return addr;
+ addr = vma->vm_end;
+ vma = vma->vm_next;
+@@ -1183,6 +1183,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ merely specific addresses, but regions of memory -- perhaps
+ this feature should be incorporated into all ports? */
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
+ if (addr != (unsigned long) -ENOMEM)
+@@ -1190,8 +1194,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ }
+
+ /* Next, try allocating at TASK_UNMAPPED_BASE. */
+- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
+- len, limit);
++ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
++
+ if (addr != (unsigned long) -ENOMEM)
+ return addr;
+
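
check_heap_stack_gap(), introduced elsewhere in this patch, replaces the
open-coded "!vma || addr + len <= vma->vm_start" test throughout the tree so
that a candidate mapping must also keep a configurable gap below any
downward-growing stack vma. A simplified sketch of the idea (the real helper
consults the patch's heap-stack-gap sysctl; names here are assumptions):

    static bool check_heap_stack_gap_sketch(const struct vm_area_struct *vma,
                                            unsigned long addr,
                                            unsigned long len)
    {
        if (!vma)                               /* nothing above us */
            return true;
        if (addr + len > vma->vm_start)         /* overlaps the next vma */
            return false;
        if (!(vma->vm_flags & VM_GROWSDOWN))    /* plain mapping: it fits */
            return true;
        /* keep a guard gap below a stack that may still grow down */
        return addr + len + (sysctl_heap_stack_gap << PAGE_SHIFT)
                <= vma->vm_start;
    }
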
+diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
+index fadd5f8..904e73a 100644
+--- a/arch/alpha/mm/fault.c
++++ b/arch/alpha/mm/fault.c
+@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
+ __reload_thread(pcb);
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++/*
++ * PaX: decide what to do with offenders (regs->pc = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when patched PLT trampoline was detected
++ * 3 when unpatched PLT trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++ int err;
++
++ do { /* PaX: patched PLT emulation #1 */
++ unsigned int ldah, ldq, jmp;
++
++ err = get_user(ldah, (unsigned int *)regs->pc);
++ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
++ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
++ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
++ jmp == 0x6BFB0000U)
++ {
++ unsigned long r27, addr;
++ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
++ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
++
++ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
++ err = get_user(r27, (unsigned long *)addr);
++ if (err)
++ break;
++
++ regs->r27 = r27;
++ regs->pc = r27;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #2 */
++ unsigned int ldah, lda, br;
++
++ err = get_user(ldah, (unsigned int *)regs->pc);
++ err |= get_user(lda, (unsigned int *)(regs->pc+4));
++ err |= get_user(br, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
++ (lda & 0xFFFF0000U) == 0xA77B0000U &&
++ (br & 0xFFE00000U) == 0xC3E00000U)
++ {
++ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
++ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
++ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
++
++ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
++ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation */
++ unsigned int br;
++
++ err = get_user(br, (unsigned int *)regs->pc);
++
++ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
++ unsigned int br2, ldq, nop, jmp;
++ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
++
++ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
++ err = get_user(br2, (unsigned int *)addr);
++ err |= get_user(ldq, (unsigned int *)(addr+4));
++ err |= get_user(nop, (unsigned int *)(addr+8));
++ err |= get_user(jmp, (unsigned int *)(addr+12));
++ err |= get_user(resolver, (unsigned long *)(addr+16));
++
++ if (err)
++ break;
++
++ if (br2 == 0xC3600000U &&
++ ldq == 0xA77B000CU &&
++ nop == 0x47FF041FU &&
++ jmp == 0x6B7B0000U)
++ {
++ regs->r28 = regs->pc+4;
++ regs->r27 = addr+16;
++ regs->pc = resolver;
++ return 3;
++ }
++ }
++ } while (0);
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
+
+ /*
+ * This routine handles page faults. It determines the address,
+@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
+ good_area:
+ si_code = SEGV_ACCERR;
+ if (cause < 0) {
+- if (!(vma->vm_flags & VM_EXEC))
++ if (!(vma->vm_flags & VM_EXEC)) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
++ goto bad_area;
++
++ up_read(&mm->mmap_sem);
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 2:
++ case 3:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
++ do_group_exit(SIGKILL);
++#else
+ goto bad_area;
++#endif
++
++ }
+ } else if (!cause) {
+ /* Allow reads even for write-only mappings */
+ if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
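
The "(x ^ 0x8000UL) + 0x8000UL" arithmetic in the PLT decoding above is a
branchless sign extension: the 16-bit displacement is first OR-ed with ones
above bit 15, and the xor/add pair then leaves the value intact when bit 15
is set, but carries through and clears the pre-set high bits when it is not.
A standalone worked example (assumes 64-bit unsigned long, as on alpha):

    #include <stdio.h>

    static unsigned long sext16(unsigned long insn_low16)
    {
        unsigned long x = insn_low16 | 0xFFFFFFFFFFFF0000UL;
        return (x ^ 0x8000UL) + 0x8000UL;
    }

    int main(void)
    {
        printf("%lx\n", sext16(0x7FFF)); /* 7fff: positive, high bits clear */
        printf("%lx\n", sext16(0x8000)); /* ffffffffffff8000: negative */
        return 0;
    }
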
+diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
+index 86976d0..8a57797 100644
+--- a/arch/arm/include/asm/atomic.h
++++ b/arch/arm/include/asm/atomic.h
+@@ -239,6 +239,14 @@ typedef struct {
+ u64 __aligned(8) counter;
+ } atomic64_t;
+
++#ifdef CONFIG_PAX_REFCOUNT
++typedef struct {
++ u64 __aligned(8) counter;
++} atomic64_unchecked_t;
++#else
++typedef atomic64_t atomic64_unchecked_t;
++#endif
++
+ #define ATOMIC64_INIT(i) { (i) }
+
+ static inline u64 atomic64_read(atomic64_t *v)
+diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
+index 0e9ce8d..6ef1e03 100644
+--- a/arch/arm/include/asm/elf.h
++++ b/arch/arm/include/asm/elf.h
+@@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
++#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
++
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE 0x00008000UL
++
++#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
++#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
++#endif
+
+ /* When the program starts, a1 contains a pointer to a function to be
+ registered with atexit, as per the SVR4 ABI. A value of 0 means we
+@@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
+ extern void elf_set_personality(const struct elf32_hdr *);
+ #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
+
+-struct mm_struct;
+-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
+-#define arch_randomize_brk arch_randomize_brk
+-
+ extern int vectors_user_mapping(void);
+ #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
+ #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
+diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
+index e51b1e8..32a3113 100644
+--- a/arch/arm/include/asm/kmap_types.h
++++ b/arch/arm/include/asm/kmap_types.h
+@@ -21,6 +21,7 @@ enum km_type {
+ KM_L1_CACHE,
+ KM_L2_CACHE,
+ KM_KDB,
++ KM_CLEARPAGE,
+ KM_TYPE_NR
+ };
+
+diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
+index b293616..96310e5 100644
+--- a/arch/arm/include/asm/uaccess.h
++++ b/arch/arm/include/asm/uaccess.h
+@@ -22,6 +22,8 @@
+ #define VERIFY_READ 0
+ #define VERIFY_WRITE 1
+
++extern void check_object_size(const void *ptr, unsigned long n, bool to);
++
+ /*
+ * The exception table consists of pairs of addresses: the first is the
+ * address of an instruction that is allowed to fault, and the second is
+@@ -387,8 +389,23 @@ do { \
+
+
+ #ifdef CONFIG_MMU
+-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
+-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
++extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
++extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
++
++static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
++{
++ if (!__builtin_constant_p(n))
++ check_object_size(to, n, false);
++ return ___copy_from_user(to, from, n);
++}
++
++static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
++{
++ if (!__builtin_constant_p(n))
++ check_object_size(from, n, true);
++ return ___copy_to_user(to, from, n);
++}
++
+ extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
+ extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
+ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
+@@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
+
+ static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ if (access_ok(VERIFY_READ, from, n))
+ n = __copy_from_user(to, from, n);
+ else /* security hole - plug it */
+@@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
+
+ static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ if (access_ok(VERIFY_WRITE, to, n))
+ n = __copy_to_user(to, from, n);
+ return n;
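
The new "(long)n < 0" guards above cheaply reject sizes whose top bit is set,
i.e. the huge values an integer underflow produces, before access_ok() or the
copy routines ever see them. A userland-style illustration of the failure
mode they target (not kernel code):

    #include <stdio.h>

    int main(void)
    {
        unsigned long have = 16, want = 32;
        unsigned long n = have - want;      /* wraps to 0xff...fff0 */

        if ((long)n < 0)                    /* the patch's sanity check */
            printf("rejected bogus length %lu\n", n);
        return 0;
    }
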
+diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
+index 5b0bce6..becd81c 100644
+--- a/arch/arm/kernel/armksyms.c
++++ b/arch/arm/kernel/armksyms.c
+@@ -95,8 +95,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
+ #ifdef CONFIG_MMU
+ EXPORT_SYMBOL(copy_page);
+
+-EXPORT_SYMBOL(__copy_from_user);
+-EXPORT_SYMBOL(__copy_to_user);
++EXPORT_SYMBOL(___copy_from_user);
++EXPORT_SYMBOL(___copy_to_user);
+ EXPORT_SYMBOL(__clear_user);
+
+ EXPORT_SYMBOL(__get_user_1);
+diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
+index 3d0c6fb..3dcae52 100644
+--- a/arch/arm/kernel/process.c
++++ b/arch/arm/kernel/process.c
+@@ -28,7 +28,6 @@
+ #include <linux/tick.h>
+ #include <linux/utsname.h>
+ #include <linux/uaccess.h>
+-#include <linux/random.h>
+ #include <linux/hw_breakpoint.h>
+ #include <linux/cpuidle.h>
+
+@@ -484,12 +483,6 @@ unsigned long get_wchan(struct task_struct *p)
+ return 0;
+ }
+
+-unsigned long arch_randomize_brk(struct mm_struct *mm)
+-{
+- unsigned long range_end = mm->brk + 0x02000000;
+- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
+-}
+-
+ #ifdef CONFIG_MMU
+ /*
+ * The vectors page is always readable from user space for the
+diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
+index 99a5727..a3d5bb1 100644
+--- a/arch/arm/kernel/traps.c
++++ b/arch/arm/kernel/traps.c
+@@ -259,6 +259,8 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
+
+ static DEFINE_RAW_SPINLOCK(die_lock);
+
++extern void gr_handle_kernel_exploit(void);
++
+ /*
+ * This function is protected against re-entrancy.
+ */
+@@ -288,6 +290,9 @@ void die(const char *str, struct pt_regs *regs, int err)
+ panic("Fatal exception in interrupt");
+ if (panic_on_oops)
+ panic("Fatal exception");
++
++ gr_handle_kernel_exploit();
++
+ if (ret != NOTIFY_STOP)
+ do_exit(SIGSEGV);
+ }
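
gr_handle_kernel_exploit() itself is defined in the grsecurity/ directory this
patch adds; conceptually, on a fatal kernel fault it punishes the offending
user rather than only the dying task. A hypothetical stand-in, purely to show
the shape of the hook (not the patch's implementation, which also bans the
uid until reboot):

    /* Hypothetical stand-in, not the patch's code. */
    void gr_handle_kernel_exploit_sketch(void)
    {
        /* only react when a userland task crashed the kernel */
        if (!in_interrupt() && current->mm)
            do_group_exit(SIGKILL);
    }
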
+diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
+index 66a477a..bee61d3 100644
+--- a/arch/arm/lib/copy_from_user.S
++++ b/arch/arm/lib/copy_from_user.S
+@@ -16,7 +16,7 @@
+ /*
+ * Prototype:
+ *
+- * size_t __copy_from_user(void *to, const void *from, size_t n)
++ * size_t ___copy_from_user(void *to, const void *from, size_t n)
+ *
+ * Purpose:
+ *
+@@ -84,11 +84,11 @@
+
+ .text
+
+-ENTRY(__copy_from_user)
++ENTRY(___copy_from_user)
+
+ #include "copy_template.S"
+
+-ENDPROC(__copy_from_user)
++ENDPROC(___copy_from_user)
+
+ .pushsection .fixup,"ax"
+ .align 0
+diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
+index d066df6..df28194 100644
+--- a/arch/arm/lib/copy_to_user.S
++++ b/arch/arm/lib/copy_to_user.S
+@@ -16,7 +16,7 @@
+ /*
+ * Prototype:
+ *
+- * size_t __copy_to_user(void *to, const void *from, size_t n)
++ * size_t ___copy_to_user(void *to, const void *from, size_t n)
+ *
+ * Purpose:
+ *
+@@ -88,11 +88,11 @@
+ .text
+
+ ENTRY(__copy_to_user_std)
+-WEAK(__copy_to_user)
++WEAK(___copy_to_user)
+
+ #include "copy_template.S"
+
+-ENDPROC(__copy_to_user)
++ENDPROC(___copy_to_user)
+ ENDPROC(__copy_to_user_std)
+
+ .pushsection .fixup,"ax"
+diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
+index d0ece2a..5ae2f39 100644
+--- a/arch/arm/lib/uaccess.S
++++ b/arch/arm/lib/uaccess.S
+@@ -20,7 +20,7 @@
+
+ #define PAGE_SHIFT 12
+
+-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
++/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
+ * Purpose : copy a block to user memory from kernel memory
+ * Params : to - user memory
+ * : from - kernel memory
+@@ -40,7 +40,7 @@ USER( T(strgtb) r3, [r0], #1) @ May fault
+ sub r2, r2, ip
+ b .Lc2u_dest_aligned
+
+-ENTRY(__copy_to_user)
++ENTRY(___copy_to_user)
+ stmfd sp!, {r2, r4 - r7, lr}
+ cmp r2, #4
+ blt .Lc2u_not_enough
+@@ -278,14 +278,14 @@ USER( T(strgeb) r3, [r0], #1) @ May fault
+ ldrgtb r3, [r1], #0
+ USER( T(strgtb) r3, [r0], #1) @ May fault
+ b .Lc2u_finished
+-ENDPROC(__copy_to_user)
++ENDPROC(___copy_to_user)
+
+ .pushsection .fixup,"ax"
+ .align 0
+ 9001: ldmfd sp!, {r0, r4 - r7, pc}
+ .popsection
+
+-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
++/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
+ * Purpose : copy a block from user memory to kernel memory
+ * Params : to - kernel memory
+ * : from - user memory
+@@ -304,7 +304,7 @@ USER( T(ldrgtb) r3, [r1], #1) @ May fault
+ sub r2, r2, ip
+ b .Lcfu_dest_aligned
+
+-ENTRY(__copy_from_user)
++ENTRY(___copy_from_user)
+ stmfd sp!, {r0, r2, r4 - r7, lr}
+ cmp r2, #4
+ blt .Lcfu_not_enough
+@@ -544,7 +544,7 @@ USER( T(ldrgeb) r3, [r1], #1) @ May fault
+ USER( T(ldrgtb) r3, [r1], #1) @ May fault
+ strgtb r3, [r0], #1
+ b .Lcfu_finished
+-ENDPROC(__copy_from_user)
++ENDPROC(___copy_from_user)
+
+ .pushsection .fixup,"ax"
+ .align 0
+diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
+index 025f742..8432b08 100644
+--- a/arch/arm/lib/uaccess_with_memcpy.c
++++ b/arch/arm/lib/uaccess_with_memcpy.c
+@@ -104,7 +104,7 @@ out:
+ }
+
+ unsigned long
+-__copy_to_user(void __user *to, const void *from, unsigned long n)
++___copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
+ /*
+ * This test is stubbed out of the main function above to keep
+diff --git a/arch/arm/mach-ux500/mbox-db5500.c b/arch/arm/mach-ux500/mbox-db5500.c
+index 2b2d51c..0127490 100644
+--- a/arch/arm/mach-ux500/mbox-db5500.c
++++ b/arch/arm/mach-ux500/mbox-db5500.c
+@@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct device *dev,
+ return sprintf(buf, "0x%X\n", mbox_value);
+ }
+
+-static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
++static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
+
+ static int mbox_show(struct seq_file *s, void *data)
+ {
+diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
+index aa33949..b242a2f 100644
+--- a/arch/arm/mm/fault.c
++++ b/arch/arm/mm/fault.c
+@@ -183,6 +183,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
+ }
+ #endif
+
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (fsr & FSR_LNX_PF) {
++ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
++ do_group_exit(SIGKILL);
++ }
++#endif
++
+ tsk->thread.address = addr;
+ tsk->thread.error_code = fsr;
+ tsk->thread.trap_no = 14;
+@@ -384,6 +391,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+ }
+ #endif /* CONFIG_MMU */
+
++#ifdef CONFIG_PAX_PAGEEXEC
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 20; i++) {
++ unsigned char c;
++ if (get_user(c, (__force unsigned char __user *)pc+i))
++ printk(KERN_CONT "?? ");
++ else
++ printk(KERN_CONT "%02x ", c);
++ }
++ printk("\n");
++
++ printk(KERN_ERR "PAX: bytes at SP-4: ");
++ for (i = -1; i < 20; i++) {
++ unsigned long c;
++ if (get_user(c, (__force unsigned long __user *)sp+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08lx ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ /*
+ * First Level Translation Fault Handler
+ *
+diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
+index 44b628e..623ee2a 100644
+--- a/arch/arm/mm/mmap.c
++++ b/arch/arm/mm/mmap.c
+@@ -54,6 +54,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ if (len > TASK_SIZE)
+ return -ENOMEM;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ if (do_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+@@ -61,15 +65,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
+ return addr;
+ }
+ if (len > mm->cached_hole_size) {
+- start_addr = addr = mm->free_area_cache;
++ start_addr = addr = mm->free_area_cache;
+ } else {
+- start_addr = addr = TASK_UNMAPPED_BASE;
+- mm->cached_hole_size = 0;
++ start_addr = addr = mm->mmap_base;
++ mm->cached_hole_size = 0;
+ }
+ /* 8 bits of randomness in 20 address space bits */
+ if ((current->flags & PF_RANDOMIZE) &&
+@@ -89,14 +92,14 @@ full_search:
+ * Start a new search - just in case we missed
+ * some holes.
+ */
+- if (start_addr != TASK_UNMAPPED_BASE) {
+- start_addr = addr = TASK_UNMAPPED_BASE;
++ if (start_addr != mm->mmap_base) {
++ start_addr = addr = mm->mmap_base;
+ mm->cached_hole_size = 0;
+ goto full_search;
+ }
+ return -ENOMEM;
+ }
+- if (!vma || addr + len <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, addr, len)) {
+ /*
+ * Remember the place where we stopped the search:
+ */
+diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
+index 3b3159b..425ea94 100644
+--- a/arch/avr32/include/asm/elf.h
++++ b/arch/avr32/include/asm/elf.h
+@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
++#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE 0x00001000UL
++
++#define PAX_DELTA_MMAP_LEN 15
++#define PAX_DELTA_STACK_LEN 15
++#endif
+
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this CPU supports. This could be done in user space,
+diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
+index b7f5c68..556135c 100644
+--- a/arch/avr32/include/asm/kmap_types.h
++++ b/arch/avr32/include/asm/kmap_types.h
+@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
+ D(11) KM_IRQ1,
+ D(12) KM_SOFTIRQ0,
+ D(13) KM_SOFTIRQ1,
+-D(14) KM_TYPE_NR
++D(14) KM_CLEARPAGE,
++D(15) KM_TYPE_NR
+ };
+
+ #undef D
+diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
+index f7040a1..db9f300 100644
+--- a/arch/avr32/mm/fault.c
++++ b/arch/avr32/mm/fault.c
+@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
+
+ int exception_trace = 1;
+
++#ifdef CONFIG_PAX_PAGEEXEC
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 20; i++) {
++ unsigned char c;
++ if (get_user(c, (unsigned char *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%02x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ /*
+ * This routine handles page faults. It determines the address and the
+ * problem, and then passes it off to one of the appropriate routines.
+@@ -156,6 +173,16 @@ bad_area:
+ up_read(&mm->mmap_sem);
+
+ if (user_mode(regs)) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
++ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
++ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
++ do_group_exit(SIGKILL);
++ }
++ }
++#endif
++
+ if (exception_trace && printk_ratelimit())
+ printk("%s%s[%d]: segfault at %08lx pc %08lx "
+ "sp %08lx ecr %lu\n",
+diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
+index f8e16b2..c73ff79 100644
+--- a/arch/frv/include/asm/kmap_types.h
++++ b/arch/frv/include/asm/kmap_types.h
+@@ -23,6 +23,7 @@ enum km_type {
+ KM_IRQ1,
+ KM_SOFTIRQ0,
+ KM_SOFTIRQ1,
++ KM_CLEARPAGE,
+ KM_TYPE_NR
+ };
+
+diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
+index 385fd30..6c3d97e 100644
+--- a/arch/frv/mm/elf-fdpic.c
++++ b/arch/frv/mm/elf-fdpic.c
+@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(current->mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
+ goto success;
+ }
+
+@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+ for (; vma; vma = vma->vm_next) {
+ if (addr > limit)
+ break;
+- if (addr + len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr, len))
+ goto success;
+ addr = vma->vm_end;
+ }
+@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+ for (; vma; vma = vma->vm_next) {
+ if (addr > limit)
+ break;
+- if (addr + len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr, len))
+ goto success;
+ addr = vma->vm_end;
+ }
+diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
+index b5298eb..67c6e62 100644
+--- a/arch/ia64/include/asm/elf.h
++++ b/arch/ia64/include/asm/elf.h
+@@ -42,6 +42,13 @@
+ */
+ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
++
++#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
++#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
++#endif
++
+ #define PT_IA_64_UNWIND 0x70000001
+
+ /* IA-64 relocations: */
+diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
+index 1a97af3..7529d31 100644
+--- a/arch/ia64/include/asm/pgtable.h
++++ b/arch/ia64/include/asm/pgtable.h
+@@ -12,7 +12,7 @@
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+-
++#include <linux/const.h>
+ #include <asm/mman.h>
+ #include <asm/page.h>
+ #include <asm/processor.h>
+@@ -143,6 +143,17 @@
+ #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
+ #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
+ #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
++# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
++# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++# define PAGE_COPY_NOEXEC PAGE_COPY
++#endif
++
+ #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
+ #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
+ #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
+diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
+index b77768d..e0795eb 100644
+--- a/arch/ia64/include/asm/spinlock.h
++++ b/arch/ia64/include/asm/spinlock.h
+@@ -72,7 +72,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
+ unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
+
+ asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
+- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
++ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
+ }
+
+ static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
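
ACCESS_ONCE_RW is a PaX companion to ACCESS_ONCE: elsewhere the patch hardens
ACCESS_ONCE into a const-qualified access, so intentional writes through it
need this variant. Its expected definition, assuming it mirrors the stock
macro minus the const:

    /* Assumed shape: ACCESS_ONCE without the const qualifier. */
    #define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
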
+diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
+index 449c8c0..432a3d2 100644
+--- a/arch/ia64/include/asm/uaccess.h
++++ b/arch/ia64/include/asm/uaccess.h
+@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
+ const void *__cu_from = (from); \
+ long __cu_len = (n); \
+ \
+- if (__access_ok(__cu_to, __cu_len, get_fs())) \
++ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
+ __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
+ __cu_len; \
+ })
+@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
+ long __cu_len = (n); \
+ \
+ __chk_user_ptr(__cu_from); \
+- if (__access_ok(__cu_from, __cu_len, get_fs())) \
++ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
+ __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
+ __cu_len; \
+ })
+diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
+index 24603be..948052d 100644
+--- a/arch/ia64/kernel/module.c
++++ b/arch/ia64/kernel/module.c
+@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
+ void
+ module_free (struct module *mod, void *module_region)
+ {
+- if (mod && mod->arch.init_unw_table &&
+- module_region == mod->module_init) {
++ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
+ unw_remove_unwind_table(mod->arch.init_unw_table);
+ mod->arch.init_unw_table = NULL;
+ }
+@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
+ }
+
+ static inline int
++in_init_rx (const struct module *mod, uint64_t addr)
++{
++ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
++}
++
++static inline int
++in_init_rw (const struct module *mod, uint64_t addr)
++{
++ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
++}
++
++static inline int
+ in_init (const struct module *mod, uint64_t addr)
+ {
+- return addr - (uint64_t) mod->module_init < mod->init_size;
++ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
++}
++
++static inline int
++in_core_rx (const struct module *mod, uint64_t addr)
++{
++ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
++}
++
++static inline int
++in_core_rw (const struct module *mod, uint64_t addr)
++{
++ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
+ }
+
+ static inline int
+ in_core (const struct module *mod, uint64_t addr)
+ {
+- return addr - (uint64_t) mod->module_core < mod->core_size;
++ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
+ }
+
+ static inline int
+@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
+ break;
+
+ case RV_BDREL:
+- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
++ if (in_init_rx(mod, val))
++ val -= (uint64_t) mod->module_init_rx;
++ else if (in_init_rw(mod, val))
++ val -= (uint64_t) mod->module_init_rw;
++ else if (in_core_rx(mod, val))
++ val -= (uint64_t) mod->module_core_rx;
++ else if (in_core_rw(mod, val))
++ val -= (uint64_t) mod->module_core_rw;
+ break;
+
+ case RV_LTV:
+@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
+ * addresses have been selected...
+ */
+ uint64_t gp;
+- if (mod->core_size > MAX_LTOFF)
++ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
+ /*
+ * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
+ * at the end of the module.
+ */
+- gp = mod->core_size - MAX_LTOFF / 2;
++ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
+ else
+- gp = mod->core_size / 2;
+- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
++ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
++ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
+ mod->arch.gp = gp;
+ DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
+ }
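
The module_{core,init}_{rx,rw} pointers and *_size_{rx,rw} fields used in
these hunks come from the patch's split of struct module's single core/init
regions into separate read-execute and read-write allocations, so module text
can be mapped non-writable. Schematically (a sketch; the real fields are
added to struct module elsewhere in this patch):

    struct module_mem_split {
        void *module_core_rx;               /* text + rodata, mapped RX */
        void *module_core_rw;               /* data + bss, mapped RW */
        void *module_init_rx, *module_init_rw;
        unsigned int core_size_rx, core_size_rw;
        unsigned int init_size_rx, init_size_rw;
    };
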
+diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
+index 609d500..7dde2a8 100644
+--- a/arch/ia64/kernel/sys_ia64.c
++++ b/arch/ia64/kernel/sys_ia64.c
+@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
+ if (REGION_NUMBER(addr) == RGN_HPAGE)
+ addr = 0;
+ #endif
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ addr = mm->free_area_cache;
++ else
++#endif
++
+ if (!addr)
+ addr = mm->free_area_cache;
+
+@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
+ /* At this point: (!vma || addr < vma->vm_end). */
+ if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
+- if (start_addr != TASK_UNMAPPED_BASE) {
++ if (start_addr != mm->mmap_base) {
+ /* Start a new search --- just in case we missed some holes. */
+- addr = TASK_UNMAPPED_BASE;
++ addr = mm->mmap_base;
+ goto full_search;
+ }
+ return -ENOMEM;
+ }
+- if (!vma || addr + len <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, addr, len)) {
+ /* Remember the address where we stopped this search: */
+ mm->free_area_cache = addr + len;
+ return addr;
+diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
+index 53c0ba0..2accdde 100644
+--- a/arch/ia64/kernel/vmlinux.lds.S
++++ b/arch/ia64/kernel/vmlinux.lds.S
+@@ -199,7 +199,7 @@ SECTIONS {
+ /* Per-cpu data: */
+ . = ALIGN(PERCPU_PAGE_SIZE);
+ PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
+- __phys_per_cpu_start = __per_cpu_load;
++ __phys_per_cpu_start = per_cpu_load;
+ /*
+ * ensure percpu data fits
+ * into percpu page size
+diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
+index 20b3593..1ce77f0 100644
+--- a/arch/ia64/mm/fault.c
++++ b/arch/ia64/mm/fault.c
+@@ -73,6 +73,23 @@ mapped_kernel_page_is_present (unsigned long address)
+ return pte_present(pte);
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 8; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ void __kprobes
+ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
+ {
+@@ -146,9 +163,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
+ mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
+ | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
+
+- if ((vma->vm_flags & mask) != mask)
++ if ((vma->vm_flags & mask) != mask) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
++ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
++ goto bad_area;
++
++ up_read(&mm->mmap_sem);
++ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
++ do_group_exit(SIGKILL);
++ }
++#endif
++
+ goto bad_area;
+
++ }
++
+ /*
+ * If for any reason at all we couldn't handle the fault, make
+ * sure we exit gracefully rather than endlessly redo the
+diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
+index 5ca674b..e0e1b70 100644
+--- a/arch/ia64/mm/hugetlbpage.c
++++ b/arch/ia64/mm/hugetlbpage.c
+@@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
+ /* At this point: (!vmm || addr < vmm->vm_end). */
+ if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
+ return -ENOMEM;
+- if (!vmm || (addr + len) <= vmm->vm_start)
++ if (check_heap_stack_gap(vmm, addr, len))
+ return addr;
+ addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
+ }
+diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
+index 00cb0e2..2ad8024 100644
+--- a/arch/ia64/mm/init.c
++++ b/arch/ia64/mm/init.c
+@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
+ vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
+ vma->vm_end = vma->vm_start + PAGE_SIZE;
+ vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
++ vma->vm_flags &= ~VM_EXEC;
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (current->mm->pax_flags & MF_PAX_MPROTECT)
++ vma->vm_flags &= ~VM_MAYEXEC;
++#endif
++
++ }
++#endif
++
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+ down_write(&current->mm->mmap_sem);
+ if (insert_vm_struct(current->mm, vma)) {
+diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
+index 82abd15..d95ae5d 100644
+--- a/arch/m32r/lib/usercopy.c
++++ b/arch/m32r/lib/usercopy.c
+@@ -14,6 +14,9 @@
+ unsigned long
+ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ prefetch(from);
+ if (access_ok(VERIFY_WRITE, to, n))
+ __copy_user(to,from,n);
+@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
+ unsigned long
+ __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ prefetchw(to);
+ if (access_ok(VERIFY_READ, from, n))
+ __copy_user_zeroing(to,from,n);
+diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
+index 455c0ac..ad65fbe 100644
+--- a/arch/mips/include/asm/elf.h
++++ b/arch/mips/include/asm/elf.h
+@@ -372,13 +372,16 @@ extern const char *__elf_platform;
+ #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+ #endif
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
++
++#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#endif
++
+ #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
+ struct linux_binprm;
+ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
+ int uses_interp);
+
+-struct mm_struct;
+-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
+-#define arch_randomize_brk arch_randomize_brk
+-
+ #endif /* _ASM_ELF_H */
+diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
+index e59cd1a..8e329d6 100644
+--- a/arch/mips/include/asm/page.h
++++ b/arch/mips/include/asm/page.h
+@@ -93,7 +93,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
+ #ifdef CONFIG_CPU_MIPS32
+ typedef struct { unsigned long pte_low, pte_high; } pte_t;
+ #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
+- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
++ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
+ #else
+ typedef struct { unsigned long long pte; } pte_t;
+ #define pte_val(x) ((x).pte)
+diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
+index 6018c80..7c37203 100644
+--- a/arch/mips/include/asm/system.h
++++ b/arch/mips/include/asm/system.h
+@@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
+ */
+ #define __ARCH_WANT_UNLOCKED_CTXSW
+
+-extern unsigned long arch_align_stack(unsigned long sp);
++#define arch_align_stack(x) ((x) & ~0xfUL)
+
+ #endif /* _ASM_SYSTEM_H */
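
Turning arch_align_stack() into a constant macro removes the per-exec stack
jitter (the C implementation deleted from arch/mips/kernel/process.c further
below subtracted a random sub-page amount; PaX supplies its own stack
randomization instead). The new definition simply rounds down to a 16-byte
boundary; a standalone worked example:

    #include <stdio.h>

    static unsigned long align_stack(unsigned long sp)
    {
        return sp & ~0xfUL;     /* clear the low 4 bits */
    }

    int main(void)
    {
        printf("%#lx\n", align_stack(0x7fff1234UL));  /* 0x7fff1230 */
        return 0;
    }
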
+diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
+index 9fdd8bc..4bd7f1a 100644
+--- a/arch/mips/kernel/binfmt_elfn32.c
++++ b/arch/mips/kernel/binfmt_elfn32.c
+@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+ #undef ELF_ET_DYN_BASE
+ #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
++
++#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#endif
++
+ #include <asm/processor.h>
+ #include <linux/module.h>
+ #include <linux/elfcore.h>
+diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
+index ff44823..97f8906 100644
+--- a/arch/mips/kernel/binfmt_elfo32.c
++++ b/arch/mips/kernel/binfmt_elfo32.c
+@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+ #undef ELF_ET_DYN_BASE
+ #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
++
++#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#endif
++
+ #include <asm/processor.h>
+
+ /*
+diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
+index c47f96e..661d418 100644
+--- a/arch/mips/kernel/process.c
++++ b/arch/mips/kernel/process.c
+@@ -481,15 +481,3 @@ unsigned long get_wchan(struct task_struct *task)
+ out:
+ return pc;
+ }
+-
+-/*
+- * Don't forget that the stack pointer must be aligned on a 8 bytes
+- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
+- */
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+- sp -= get_random_int() & ~PAGE_MASK;
+-
+- return sp & ALMASK;
+-}
+diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
+index 937cf33..adb39bb 100644
+--- a/arch/mips/mm/fault.c
++++ b/arch/mips/mm/fault.c
+@@ -28,6 +28,23 @@
+ #include <asm/highmem.h> /* For VMALLOC_END */
+ #include <linux/kdebug.h>
+
++#ifdef CONFIG_PAX_PAGEEXEC
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ /*
+ * This routine handles page faults. It determines the address,
+ * and the problem, and then passes it off to one of the appropriate
+diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
+index 302d779..7d35bf8 100644
+--- a/arch/mips/mm/mmap.c
++++ b/arch/mips/mm/mmap.c
+@@ -95,6 +95,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
+ do_color_align = 1;
+
+ /* requesting a specific address */
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ if (do_color_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+@@ -102,8 +107,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
+ return addr;
+ }
+
+@@ -118,7 +122,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
+ /* At this point: (!vma || addr < vma->vm_end). */
+ if (TASK_SIZE - len < addr)
+ return -ENOMEM;
+- if (!vma || addr + len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr, len))
+ return addr;
+ addr = vma->vm_end;
+ if (do_color_align)
+@@ -145,7 +149,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
+ /* make sure it can fit in the remaining address space */
+ if (likely(addr > len)) {
+ vma = find_vma(mm, addr - len);
+- if (!vma || addr <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, addr - len, len)) {
+ /* cache the address as a hint for next time */
+ return mm->free_area_cache = addr - len;
+ }
+@@ -165,7 +169,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
+- if (likely(!vma || addr + len <= vma->vm_start)) {
++ if (check_heap_stack_gap(vma, addr, len)) {
+ /* cache the address as a hint for next time */
+ return mm->free_area_cache = addr;
+ }
+@@ -242,30 +246,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+ mm->unmap_area = arch_unmap_area_topdown;
+ }
+ }
+-
+-static inline unsigned long brk_rnd(void)
+-{
+- unsigned long rnd = get_random_int();
+-
+- rnd = rnd << PAGE_SHIFT;
+- /* 8MB for 32bit, 256MB for 64bit */
+- if (TASK_IS_32BIT_ADDR)
+- rnd = rnd & 0x7ffffful;
+- else
+- rnd = rnd & 0xffffffful;
+-
+- return rnd;
+-}
+-
+-unsigned long arch_randomize_brk(struct mm_struct *mm)
+-{
+- unsigned long base = mm->brk;
+- unsigned long ret;
+-
+- ret = PAGE_ALIGN(base + brk_rnd());
+-
+- if (ret < mm->brk)
+- return mm->brk;
+-
+- return ret;
+-}
+diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
+index 19f6cb1..6c78cf2 100644
+--- a/arch/parisc/include/asm/elf.h
++++ b/arch/parisc/include/asm/elf.h
+@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
+
+ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE 0x10000UL
++
++#define PAX_DELTA_MMAP_LEN 16
++#define PAX_DELTA_STACK_LEN 16
++#endif
++
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this CPU supports. This could be done in user space,
+ but it's not easy, and we've already done it here. */
+diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
+index 22dadeb..f6c2be4 100644
+--- a/arch/parisc/include/asm/pgtable.h
++++ b/arch/parisc/include/asm/pgtable.h
+@@ -210,6 +210,17 @@ struct vm_area_struct;
+ #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
+ #define PAGE_COPY PAGE_EXECREAD
+ #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
++# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
++# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_COPY_NOEXEC PAGE_COPY
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++#endif
++
+ #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
+ #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
+ #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
+diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
+index 5e34ccf..672bc9c 100644
+--- a/arch/parisc/kernel/module.c
++++ b/arch/parisc/kernel/module.c
+@@ -98,16 +98,38 @@
+
+ /* three functions to determine where in the module core
+ * or init pieces the location is */
++static inline int in_init_rx(struct module *me, void *loc)
++{
++ return (loc >= me->module_init_rx &&
++ loc < (me->module_init_rx + me->init_size_rx));
++}
++
++static inline int in_init_rw(struct module *me, void *loc)
++{
++ return (loc >= me->module_init_rw &&
++ loc < (me->module_init_rw + me->init_size_rw));
++}
++
+ static inline int in_init(struct module *me, void *loc)
+ {
+- return (loc >= me->module_init &&
+- loc <= (me->module_init + me->init_size));
++ return in_init_rx(me, loc) || in_init_rw(me, loc);
++}
++
++static inline int in_core_rx(struct module *me, void *loc)
++{
++ return (loc >= me->module_core_rx &&
++ loc < (me->module_core_rx + me->core_size_rx));
++}
++
++static inline int in_core_rw(struct module *me, void *loc)
++{
++ return (loc >= me->module_core_rw &&
++ loc < (me->module_core_rw + me->core_size_rw));
+ }
+
+ static inline int in_core(struct module *me, void *loc)
+ {
+- return (loc >= me->module_core &&
+- loc <= (me->module_core + me->core_size));
++ return in_core_rx(me, loc) || in_core_rw(me, loc);
+ }
+
+ static inline int in_local(struct module *me, void *loc)
+@@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
+ }
+
+ /* align things a bit */
+- me->core_size = ALIGN(me->core_size, 16);
+- me->arch.got_offset = me->core_size;
+- me->core_size += gots * sizeof(struct got_entry);
++ me->core_size_rw = ALIGN(me->core_size_rw, 16);
++ me->arch.got_offset = me->core_size_rw;
++ me->core_size_rw += gots * sizeof(struct got_entry);
+
+- me->core_size = ALIGN(me->core_size, 16);
+- me->arch.fdesc_offset = me->core_size;
+- me->core_size += fdescs * sizeof(Elf_Fdesc);
++ me->core_size_rw = ALIGN(me->core_size_rw, 16);
++ me->arch.fdesc_offset = me->core_size_rw;
++ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
+
+ me->arch.got_max = gots;
+ me->arch.fdesc_max = fdescs;
+@@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
+
+ BUG_ON(value == 0);
+
+- got = me->module_core + me->arch.got_offset;
++ got = me->module_core_rw + me->arch.got_offset;
+ for (i = 0; got[i].addr; i++)
+ if (got[i].addr == value)
+ goto out;
+@@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
+ #ifdef CONFIG_64BIT
+ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
+ {
+- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
++ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
+
+ if (!value) {
+ printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
+@@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
+
+ /* Create new one */
+ fdesc->addr = value;
+- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
++ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
+ return (Elf_Addr)fdesc;
+ }
+ #endif /* CONFIG_64BIT */
+@@ -845,7 +867,7 @@ register_unwind_table(struct module *me,
+
+ table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
+ end = table + sechdrs[me->arch.unwind_section].sh_size;
+- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
++ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
+
+ DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
+ me->arch.unwind_section, table, end, gp);
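Every module_core/core_size reference above splits into an rx/rw pair. The
fields come from the include/linux/module.h hunks of this patch; the layout
assumed is roughly:

struct module {
        /* ... */
        void *module_core_rx, *module_core_rw;  /* text vs data halves */
        void *module_init_rx, *module_init_rw;
        unsigned long core_size_rx, core_size_rw;
        unsigned long init_size_rx, init_size_rw;
        /* ... */
};

Code and read-only data live in the rx half, while the GOT and fdesc tables
stay in the rw half, so no module address is ever writable and executable at
the same time.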
+diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
+index c9b9322..02d8940 100644
+--- a/arch/parisc/kernel/sys_parisc.c
++++ b/arch/parisc/kernel/sys_parisc.c
+@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
+ /* At this point: (!vma || addr < vma->vm_end). */
+ if (TASK_SIZE - len < addr)
+ return -ENOMEM;
+- if (!vma || addr + len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr, len))
+ return addr;
+ addr = vma->vm_end;
+ }
+@@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
+ /* At this point: (!vma || addr < vma->vm_end). */
+ if (TASK_SIZE - len < addr)
+ return -ENOMEM;
+- if (!vma || addr + len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr, len))
+ return addr;
+ addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
+ if (addr < vma->vm_end) /* handle wraparound */
+@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ if (flags & MAP_FIXED)
+ return addr;
+ if (!addr)
+- addr = TASK_UNMAPPED_BASE;
++ addr = current->mm->mmap_base;
+
+ if (filp) {
+ addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
+diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
+index f19e660..414fe24 100644
+--- a/arch/parisc/kernel/traps.c
++++ b/arch/parisc/kernel/traps.c
+@@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
+
+ down_read(&current->mm->mmap_sem);
+ vma = find_vma(current->mm,regs->iaoq[0]);
+- if (vma && (regs->iaoq[0] >= vma->vm_start)
+- && (vma->vm_flags & VM_EXEC)) {
+-
++ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
+ fault_address = regs->iaoq[0];
+ fault_space = regs->iasq[0];
+
+diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
+index 18162ce..94de376 100644
+--- a/arch/parisc/mm/fault.c
++++ b/arch/parisc/mm/fault.c
+@@ -15,6 +15,7 @@
+ #include <linux/sched.h>
+ #include <linux/interrupt.h>
+ #include <linux/module.h>
++#include <linux/unistd.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/traps.h>
+@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
+ static unsigned long
+ parisc_acctyp(unsigned long code, unsigned int inst)
+ {
+- if (code == 6 || code == 16)
++ if (code == 6 || code == 7 || code == 16)
+ return VM_EXEC;
+
+ switch (inst & 0xf0000000) {
+@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
+ }
+ #endif
+
++#ifdef CONFIG_PAX_PAGEEXEC
++/*
++ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when rt_sigreturn trampoline was detected
++ * 3 when unpatched PLT trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++ int err;
++
++ do { /* PaX: unpatched PLT emulation */
++ unsigned int bl, depwi;
++
++ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
++ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
++
++ if (err)
++ break;
++
++ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
++ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
++
++ err = get_user(ldw, (unsigned int *)addr);
++ err |= get_user(bv, (unsigned int *)(addr+4));
++ err |= get_user(ldw2, (unsigned int *)(addr+8));
++
++ if (err)
++ break;
++
++ if (ldw == 0x0E801096U &&
++ bv == 0xEAC0C000U &&
++ ldw2 == 0x0E881095U)
++ {
++ unsigned int resolver, map;
++
++ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
++ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
++ if (err)
++ break;
++
++ regs->gr[20] = instruction_pointer(regs)+8;
++ regs->gr[21] = map;
++ regs->gr[22] = resolver;
++ regs->iaoq[0] = resolver | 3UL;
++ regs->iaoq[1] = regs->iaoq[0] + 4;
++ return 3;
++ }
++ }
++ } while (0);
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++
++#ifndef CONFIG_PAX_EMUSIGRT
++ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
++ return 1;
++#endif
++
++ do { /* PaX: rt_sigreturn emulation */
++ unsigned int ldi1, ldi2, bel, nop;
++
++ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
++ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
++ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
++ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
++
++ if (err)
++ break;
++
++ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
++ ldi2 == 0x3414015AU &&
++ bel == 0xE4008200U &&
++ nop == 0x08000240U)
++ {
++ regs->gr[25] = (ldi1 & 2) >> 1;
++ regs->gr[20] = __NR_rt_sigreturn;
++ regs->gr[31] = regs->iaoq[1] + 16;
++ regs->sr[0] = regs->iasq[1];
++ regs->iaoq[0] = 0x100UL;
++ regs->iaoq[1] = regs->iaoq[0] + 4;
++ regs->iasq[0] = regs->sr[2];
++ regs->iasq[1] = regs->sr[2];
++ return 2;
++ }
++ } while (0);
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ int fixup_exception(struct pt_regs *regs)
+ {
+ const struct exception_table_entry *fix;
+@@ -192,8 +303,33 @@ good_area:
+
+ acc_type = parisc_acctyp(code,regs->iir);
+
+- if ((vma->vm_flags & acc_type) != acc_type)
++ if ((vma->vm_flags & acc_type) != acc_type) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
++ (address & ~3UL) == instruction_pointer(regs))
++ {
++ up_read(&mm->mmap_sem);
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 3:
++ return;
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ case 2:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
++ do_group_exit(SIGKILL);
++ }
++#endif
++
+ goto bad_area;
++ }
+
+ /*
+ * If for any reason at all we couldn't handle the fault, make
+diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
+index 3bf9cca..e7457d0 100644
+--- a/arch/powerpc/include/asm/elf.h
++++ b/arch/powerpc/include/asm/elf.h
+@@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+-extern unsigned long randomize_et_dyn(unsigned long base);
+-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
++#define ELF_ET_DYN_BASE (0x20000000)
++
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
++
++#ifdef __powerpc64__
++#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
++#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
++#else
++#define PAX_DELTA_MMAP_LEN 15
++#define PAX_DELTA_STACK_LEN 15
++#endif
++#endif
+
+ /*
+ * Our registers are always unsigned longs, whether we're a 32 bit
+@@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
+ (0x7ff >> (PAGE_SHIFT - 12)) : \
+ (0x3ffff >> (PAGE_SHIFT - 12)))
+
+-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
+-#define arch_randomize_brk arch_randomize_brk
+-
+ #endif /* __KERNEL__ */
+
+ /*
+diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
+index bca8fdc..61e9580 100644
+--- a/arch/powerpc/include/asm/kmap_types.h
++++ b/arch/powerpc/include/asm/kmap_types.h
+@@ -27,6 +27,7 @@ enum km_type {
+ KM_PPC_SYNC_PAGE,
+ KM_PPC_SYNC_ICACHE,
+ KM_KDB,
++ KM_CLEARPAGE,
+ KM_TYPE_NR
+ };
+
+diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
+index d4a7f64..451de1c 100644
+--- a/arch/powerpc/include/asm/mman.h
++++ b/arch/powerpc/include/asm/mman.h
+@@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
+ }
+ #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
+
+-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
++static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
+ {
+ return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
+ }
+diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
+index dd9c4fd..a2ced87 100644
+--- a/arch/powerpc/include/asm/page.h
++++ b/arch/powerpc/include/asm/page.h
+@@ -141,8 +141,9 @@ extern phys_addr_t kernstart_addr;
+ * and needs to be executable. This means the whole heap ends
+ * up being executable.
+ */
+-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
+- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
++#define VM_DATA_DEFAULT_FLAGS32 \
++ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
++ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+ #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+@@ -170,6 +171,9 @@ extern phys_addr_t kernstart_addr;
+ #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
+ #endif
+
++#define ktla_ktva(addr) (addr)
++#define ktva_ktla(addr) (addr)
++
+ /*
+ * Use the top bit of the higher-level page table entries to indicate whether
+ * the entries we point to contain hugepages. This works because we know that
+diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
+index fb40ede..d3ce956 100644
+--- a/arch/powerpc/include/asm/page_64.h
++++ b/arch/powerpc/include/asm/page_64.h
+@@ -144,15 +144,18 @@ do { \
+ * stack by default, so in the absence of a PT_GNU_STACK program header
+ * we turn execute permission off.
+ */
+-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
+- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
++#define VM_STACK_DEFAULT_FLAGS32 \
++ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
++ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+ #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
++#ifndef CONFIG_PAX_PAGEEXEC
+ #define VM_STACK_DEFAULT_FLAGS \
+ (is_32bit_task() ? \
+ VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
++#endif
+
+ #include <asm-generic/getorder.h>
+
+diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
+index 88b0bd9..e32bc67 100644
+--- a/arch/powerpc/include/asm/pgtable.h
++++ b/arch/powerpc/include/asm/pgtable.h
+@@ -2,6 +2,7 @@
+ #define _ASM_POWERPC_PGTABLE_H
+ #ifdef __KERNEL__
+
++#include <linux/const.h>
+ #ifndef __ASSEMBLY__
+ #include <asm/processor.h> /* For TASK_SIZE */
+ #include <asm/mmu.h>
+diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
+index 4aad413..85d86bf 100644
+--- a/arch/powerpc/include/asm/pte-hash32.h
++++ b/arch/powerpc/include/asm/pte-hash32.h
+@@ -21,6 +21,7 @@
+ #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
+ #define _PAGE_USER 0x004 /* usermode access allowed */
+ #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
++#define _PAGE_EXEC _PAGE_GUARDED
+ #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
+ #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
+ #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
+diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
+index 559da19..7e5835c 100644
+--- a/arch/powerpc/include/asm/reg.h
++++ b/arch/powerpc/include/asm/reg.h
+@@ -212,6 +212,7 @@
+ #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
+ #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
+ #define DSISR_NOHPTE 0x40000000 /* no translation found */
++#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
+ #define DSISR_PROTFAULT 0x08000000 /* protection fault */
+ #define DSISR_ISSTORE 0x02000000 /* access was a store */
+ #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
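DSISR_GUARDED names the previously anonymous 0x10000000 bit. The fault
handler below widens its instruction-storage-interrupt mask accordingly:

/* old: 0x48200000 = 0x40000000 | 0x08000000 | 0x00200000
 * new: 0x58200000 = old | DSISR_GUARDED (0x10000000) */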
+diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
+index e30a13d..2b7d994 100644
+--- a/arch/powerpc/include/asm/system.h
++++ b/arch/powerpc/include/asm/system.h
+@@ -530,7 +530,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
+ #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+ #endif
+
+-extern unsigned long arch_align_stack(unsigned long sp);
++#define arch_align_stack(x) ((x) & ~0xfUL)
+
+ /* Used in very early kernel initialization. */
+ extern unsigned long reloc_offset(void);
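Collapsing arch_align_stack() into a pure mask removes the kernel's own
per-exec stack jitter; under this patch the entropy comes from delta_stack
instead. The macro's effect on a sample stack pointer:

        unsigned long sp = 0x0000003fffffe937UL;
        sp &= ~0xfUL;   /* 0x0000003fffffe930: 16-byte aligned, no jitter */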
+diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
+index bd0fb84..a42a14b 100644
+--- a/arch/powerpc/include/asm/uaccess.h
++++ b/arch/powerpc/include/asm/uaccess.h
+@@ -13,6 +13,8 @@
+ #define VERIFY_READ 0
+ #define VERIFY_WRITE 1
+
++extern void check_object_size(const void *ptr, unsigned long n, bool to);
++
+ /*
+ * The fs value determines whether argument validity checking should be
+ * performed or not. If get_fs() == USER_DS, checking is performed, with
+@@ -327,52 +329,6 @@ do { \
+ extern unsigned long __copy_tofrom_user(void __user *to,
+ const void __user *from, unsigned long size);
+
+-#ifndef __powerpc64__
+-
+-static inline unsigned long copy_from_user(void *to,
+- const void __user *from, unsigned long n)
+-{
+- unsigned long over;
+-
+- if (access_ok(VERIFY_READ, from, n))
+- return __copy_tofrom_user((__force void __user *)to, from, n);
+- if ((unsigned long)from < TASK_SIZE) {
+- over = (unsigned long)from + n - TASK_SIZE;
+- return __copy_tofrom_user((__force void __user *)to, from,
+- n - over) + over;
+- }
+- return n;
+-}
+-
+-static inline unsigned long copy_to_user(void __user *to,
+- const void *from, unsigned long n)
+-{
+- unsigned long over;
+-
+- if (access_ok(VERIFY_WRITE, to, n))
+- return __copy_tofrom_user(to, (__force void __user *)from, n);
+- if ((unsigned long)to < TASK_SIZE) {
+- over = (unsigned long)to + n - TASK_SIZE;
+- return __copy_tofrom_user(to, (__force void __user *)from,
+- n - over) + over;
+- }
+- return n;
+-}
+-
+-#else /* __powerpc64__ */
+-
+-#define __copy_in_user(to, from, size) \
+- __copy_tofrom_user((to), (from), (size))
+-
+-extern unsigned long copy_from_user(void *to, const void __user *from,
+- unsigned long n);
+-extern unsigned long copy_to_user(void __user *to, const void *from,
+- unsigned long n);
+-extern unsigned long copy_in_user(void __user *to, const void __user *from,
+- unsigned long n);
+-
+-#endif /* __powerpc64__ */
+-
+ static inline unsigned long __copy_from_user_inatomic(void *to,
+ const void __user *from, unsigned long n)
+ {
+@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
+ if (ret == 0)
+ return 0;
+ }
++
++ if (!__builtin_constant_p(n))
++ check_object_size(to, n, false);
++
+ return __copy_tofrom_user((__force void __user *)to, from, n);
+ }
+
+@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
+ if (ret == 0)
+ return 0;
+ }
++
++ if (!__builtin_constant_p(n))
++ check_object_size(from, n, true);
++
+ return __copy_tofrom_user(to, (__force const void __user *)from, n);
+ }
+
+@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
+ return __copy_to_user_inatomic(to, from, size);
+ }
+
++#ifndef __powerpc64__
++
++static inline unsigned long __must_check copy_from_user(void *to,
++ const void __user *from, unsigned long n)
++{
++ unsigned long over;
++
++ if ((long)n < 0)
++ return n;
++
++ if (access_ok(VERIFY_READ, from, n)) {
++ if (!__builtin_constant_p(n))
++ check_object_size(to, n, false);
++ return __copy_tofrom_user((__force void __user *)to, from, n);
++ }
++ if ((unsigned long)from < TASK_SIZE) {
++ over = (unsigned long)from + n - TASK_SIZE;
++ if (!__builtin_constant_p(n - over))
++ check_object_size(to, n - over, false);
++ return __copy_tofrom_user((__force void __user *)to, from,
++ n - over) + over;
++ }
++ return n;
++}
++
++static inline unsigned long __must_check copy_to_user(void __user *to,
++ const void *from, unsigned long n)
++{
++ unsigned long over;
++
++ if ((long)n < 0)
++ return n;
++
++ if (access_ok(VERIFY_WRITE, to, n)) {
++ if (!__builtin_constant_p(n))
++ check_object_size(from, n, true);
++ return __copy_tofrom_user(to, (__force void __user *)from, n);
++ }
++ if ((unsigned long)to < TASK_SIZE) {
++ over = (unsigned long)to + n - TASK_SIZE;
++ if (!__builtin_constant_p(n - over))
++ check_object_size(from, n - over, true);
++ return __copy_tofrom_user(to, (__force void __user *)from,
++ n - over) + over;
++ }
++ return n;
++}
++
++#else /* __powerpc64__ */
++
++#define __copy_in_user(to, from, size) \
++ __copy_tofrom_user((to), (from), (size))
++
++static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
++{
++ if ((long)n < 0 || n > INT_MAX)
++ return n;
++
++ if (!__builtin_constant_p(n))
++ check_object_size(to, n, false);
++
++ if (likely(access_ok(VERIFY_READ, from, n)))
++ n = __copy_from_user(to, from, n);
++ else
++ memset(to, 0, n);
++ return n;
++}
++
++static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
++{
++ if ((long)n < 0 || n > INT_MAX)
++ return n;
++
++ if (likely(access_ok(VERIFY_WRITE, to, n))) {
++ if (!__builtin_constant_p(n))
++ check_object_size(from, n, true);
++ n = __copy_to_user(to, from, n);
++ }
++ return n;
++}
++
++extern unsigned long copy_in_user(void __user *to, const void __user *from,
++ unsigned long n);
++
++#endif /* __powerpc64__ */
++
+ extern unsigned long __clear_user(void __user *addr, unsigned long size);
+
+ static inline unsigned long clear_user(void __user *addr, unsigned long size)
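check_object_size() is the PAX_USERCOPY hook declared at the top of this
header; its definition lives elsewhere in the patch. The rule it enforces,
as a freestanding sketch (names hypothetical):

/* a copy of n bytes at ptr must stay inside its containing object; the
 * real hook derives obj/objlen from the slab or the task stack and kills
 * the task on violation */
static bool copy_within_object(const char *obj, unsigned long objlen,
                               const char *ptr, unsigned long n)
{
        unsigned long off = ptr - obj;

        return ptr >= obj && off <= objlen && n <= objlen - off;
}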
+diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
+index 429983c..7af363b 100644
+--- a/arch/powerpc/kernel/exceptions-64e.S
++++ b/arch/powerpc/kernel/exceptions-64e.S
+@@ -587,6 +587,7 @@ storage_fault_common:
+ std r14,_DAR(r1)
+ std r15,_DSISR(r1)
+ addi r3,r1,STACK_FRAME_OVERHEAD
++ bl .save_nvgprs
+ mr r4,r14
+ mr r5,r15
+ ld r14,PACA_EXGEN+EX_R14(r13)
+@@ -596,8 +597,7 @@ storage_fault_common:
+ cmpdi r3,0
+ bne- 1f
+ b .ret_from_except_lite
+-1: bl .save_nvgprs
+- mr r5,r3
++1: mr r5,r3
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ ld r4,_DAR(r1)
+ bl .bad_page_fault
+diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
+index cf9c69b..ebc9640 100644
+--- a/arch/powerpc/kernel/exceptions-64s.S
++++ b/arch/powerpc/kernel/exceptions-64s.S
+@@ -1004,10 +1004,10 @@ handle_page_fault:
+ 11: ld r4,_DAR(r1)
+ ld r5,_DSISR(r1)
+ addi r3,r1,STACK_FRAME_OVERHEAD
++ bl .save_nvgprs
+ bl .do_page_fault
+ cmpdi r3,0
+ beq+ 13f
+- bl .save_nvgprs
+ mr r5,r3
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ lwz r4,_DAR(r1)
+diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
+index 0b6d796..d760ddb 100644
+--- a/arch/powerpc/kernel/module_32.c
++++ b/arch/powerpc/kernel/module_32.c
+@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
+ me->arch.core_plt_section = i;
+ }
+ if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
+- printk("Module doesn't contain .plt or .init.plt sections.\n");
++ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
+ return -ENOEXEC;
+ }
+
+@@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
+
+ DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
+ /* Init, or core PLT? */
+- if (location >= mod->module_core
+- && location < mod->module_core + mod->core_size)
++ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
++ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
+ entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
+- else
++ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
++ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
+ entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
++ else {
++ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
++ return ~0UL;
++ }
+
+ /* Find this entry, or if that fails, the next avail. entry */
+ while (entry->jump[0]) {
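do_plt_call() now has to distinguish four ranges instead of two, and an
address in none of them is a relocation bug rather than "init by default".
The repeated test is plain containment (hypothetical helper for clarity):

static inline int within(const void *loc, const void *base, unsigned long size)
{
        return base && loc >= base &&
               (const char *)loc < (const char *)base + size;
}

/* e.g. within(location, mod->module_core_rx, mod->core_size_rx) */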
+diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
+index 6457574..08b28d3 100644
+--- a/arch/powerpc/kernel/process.c
++++ b/arch/powerpc/kernel/process.c
+@@ -660,8 +660,8 @@ void show_regs(struct pt_regs * regs)
+ * Lookup NIP late so we have the best change of getting the
+ * above info out without failing
+ */
+- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
+- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
++ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
++ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
+ #endif
+ show_stack(current, (unsigned long *) regs->gpr[1]);
+ if (!user_mode(regs))
+@@ -1165,10 +1165,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
+ newsp = stack[0];
+ ip = stack[STACK_FRAME_LR_SAVE];
+ if (!firstframe || ip != lr) {
+- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
++ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
+ #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ if ((ip == rth || ip == mrth) && curr_frame >= 0) {
+- printk(" (%pS)",
++ printk(" (%pA)",
+ (void *)current->ret_stack[curr_frame].ret);
+ curr_frame--;
+ }
+@@ -1188,7 +1188,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
+ struct pt_regs *regs = (struct pt_regs *)
+ (sp + STACK_FRAME_OVERHEAD);
+ lr = regs->link;
+- printk("--- Exception: %lx at %pS\n LR = %pS\n",
++ printk("--- Exception: %lx at %pA\n LR = %pA\n",
+ regs->trap, (void *)regs->nip, (void *)lr);
+ firstframe = 1;
+ }
+@@ -1263,58 +1263,3 @@ void thread_info_cache_init(void)
+ }
+
+ #endif /* THREAD_SHIFT < PAGE_SHIFT */
+-
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+- sp -= get_random_int() & ~PAGE_MASK;
+- return sp & ~0xf;
+-}
+-
+-static inline unsigned long brk_rnd(void)
+-{
+- unsigned long rnd = 0;
+-
+- /* 8MB for 32bit, 1GB for 64bit */
+- if (is_32bit_task())
+- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
+- else
+- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
+-
+- return rnd << PAGE_SHIFT;
+-}
+-
+-unsigned long arch_randomize_brk(struct mm_struct *mm)
+-{
+- unsigned long base = mm->brk;
+- unsigned long ret;
+-
+-#ifdef CONFIG_PPC_STD_MMU_64
+- /*
+- * If we are using 1TB segments and we are allowed to randomise
+- * the heap, we can put it above 1TB so it is backed by a 1TB
+- * segment. Otherwise the heap will be in the bottom 1TB
+- * which always uses 256MB segments and this may result in a
+- * performance penalty.
+- */
+- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
+- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
+-#endif
+-
+- ret = PAGE_ALIGN(base + brk_rnd());
+-
+- if (ret < mm->brk)
+- return mm->brk;
+-
+- return ret;
+-}
+-
+-unsigned long randomize_et_dyn(unsigned long base)
+-{
+- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
+-
+- if (ret < base)
+- return base;
+-
+- return ret;
+-}
+diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
+index 836a5a1..27289a3 100644
+--- a/arch/powerpc/kernel/signal_32.c
++++ b/arch/powerpc/kernel/signal_32.c
+@@ -859,7 +859,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
+ /* Save user registers on the stack */
+ frame = &rt_sf->uc.uc_mcontext;
+ addr = frame;
+- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
++ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
+ if (save_user_regs(regs, frame, 0, 1))
+ goto badframe;
+ regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
+diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
+index a50b5ec..547078a 100644
+--- a/arch/powerpc/kernel/signal_64.c
++++ b/arch/powerpc/kernel/signal_64.c
+@@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
+ current->thread.fpscr.val = 0;
+
+ /* Set up to return from userspace. */
+- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
++ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
+ regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
+ } else {
+ err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
+diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
+index 5459d14..10f8070 100644
+--- a/arch/powerpc/kernel/traps.c
++++ b/arch/powerpc/kernel/traps.c
+@@ -98,6 +98,8 @@ static void pmac_backlight_unblank(void)
+ static inline void pmac_backlight_unblank(void) { }
+ #endif
+
++extern void gr_handle_kernel_exploit(void);
++
+ int die(const char *str, struct pt_regs *regs, long err)
+ {
+ static struct {
+@@ -171,6 +173,8 @@ int die(const char *str, struct pt_regs *regs, long err)
+ if (panic_on_oops)
+ panic("Fatal exception");
+
++ gr_handle_kernel_exploit();
++
+ oops_exit();
+ do_exit(err);
+
+diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
+index 7d14bb6..1305601 100644
+--- a/arch/powerpc/kernel/vdso.c
++++ b/arch/powerpc/kernel/vdso.c
+@@ -35,6 +35,7 @@
+ #include <asm/firmware.h>
+ #include <asm/vdso.h>
+ #include <asm/vdso_datapage.h>
++#include <asm/mman.h>
+
+ #include "setup.h"
+
+@@ -219,7 +220,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+ vdso_base = VDSO32_MBASE;
+ #endif
+
+- current->mm->context.vdso_base = 0;
++ current->mm->context.vdso_base = ~0UL;
+
+ /* vDSO has a problem and was disabled, just don't "enable" it for the
+ * process
+@@ -239,7 +240,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+ vdso_base = get_unmapped_area(NULL, vdso_base,
+ (vdso_pages << PAGE_SHIFT) +
+ ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
+- 0, 0);
++ 0, MAP_PRIVATE | MAP_EXECUTABLE);
+ if (IS_ERR_VALUE(vdso_base)) {
+ rc = vdso_base;
+ goto fail_mmapsem;
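Passing MAP_PRIVATE | MAP_EXECUTABLE here and seeding vdso_base with ~0UL go
together: once RANDMMAP can legally place the vdso at address 0, a zero base
no longer means "no vdso", which is why the signal paths above compare
against ~0UL. The invariant, as a hypothetical helper:

static inline int vdso_mapped(const struct mm_struct *mm)
{
        return mm->context.vdso_base != ~0UL;   /* 0 is now a valid base */
}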
+diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
+index 5eea6f3..5d10396 100644
+--- a/arch/powerpc/lib/usercopy_64.c
++++ b/arch/powerpc/lib/usercopy_64.c
+@@ -9,22 +9,6 @@
+ #include <linux/module.h>
+ #include <asm/uaccess.h>
+
+-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
+-{
+- if (likely(access_ok(VERIFY_READ, from, n)))
+- n = __copy_from_user(to, from, n);
+- else
+- memset(to, 0, n);
+- return n;
+-}
+-
+-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
+-{
+- if (likely(access_ok(VERIFY_WRITE, to, n)))
+- n = __copy_to_user(to, from, n);
+- return n;
+-}
+-
+ unsigned long copy_in_user(void __user *to, const void __user *from,
+ unsigned long n)
+ {
+@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
+ return n;
+ }
+
+-EXPORT_SYMBOL(copy_from_user);
+-EXPORT_SYMBOL(copy_to_user);
+ EXPORT_SYMBOL(copy_in_user);
+
+diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
+index 5efe8c9..db9ceef 100644
+--- a/arch/powerpc/mm/fault.c
++++ b/arch/powerpc/mm/fault.c
+@@ -32,6 +32,10 @@
+ #include <linux/perf_event.h>
+ #include <linux/magic.h>
+ #include <linux/ratelimit.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <linux/compiler.h>
++#include <linux/unistd.h>
+
+ #include <asm/firmware.h>
+ #include <asm/page.h>
+@@ -43,6 +47,7 @@
+ #include <asm/tlbflush.h>
+ #include <asm/siginfo.h>
+ #include <mm/mmu_decl.h>
++#include <asm/ptrace.h>
+
+ #ifdef CONFIG_KPROBES
+ static inline int notify_page_fault(struct pt_regs *regs)
+@@ -66,6 +71,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
+ }
+ #endif
+
++#ifdef CONFIG_PAX_PAGEEXEC
++/*
++ * PaX: decide what to do with offenders (regs->nip = fault address)
++ *
++ * returns 1 when task should be killed
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++ return 1;
++}
++
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int __user *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ /*
+ * Check whether the instruction at regs->nip is a store using
+ * an update addressing form which will update r1.
+@@ -136,7 +168,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
+ * indicate errors in DSISR but can validly be set in SRR1.
+ */
+ if (trap == 0x400)
+- error_code &= 0x48200000;
++ error_code &= 0x58200000;
+ else
+ is_write = error_code & DSISR_ISSTORE;
+ #else
+@@ -259,7 +291,7 @@ good_area:
+ * "undefined". Of those that can be set, this is the only
+ * one which seems bad.
+ */
+- if (error_code & 0x10000000)
++ if (error_code & DSISR_GUARDED)
+ /* Guarded storage error. */
+ goto bad_area;
+ #endif /* CONFIG_8xx */
+@@ -274,7 +306,7 @@ good_area:
+ * processors use the same I/D cache coherency mechanism
+ * as embedded.
+ */
+- if (error_code & DSISR_PROTFAULT)
++ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
+ goto bad_area;
+ #endif /* CONFIG_PPC_STD_MMU */
+
+@@ -343,6 +375,23 @@ bad_area:
+ bad_area_nosemaphore:
+ /* User mode accesses cause a SIGSEGV */
+ if (user_mode(regs)) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
++#ifdef CONFIG_PPC_STD_MMU
++ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
++#else
++ if (is_exec && regs->nip == address) {
++#endif
++ switch (pax_handle_fetch_fault(regs)) {
++ }
++
++ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
++ do_group_exit(SIGKILL);
++ }
++ }
++#endif
++
+ _exception(SIGSEGV, regs, code, address);
+ return 0;
+ }
+diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
+index 5a783d8..c23e14b 100644
+--- a/arch/powerpc/mm/mmap_64.c
++++ b/arch/powerpc/mm/mmap_64.c
+@@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+ */
+ if (mmap_is_legacy()) {
+ mm->mmap_base = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ mm->unmap_area = arch_unmap_area;
+ } else {
+ mm->mmap_base = mmap_base();
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+ mm->unmap_area = arch_unmap_area_topdown;
+ }
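The two RANDMMAP hunks shift the bases in opposite directions: the legacy
bottom-up base moves up by delta_mmap, the top-down base moves down by
delta_mmap + delta_stack so the stack keeps its own slide. With the 28-bit
64-bit setting from elf.h above and 4 KiB pages that is up to

        (2^28 - 1) << 12  ~=  2^40 bytes  ~=  1 TiB

of randomization for the mmap base (sketch arithmetic, not a quote).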
+diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
+index 73709f7..6b90313 100644
+--- a/arch/powerpc/mm/slice.c
++++ b/arch/powerpc/mm/slice.c
+@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
+ if ((mm->task_size - len) < addr)
+ return 0;
+ vma = find_vma(mm, addr);
+- return (!vma || (addr + len) <= vma->vm_start);
++ return check_heap_stack_gap(vma, addr, len);
+ }
+
+ static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
+@@ -256,7 +256,7 @@ full_search:
+ addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
+ continue;
+ }
+- if (!vma || addr + len <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, addr, len)) {
+ /*
+ * Remember the place where we stopped the search:
+ */
+@@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
+ }
+ }
+
+- addr = mm->mmap_base;
+- while (addr > len) {
++ if (mm->mmap_base < len)
++ addr = -ENOMEM;
++ else
++ addr = mm->mmap_base - len;
++
++ while (!IS_ERR_VALUE(addr)) {
+ /* Go down by chunk size */
+- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
++ addr = _ALIGN_DOWN(addr, 1ul << pshift);
+
+ /* Check for hit with different page size */
+ mask = slice_range_to_mask(addr, len);
+@@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
+- if (!vma || (addr + len) <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, addr, len)) {
+ /* remember the address as a hint for next time */
+ if (use_cache)
+ mm->free_area_cache = addr;
+@@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
+ mm->cached_hole_size = vma->vm_start - addr;
+
+ /* try just below the current vma->vm_start */
+- addr = vma->vm_start;
++ addr = skip_heap_stack_gap(vma, len);
+ }
+
+ /*
+@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
+ if (fixed && addr > (mm->task_size - len))
+ return -EINVAL;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
++ addr = 0;
++#endif
++
+ /* If hint, make sure it matches our alignment restrictions */
+ if (!fixed && addr) {
+ addr = _ALIGN_UP(addr, 1ul << pshift);
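The top-down walkers now step with skip_heap_stack_gap() and stop on an
error-valued address instead of dereferencing vma->vm_start directly. A
sketch consistent with the IS_ERR_VALUE() callers (the definition is added
in the mm/ part of the patch):

static unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma,
                                         unsigned long len)
{
        unsigned long start = vma->vm_start;

        if (vma->vm_flags & VM_GROWSDOWN)       /* keep the configured gap */
                start -= sysctl_heap_stack_gap;
        if (start < len)                        /* no room left below */
                return -ENOMEM;                 /* caller checks IS_ERR_VALUE() */
        return start - len;                     /* next candidate, below vma */
}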
+diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
+index 547f1a6..3fff354 100644
+--- a/arch/s390/include/asm/elf.h
++++ b/arch/s390/include/asm/elf.h
+@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+-extern unsigned long randomize_et_dyn(unsigned long base);
+-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
++#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
++
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
++
++#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
++#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
++#endif
+
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this CPU supports. */
+@@ -211,7 +217,4 @@ struct linux_binprm;
+ #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
+ int arch_setup_additional_pages(struct linux_binprm *, int);
+
+-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
+-#define arch_randomize_brk arch_randomize_brk
+-
+ #endif
+diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
+index ef573c1..75a1ce6 100644
+--- a/arch/s390/include/asm/system.h
++++ b/arch/s390/include/asm/system.h
+@@ -262,7 +262,7 @@ extern void (*_machine_restart)(char *command);
+ extern void (*_machine_halt)(void);
+ extern void (*_machine_power_off)(void);
+
+-extern unsigned long arch_align_stack(unsigned long sp);
++#define arch_align_stack(x) ((x) & ~0xfUL)
+
+ static inline int tprot(unsigned long addr)
+ {
+diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
+index 2b23885..e136e31 100644
+--- a/arch/s390/include/asm/uaccess.h
++++ b/arch/s390/include/asm/uaccess.h
+@@ -235,6 +235,10 @@ static inline unsigned long __must_check
+ copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
+ might_fault();
++
++ if ((long)n < 0)
++ return n;
++
+ if (access_ok(VERIFY_WRITE, to, n))
+ n = __copy_to_user(to, from, n);
+ return n;
+@@ -260,6 +264,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
+ static inline unsigned long __must_check
+ __copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ if (__builtin_constant_p(n) && (n <= 256))
+ return uaccess.copy_from_user_small(n, from, to);
+ else
+@@ -294,6 +301,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
+ unsigned int sz = __compiletime_object_size(to);
+
+ might_fault();
++
++ if ((long)n < 0)
++ return n;
++
+ if (unlikely(sz != -1 && sz < n)) {
+ copy_from_user_overflow();
+ return n;
+diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
+index dfcb343..eda788a 100644
+--- a/arch/s390/kernel/module.c
++++ b/arch/s390/kernel/module.c
+@@ -161,11 +161,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
+
+ /* Increase core size by size of got & plt and set start
+ offsets for got and plt. */
+- me->core_size = ALIGN(me->core_size, 4);
+- me->arch.got_offset = me->core_size;
+- me->core_size += me->arch.got_size;
+- me->arch.plt_offset = me->core_size;
+- me->core_size += me->arch.plt_size;
++ me->core_size_rw = ALIGN(me->core_size_rw, 4);
++ me->arch.got_offset = me->core_size_rw;
++ me->core_size_rw += me->arch.got_size;
++ me->arch.plt_offset = me->core_size_rx;
++ me->core_size_rx += me->arch.plt_size;
+ return 0;
+ }
+
+@@ -242,7 +242,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+ if (info->got_initialized == 0) {
+ Elf_Addr *gotent;
+
+- gotent = me->module_core + me->arch.got_offset +
++ gotent = me->module_core_rw + me->arch.got_offset +
+ info->got_offset;
+ *gotent = val;
+ info->got_initialized = 1;
+@@ -266,7 +266,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+ else if (r_type == R_390_GOTENT ||
+ r_type == R_390_GOTPLTENT)
+ *(unsigned int *) loc =
+- (val + (Elf_Addr) me->module_core - loc) >> 1;
++ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
+ else if (r_type == R_390_GOT64 ||
+ r_type == R_390_GOTPLT64)
+ *(unsigned long *) loc = val;
+@@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+ case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
+ if (info->plt_initialized == 0) {
+ unsigned int *ip;
+- ip = me->module_core + me->arch.plt_offset +
++ ip = me->module_core_rx + me->arch.plt_offset +
+ info->plt_offset;
+ #ifndef CONFIG_64BIT
+ ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
+@@ -305,7 +305,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+ val - loc + 0xffffUL < 0x1ffffeUL) ||
+ (r_type == R_390_PLT32DBL &&
+ val - loc + 0xffffffffULL < 0x1fffffffeULL)))
+- val = (Elf_Addr) me->module_core +
++ val = (Elf_Addr) me->module_core_rx +
+ me->arch.plt_offset +
+ info->plt_offset;
+ val += rela->r_addend - loc;
+@@ -327,7 +327,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+ case R_390_GOTOFF32: /* 32 bit offset to GOT. */
+ case R_390_GOTOFF64: /* 64 bit offset to GOT. */
+ val = val + rela->r_addend -
+- ((Elf_Addr) me->module_core + me->arch.got_offset);
++ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
+ if (r_type == R_390_GOTOFF16)
+ *(unsigned short *) loc = val;
+ else if (r_type == R_390_GOTOFF32)
+@@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+ break;
+ case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
+ case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
+- val = (Elf_Addr) me->module_core + me->arch.got_offset +
++ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
+ rela->r_addend - loc;
+ if (r_type == R_390_GOTPC)
+ *(unsigned int *) loc = val;
+diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
+index 9451b21..ed8956f 100644
+--- a/arch/s390/kernel/process.c
++++ b/arch/s390/kernel/process.c
+@@ -321,39 +321,3 @@ unsigned long get_wchan(struct task_struct *p)
+ }
+ return 0;
+ }
+-
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+- sp -= get_random_int() & ~PAGE_MASK;
+- return sp & ~0xf;
+-}
+-
+-static inline unsigned long brk_rnd(void)
+-{
+- /* 8MB for 32bit, 1GB for 64bit */
+- if (is_32bit_task())
+- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
+- else
+- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
+-}
+-
+-unsigned long arch_randomize_brk(struct mm_struct *mm)
+-{
+- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
+-
+- if (ret < mm->brk)
+- return mm->brk;
+- return ret;
+-}
+-
+-unsigned long randomize_et_dyn(unsigned long base)
+-{
+- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
+-
+- if (!(current->flags & PF_RANDOMIZE))
+- return base;
+- if (ret < base)
+- return base;
+- return ret;
+-}
+diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
+index f09c748..cf9ec1d 100644
+--- a/arch/s390/mm/mmap.c
++++ b/arch/s390/mm/mmap.c
+@@ -92,10 +92,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+ */
+ if (mmap_is_legacy()) {
+ mm->mmap_base = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ mm->unmap_area = arch_unmap_area;
+ } else {
+ mm->mmap_base = mmap_base();
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+ mm->unmap_area = arch_unmap_area_topdown;
+ }
+@@ -167,10 +179,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+ */
+ if (mmap_is_legacy()) {
+ mm->mmap_base = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
+ mm->get_unmapped_area = s390_get_unmapped_area;
+ mm->unmap_area = arch_unmap_area;
+ } else {
+ mm->mmap_base = mmap_base();
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+ mm->get_unmapped_area = s390_get_unmapped_area_topdown;
+ mm->unmap_area = arch_unmap_area_topdown;
+ }
+diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h
+index 589d5c7..669e274 100644
+--- a/arch/score/include/asm/system.h
++++ b/arch/score/include/asm/system.h
+@@ -17,7 +17,7 @@ do { \
+ #define finish_arch_switch(prev) do {} while (0)
+
+ typedef void (*vi_handler_t)(void);
+-extern unsigned long arch_align_stack(unsigned long sp);
++#define arch_align_stack(x) (x)
+
+ #define mb() barrier()
+ #define rmb() barrier()
+diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
+index 25d0803..d6c8e36 100644
+--- a/arch/score/kernel/process.c
++++ b/arch/score/kernel/process.c
+@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
+
+ return task_pt_regs(task)->cp0_epc;
+ }
+-
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+- return sp;
+-}
+diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
+index afeb710..d1d1289 100644
+--- a/arch/sh/mm/mmap.c
++++ b/arch/sh/mm/mmap.c
+@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
+ return addr;
+ }
+
+@@ -106,7 +105,7 @@ full_search:
+ }
+ return -ENOMEM;
+ }
+- if (likely(!vma || addr + len <= vma->vm_start)) {
++ if (likely(check_heap_stack_gap(vma, addr, len))) {
+ /*
+ * Remember the place where we stopped the search:
+ */
+@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
+ return addr;
+ }
+
+@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ /* make sure it can fit in the remaining address space */
+ if (likely(addr > len)) {
+ vma = find_vma(mm, addr-len);
+- if (!vma || addr <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, addr - len, len)) {
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr-len);
+ }
+@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ if (unlikely(mm->mmap_base < len))
+ goto bottomup;
+
+- addr = mm->mmap_base-len;
+- if (do_colour_align)
+- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
++ addr = mm->mmap_base - len;
+
+ do {
++ if (do_colour_align)
++ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
+ /*
+ * Lookup failure means no vma is above this address,
+ * else if new region fits below vma->vm_start,
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
+- if (likely(!vma || addr+len <= vma->vm_start)) {
++ if (likely(check_heap_stack_gap(vma, addr, len))) {
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr);
+ }
+@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ mm->cached_hole_size = vma->vm_start - addr;
+
+ /* try just below the current vma->vm_start */
+- addr = vma->vm_start-len;
+- if (do_colour_align)
+- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
+- } while (likely(len < vma->vm_start));
++ addr = skip_heap_stack_gap(vma, len);
++ } while (!IS_ERR_VALUE(addr));
+
+ bottomup:
+ /*
+diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
+index ad1fb5d..fc5315b 100644
+--- a/arch/sparc/Makefile
++++ b/arch/sparc/Makefile
+@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
+ # Export what is needed by arch/sparc/boot/Makefile
+ export VMLINUX_INIT VMLINUX_MAIN
+ VMLINUX_INIT := $(head-y) $(init-y)
+-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
++VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
+ VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
+ VMLINUX_MAIN += $(drivers-y) $(net-y)
+
+diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
+index 9f421df..b81fc12 100644
+--- a/arch/sparc/include/asm/atomic_64.h
++++ b/arch/sparc/include/asm/atomic_64.h
+@@ -14,18 +14,40 @@
+ #define ATOMIC64_INIT(i) { (i) }
+
+ #define atomic_read(v) (*(volatile int *)&(v)->counter)
++static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
++{
++ return v->counter;
++}
+ #define atomic64_read(v) (*(volatile long *)&(v)->counter)
++static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
++{
++ return v->counter;
++}
+
+ #define atomic_set(v, i) (((v)->counter) = i)
++static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
++{
++ v->counter = i;
++}
+ #define atomic64_set(v, i) (((v)->counter) = i)
++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
++{
++ v->counter = i;
++}
+
+ extern void atomic_add(int, atomic_t *);
++extern void atomic_add_unchecked(int, atomic_unchecked_t *);
+ extern void atomic64_add(long, atomic64_t *);
++extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
+ extern void atomic_sub(int, atomic_t *);
++extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
+ extern void atomic64_sub(long, atomic64_t *);
++extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
+
+ extern int atomic_add_ret(int, atomic_t *);
++extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
+ extern long atomic64_add_ret(long, atomic64_t *);
++extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
+ extern int atomic_sub_ret(int, atomic_t *);
+ extern long atomic64_sub_ret(long, atomic64_t *);
+
+@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
+ #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
+
+ #define atomic_inc_return(v) atomic_add_ret(1, v)
++static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
++{
++ return atomic_add_ret_unchecked(1, v);
++}
+ #define atomic64_inc_return(v) atomic64_add_ret(1, v)
++static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
++{
++ return atomic64_add_ret_unchecked(1, v);
++}
+
+ #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
+ #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
+
+ #define atomic_add_return(i, v) atomic_add_ret(i, v)
++static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
++{
++ return atomic_add_ret_unchecked(i, v);
++}
+ #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
++static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
++{
++ return atomic64_add_ret_unchecked(i, v);
++}
+
+ /*
+ * atomic_inc_and_test - increment and test
+@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
+ * other cases.
+ */
+ #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
++static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
++{
++ return atomic_inc_return_unchecked(v) == 0;
++}
+ #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
+
+ #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
+@@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
+ #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
+
+ #define atomic_inc(v) atomic_add(1, v)
++static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
++{
++ atomic_add_unchecked(1, v);
++}
+ #define atomic64_inc(v) atomic64_add(1, v)
++static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
++{
++ atomic64_add_unchecked(1, v);
++}
+
+ #define atomic_dec(v) atomic_sub(1, v)
++static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
++{
++ atomic_sub_unchecked(1, v);
++}
+ #define atomic64_dec(v) atomic64_sub(1, v)
++static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
++{
++ atomic64_sub_unchecked(1, v);
++}
+
+ #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
+ #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
+
+ #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
++static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
++{
++ return cmpxchg(&v->counter, old, new);
++}
+ #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
++static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
++{
++ return xchg(&v->counter, new);
++}
+
+ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+ {
+- int c, old;
++ int c, old, new;
+ c = atomic_read(v);
+ for (;;) {
+- if (unlikely(c == (u)))
++ if (unlikely(c == u))
+ break;
+- old = atomic_cmpxchg((v), c, c + (a));
++
++ asm volatile("addcc %2, %0, %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "tvs %%icc, 6\n"
++#endif
++
++ : "=r" (new)
++ : "0" (c), "ir" (a)
++ : "cc");
++
++ old = atomic_cmpxchg(v, c, new);
+ if (likely(old == c))
+ break;
+ c = old;
+@@ -89,20 +166,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+ #define atomic64_cmpxchg(v, o, n) \
+ ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
+ #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
++static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
++{
++ return xchg(&v->counter, new);
++}
+
+ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
+ {
+- long c, old;
++ long c, old, new;
+ c = atomic64_read(v);
+ for (;;) {
+- if (unlikely(c == (u)))
++ if (unlikely(c == u))
+ break;
+- old = atomic64_cmpxchg((v), c, c + (a));
++
++ asm volatile("addcc %2, %0, %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "tvs %%xcc, 6\n"
++#endif
++
++ : "=r" (new)
++ : "0" (c), "ir" (a)
++ : "cc");
++
++ old = atomic64_cmpxchg(v, c, new);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+- return c != (u);
++ return c != u;
+ }
+
+ #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
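The addcc/tvs pairs are PAX_REFCOUNT: the add sets the condition codes and
"tvs %icc, 6" traps straight into the overflow handler the moment a signed
counter wraps, before the bogus value can be used. The _unchecked variants
exist for counters (statistics and the like) where wrapping is harmless. A
portable sketch of the checked behaviour:

static void refcount_inc_checked(atomic_t *v)
{
        int old, new;

        do {
                old = atomic_read(v);
                new = old + 1;
                if (new < old)          /* wrapped past INT_MAX */
                        BUG();          /* PaX: trap 6 -> refcount handler */
        } while (atomic_cmpxchg(v, old, new) != old);
}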
+diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
+index 69358b5..17b4745 100644
+--- a/arch/sparc/include/asm/cache.h
++++ b/arch/sparc/include/asm/cache.h
+@@ -10,7 +10,7 @@
+ #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
+
+ #define L1_CACHE_SHIFT 5
+-#define L1_CACHE_BYTES 32
++#define L1_CACHE_BYTES 32UL
+
+ #ifdef CONFIG_SPARC32
+ #define SMP_CACHE_BYTES_SHIFT 5
+diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
+index 4269ca6..e3da77f 100644
+--- a/arch/sparc/include/asm/elf_32.h
++++ b/arch/sparc/include/asm/elf_32.h
+@@ -114,6 +114,13 @@ typedef struct {
+
+ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE 0x10000UL
++
++#define PAX_DELTA_MMAP_LEN 16
++#define PAX_DELTA_STACK_LEN 16
++#endif
++
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this cpu supports. This can NOT be done in userspace
+ on Sparc. */
+diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
+index 7df8b7f..4946269 100644
+--- a/arch/sparc/include/asm/elf_64.h
++++ b/arch/sparc/include/asm/elf_64.h
+@@ -180,6 +180,13 @@ typedef struct {
+ #define ELF_ET_DYN_BASE 0x0000010000000000UL
+ #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
++
++#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
++#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
++#endif
++
+ extern unsigned long sparc64_elf_hwcap;
+ #define ELF_HWCAP sparc64_elf_hwcap
+
+diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
+index a790cc6..091ed94 100644
+--- a/arch/sparc/include/asm/pgtable_32.h
++++ b/arch/sparc/include/asm/pgtable_32.h
+@@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
+ BTFIXUPDEF_INT(page_none)
+ BTFIXUPDEF_INT(page_copy)
+ BTFIXUPDEF_INT(page_readonly)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++BTFIXUPDEF_INT(page_shared_noexec)
++BTFIXUPDEF_INT(page_copy_noexec)
++BTFIXUPDEF_INT(page_readonly_noexec)
++#endif
++
+ BTFIXUPDEF_INT(page_kernel)
+
+ #define PMD_SHIFT SUN4C_PMD_SHIFT
+@@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
+ #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
+ #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
+
++#ifdef CONFIG_PAX_PAGEEXEC
++extern pgprot_t PAGE_SHARED_NOEXEC;
++# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
++# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_COPY_NOEXEC PAGE_COPY
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++#endif
++
+ extern unsigned long page_kernel;
+
+ #ifdef MODULE
+diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
+index f6ae2b2..b03ffc7 100644
+--- a/arch/sparc/include/asm/pgtsrmmu.h
++++ b/arch/sparc/include/asm/pgtsrmmu.h
+@@ -115,6 +115,13 @@
+ SRMMU_EXEC | SRMMU_REF)
+ #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
+ SRMMU_EXEC | SRMMU_REF)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
++#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
++#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
++#endif
++
+ #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
+ SRMMU_DIRTY | SRMMU_REF)
+
+diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
+index 9689176..63c18ea 100644
+--- a/arch/sparc/include/asm/spinlock_64.h
++++ b/arch/sparc/include/asm/spinlock_64.h
+@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
+
+ /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
+
+-static void inline arch_read_lock(arch_rwlock_t *lock)
++static inline void arch_read_lock(arch_rwlock_t *lock)
+ {
+ unsigned long tmp1, tmp2;
+
+ __asm__ __volatile__ (
+ "1: ldsw [%2], %0\n"
+ " brlz,pn %0, 2f\n"
+-"4: add %0, 1, %1\n"
++"4: addcc %0, 1, %1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" tvs %%icc, 6\n"
++#endif
++
+ " cas [%2], %0, %1\n"
+ " cmp %0, %1\n"
+ " bne,pn %%icc, 1b\n"
+@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
+ " .previous"
+ : "=&r" (tmp1), "=&r" (tmp2)
+ : "r" (lock)
+- : "memory");
++ : "memory", "cc");
+ }
+
+-static int inline arch_read_trylock(arch_rwlock_t *lock)
++static inline int arch_read_trylock(arch_rwlock_t *lock)
+ {
+ int tmp1, tmp2;
+
+@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
+ "1: ldsw [%2], %0\n"
+ " brlz,a,pn %0, 2f\n"
+ " mov 0, %0\n"
+-" add %0, 1, %1\n"
++" addcc %0, 1, %1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" tvs %%icc, 6\n"
++#endif
++
+ " cas [%2], %0, %1\n"
+ " cmp %0, %1\n"
+ " bne,pn %%icc, 1b\n"
+@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
+ return tmp1;
+ }
+
+-static void inline arch_read_unlock(arch_rwlock_t *lock)
++static inline void arch_read_unlock(arch_rwlock_t *lock)
+ {
+ unsigned long tmp1, tmp2;
+
+ __asm__ __volatile__(
+ "1: lduw [%2], %0\n"
+-" sub %0, 1, %1\n"
++" subcc %0, 1, %1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" tvs %%icc, 6\n"
++#endif
++
+ " cas [%2], %0, %1\n"
+ " cmp %0, %1\n"
+ " bne,pn %%xcc, 1b\n"
+@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
+ : "memory");
+ }
+
+-static void inline arch_write_lock(arch_rwlock_t *lock)
++static inline void arch_write_lock(arch_rwlock_t *lock)
+ {
+ unsigned long mask, tmp1, tmp2;
+
+@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
+ : "memory");
+ }
+
+-static void inline arch_write_unlock(arch_rwlock_t *lock)
++static inline void arch_write_unlock(arch_rwlock_t *lock)
+ {
+ __asm__ __volatile__(
+ " stw %%g0, [%0]"
+@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
+ : "memory");
+ }
+
+-static int inline arch_write_trylock(arch_rwlock_t *lock)
++static inline int arch_write_trylock(arch_rwlock_t *lock)
+ {
+ unsigned long mask, tmp1, tmp2, result;
+
+diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
+index fa57532..e1a4c53 100644
+--- a/arch/sparc/include/asm/thread_info_32.h
++++ b/arch/sparc/include/asm/thread_info_32.h
+@@ -50,6 +50,8 @@ struct thread_info {
+ unsigned long w_saved;
+
+ struct restart_block restart_block;
++
++ unsigned long lowest_stack;
+ };
+
+ /*
+diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
+index 60d86be..952dea1 100644
+--- a/arch/sparc/include/asm/thread_info_64.h
++++ b/arch/sparc/include/asm/thread_info_64.h
+@@ -63,6 +63,8 @@ struct thread_info {
+ struct pt_regs *kern_una_regs;
+ unsigned int kern_una_insn;
+
++ unsigned long lowest_stack;
++
+ unsigned long fpregs[0] __attribute__ ((aligned(64)));
+ };
+
+diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
+index e88fbe5..96b0ce5 100644
+--- a/arch/sparc/include/asm/uaccess.h
++++ b/arch/sparc/include/asm/uaccess.h
+@@ -1,5 +1,13 @@
+ #ifndef ___ASM_SPARC_UACCESS_H
+ #define ___ASM_SPARC_UACCESS_H
++
++#ifdef __KERNEL__
++#ifndef __ASSEMBLY__
++#include <linux/types.h>
++extern void check_object_size(const void *ptr, unsigned long n, bool to);
++#endif
++#endif
++
+ #if defined(__sparc__) && defined(__arch64__)
+ #include <asm/uaccess_64.h>
+ #else
+diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
+index 8303ac4..07f333d 100644
+--- a/arch/sparc/include/asm/uaccess_32.h
++++ b/arch/sparc/include/asm/uaccess_32.h
+@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
+
+ static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
+- if (n && __access_ok((unsigned long) to, n))
++ if ((long)n < 0)
++ return n;
++
++ if (n && __access_ok((unsigned long) to, n)) {
++ if (!__builtin_constant_p(n))
++ check_object_size(from, n, true);
+ return __copy_user(to, (__force void __user *) from, n);
+- else
++ } else
+ return n;
+ }
+
+ static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
++ if (!__builtin_constant_p(n))
++ check_object_size(from, n, true);
++
+ return __copy_user(to, (__force void __user *) from, n);
+ }
+
+ static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
+- if (n && __access_ok((unsigned long) from, n))
++ if ((long)n < 0)
++ return n;
++
++ if (n && __access_ok((unsigned long) from, n)) {
++ if (!__builtin_constant_p(n))
++ check_object_size(to, n, false);
+ return __copy_user((__force void __user *) to, from, n);
+- else
++ } else
+ return n;
+ }
+
+ static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ return __copy_user((__force void __user *) to, from, n);
+ }
+
+diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
+index 3e1449f..5293a0e 100644
+--- a/arch/sparc/include/asm/uaccess_64.h
++++ b/arch/sparc/include/asm/uaccess_64.h
+@@ -10,6 +10,7 @@
+ #include <linux/compiler.h>
+ #include <linux/string.h>
+ #include <linux/thread_info.h>
++#include <linux/kernel.h>
+ #include <asm/asi.h>
+ #include <asm/system.h>
+ #include <asm/spitfire.h>
+@@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
+ static inline unsigned long __must_check
+ copy_from_user(void *to, const void __user *from, unsigned long size)
+ {
+- unsigned long ret = ___copy_from_user(to, from, size);
++ unsigned long ret;
+
++ if ((long)size < 0 || size > INT_MAX)
++ return size;
++
++ if (!__builtin_constant_p(size))
++ check_object_size(to, size, false);
++
++ ret = ___copy_from_user(to, from, size);
+ if (unlikely(ret))
+ ret = copy_from_user_fixup(to, from, size);
+
+@@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
+ static inline unsigned long __must_check
+ copy_to_user(void __user *to, const void *from, unsigned long size)
+ {
+- unsigned long ret = ___copy_to_user(to, from, size);
++ unsigned long ret;
+
++ if ((long)size < 0 || size > INT_MAX)
++ return size;
++
++ if (!__builtin_constant_p(size))
++ check_object_size(from, size, true);
++
++ ret = ___copy_to_user(to, from, size);
+ if (unlikely(ret))
+ ret = copy_to_user_fixup(to, from, size);
+ return ret;
+diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
+index cb85458..e063f17 100644
+--- a/arch/sparc/kernel/Makefile
++++ b/arch/sparc/kernel/Makefile
+@@ -3,7 +3,7 @@
+ #
+
+ asflags-y := -ansi
+-ccflags-y := -Werror
++#ccflags-y := -Werror
+
+ extra-y := head_$(BITS).o
+ extra-y += init_task.o
+diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
+index f793742..4d880af 100644
+--- a/arch/sparc/kernel/process_32.c
++++ b/arch/sparc/kernel/process_32.c
+@@ -204,7 +204,7 @@ void __show_backtrace(unsigned long fp)
+ rw->ins[4], rw->ins[5],
+ rw->ins[6],
+ rw->ins[7]);
+- printk("%pS\n", (void *) rw->ins[7]);
++ printk("%pA\n", (void *) rw->ins[7]);
+ rw = (struct reg_window32 *) rw->ins[6];
+ }
+ spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
+@@ -271,14 +271,14 @@ void show_regs(struct pt_regs *r)
+
+ printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
+ r->psr, r->pc, r->npc, r->y, print_tainted());
+- printk("PC: <%pS>\n", (void *) r->pc);
++ printk("PC: <%pA>\n", (void *) r->pc);
+ printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
+ r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
+ r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
+ printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
+ r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
+ r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
+- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
++ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
+
+ printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
+ rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
+@@ -313,7 +313,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
+ rw = (struct reg_window32 *) fp;
+ pc = rw->ins[7];
+ printk("[%08lx : ", pc);
+- printk("%pS ] ", (void *) pc);
++ printk("%pA ] ", (void *) pc);
+ fp = rw->ins[6];
+ } while (++count < 16);
+ printk("\n");
+diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
+index 3739a06..48b2ff0 100644
+--- a/arch/sparc/kernel/process_64.c
++++ b/arch/sparc/kernel/process_64.c
+@@ -180,14 +180,14 @@ static void show_regwindow(struct pt_regs *regs)
+ printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
+ rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
+ if (regs->tstate & TSTATE_PRIV)
+- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
++ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
+ }
+
+ void show_regs(struct pt_regs *regs)
+ {
+ printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
+ regs->tpc, regs->tnpc, regs->y, print_tainted());
+- printk("TPC: <%pS>\n", (void *) regs->tpc);
++ printk("TPC: <%pA>\n", (void *) regs->tpc);
+ printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
+ regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
+ regs->u_regs[3]);
+@@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
+ printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
+ regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
+ regs->u_regs[15]);
+- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
++ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
+ show_regwindow(regs);
+ show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
+ }
+@@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void)
+ ((tp && tp->task) ? tp->task->pid : -1));
+
+ if (gp->tstate & TSTATE_PRIV) {
+- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
++ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
+ (void *) gp->tpc,
+ (void *) gp->o7,
+ (void *) gp->i7,
+diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
+index 42b282f..28ce9f2 100644
+--- a/arch/sparc/kernel/sys_sparc_32.c
++++ b/arch/sparc/kernel/sys_sparc_32.c
+@@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+ if (ARCH_SUN4C && len > 0x20000000)
+ return -ENOMEM;
+ if (!addr)
+- addr = TASK_UNMAPPED_BASE;
++ addr = current->mm->mmap_base;
+
+ if (flags & MAP_SHARED)
+ addr = COLOUR_ALIGN(addr);
+@@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+ }
+ if (TASK_SIZE - PAGE_SIZE - len < addr)
+ return -ENOMEM;
+- if (!vmm || addr + len <= vmm->vm_start)
++ if (check_heap_stack_gap(vmm, addr, len))
+ return addr;
+ addr = vmm->vm_end;
+ if (flags & MAP_SHARED)
+diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
+index 441521a..b767073 100644
+--- a/arch/sparc/kernel/sys_sparc_64.c
++++ b/arch/sparc/kernel/sys_sparc_64.c
+@@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+ /* We do not accept a shared mapping if it would violate
+ * cache aliasing constraints.
+ */
+- if ((flags & MAP_SHARED) &&
++ if ((filp || (flags & MAP_SHARED)) &&
+ ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
+ return -EINVAL;
+ return addr;
+@@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+ if (filp || (flags & MAP_SHARED))
+ do_color_align = 1;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ if (do_color_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+@@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (task_size - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
+ return addr;
+ }
+
+ if (len > mm->cached_hole_size) {
+- start_addr = addr = mm->free_area_cache;
++ start_addr = addr = mm->free_area_cache;
+ } else {
+- start_addr = addr = TASK_UNMAPPED_BASE;
++ start_addr = addr = mm->mmap_base;
+ mm->cached_hole_size = 0;
+ }
+
+@@ -174,14 +177,14 @@ full_search:
+ vma = find_vma(mm, VA_EXCLUDE_END);
+ }
+ if (unlikely(task_size < addr)) {
+- if (start_addr != TASK_UNMAPPED_BASE) {
+- start_addr = addr = TASK_UNMAPPED_BASE;
++ if (start_addr != mm->mmap_base) {
++ start_addr = addr = mm->mmap_base;
+ mm->cached_hole_size = 0;
+ goto full_search;
+ }
+ return -ENOMEM;
+ }
+- if (likely(!vma || addr + len <= vma->vm_start)) {
++ if (likely(check_heap_stack_gap(vma, addr, len))) {
+ /*
+ * Remember the place where we stopped the search:
+ */
+@@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ /* We do not accept a shared mapping if it would violate
+ * cache aliasing constraints.
+ */
+- if ((flags & MAP_SHARED) &&
++ if ((filp || (flags & MAP_SHARED)) &&
+ ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
+ return -EINVAL;
+ return addr;
+@@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (task_size - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
+ return addr;
+ }
+
+@@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ /* make sure it can fit in the remaining address space */
+ if (likely(addr > len)) {
+ vma = find_vma(mm, addr-len);
+- if (!vma || addr <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, addr - len, len)) {
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr-len);
+ }
+@@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ if (unlikely(mm->mmap_base < len))
+ goto bottomup;
+
+- addr = mm->mmap_base-len;
+- if (do_color_align)
+- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
++ addr = mm->mmap_base - len;
+
+ do {
++ if (do_color_align)
++ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
+ /*
+ * Lookup failure means no vma is above this address,
+ * else if new region fits below vma->vm_start,
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
+- if (likely(!vma || addr+len <= vma->vm_start)) {
++ if (likely(check_heap_stack_gap(vma, addr, len))) {
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr);
+ }
+@@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ mm->cached_hole_size = vma->vm_start - addr;
+
+ /* try just below the current vma->vm_start */
+- addr = vma->vm_start-len;
+- if (do_color_align)
+- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
+- } while (likely(len < vma->vm_start));
++ addr = skip_heap_stack_gap(vma, len);
++ } while (!IS_ERR_VALUE(addr));
+
+ bottomup:
+ /*
+@@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+ gap == RLIM_INFINITY ||
+ sysctl_legacy_va_layout) {
+ mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ mm->unmap_area = arch_unmap_area;
+ } else {
+@@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+ gap = (task_size / 6 * 5);
+
+ mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+ mm->unmap_area = arch_unmap_area_topdown;
+ }
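Two themes run through this file: every open-coded "!vma || addr + len <= vma->vm_start" test becomes check_heap_stack_gap(), and under PAX_RANDMMAP the search starts from the per-process randomized mm->mmap_base instead of the fixed TASK_UNMAPPED_BASE. The helper is defined in the mm/ part of the patch, not shown here; a sketch of its contract:

    /* Contract sketch (the real definition lives elsewhere in this
     * patch): the candidate range must fit below the next vma and, if
     * that vma is a downward-growing stack, additionally stay clear of
     * a sysctl-controlled guard gap. */
    static bool check_heap_stack_gap_sketch(const struct vm_area_struct *vma,
                                            unsigned long addr, unsigned long len)
    {
            if (!vma)
                    return true;
            if (addr + len > vma->vm_start)
                    return false;
            if (vma->vm_flags & VM_GROWSDOWN)
                    return addr + len + sysctl_heap_stack_gap <= vma->vm_start;
            return true;
    }

skip_heap_stack_gap(), used in the topdown loop, is the companion that yields the next candidate address below a vma, or an error value once the search runs out of room.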
+diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
+index 591f20c..0f1b925 100644
+--- a/arch/sparc/kernel/traps_32.c
++++ b/arch/sparc/kernel/traps_32.c
+@@ -45,6 +45,8 @@ static void instruction_dump(unsigned long *pc)
+ #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
+ #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
+
++extern void gr_handle_kernel_exploit(void);
++
+ void die_if_kernel(char *str, struct pt_regs *regs)
+ {
+ static int die_counter;
+@@ -77,15 +79,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
+ count++ < 30 &&
+ (((unsigned long) rw) >= PAGE_OFFSET) &&
+ !(((unsigned long) rw) & 0x7)) {
+- printk("Caller[%08lx]: %pS\n", rw->ins[7],
++ printk("Caller[%08lx]: %pA\n", rw->ins[7],
+ (void *) rw->ins[7]);
+ rw = (struct reg_window32 *)rw->ins[6];
+ }
+ }
+ printk("Instruction DUMP:");
+ instruction_dump ((unsigned long *) regs->pc);
+- if(regs->psr & PSR_PS)
++ if(regs->psr & PSR_PS) {
++ gr_handle_kernel_exploit();
+ do_exit(SIGKILL);
++ }
+ do_exit(SIGSEGV);
+ }
+
+diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
+index 0cbdaa4..438e4c9 100644
+--- a/arch/sparc/kernel/traps_64.c
++++ b/arch/sparc/kernel/traps_64.c
+@@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
+ i + 1,
+ p->trapstack[i].tstate, p->trapstack[i].tpc,
+ p->trapstack[i].tnpc, p->trapstack[i].tt);
+- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
++ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
+ }
+ }
+
+@@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
+
+ lvl -= 0x100;
+ if (regs->tstate & TSTATE_PRIV) {
++
++#ifdef CONFIG_PAX_REFCOUNT
++ if (lvl == 6)
++ pax_report_refcount_overflow(regs);
++#endif
++
+ sprintf(buffer, "Kernel bad sw trap %lx", lvl);
+ die_if_kernel(buffer, regs);
+ }
+@@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
+ void bad_trap_tl1(struct pt_regs *regs, long lvl)
+ {
+ char buffer[32];
+-
++
+ if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
+ 0, lvl, SIGTRAP) == NOTIFY_STOP)
+ return;
+
++#ifdef CONFIG_PAX_REFCOUNT
++ if (lvl == 6)
++ pax_report_refcount_overflow(regs);
++#endif
++
+ dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+
+ sprintf (buffer, "Bad trap %lx at tl>0", lvl);
+@@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
+ regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
+ printk("%s" "ERROR(%d): ",
+ (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
+- printk("TPC<%pS>\n", (void *) regs->tpc);
++ printk("TPC<%pA>\n", (void *) regs->tpc);
+ printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
+ (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
+ (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
+@@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
+ smp_processor_id(),
+ (type & 0x1) ? 'I' : 'D',
+ regs->tpc);
+- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
++ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
+ panic("Irrecoverable Cheetah+ parity error.");
+ }
+
+@@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
+ smp_processor_id(),
+ (type & 0x1) ? 'I' : 'D',
+ regs->tpc);
+- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
++ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
+ }
+
+ struct sun4v_error_entry {
+@@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
+
+ printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
+ regs->tpc, tl);
+- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
++ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
+ printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
+- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
++ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
+ (void *) regs->u_regs[UREG_I7]);
+ printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
+ "pte[%lx] error[%lx]\n",
+@@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
+
+ printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
+ regs->tpc, tl);
+- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
++ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
+ printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
+- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
++ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
+ (void *) regs->u_regs[UREG_I7]);
+ printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
+ "pte[%lx] error[%lx]\n",
+@@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
+ fp = (unsigned long)sf->fp + STACK_BIAS;
+ }
+
+- printk(" [%016lx] %pS\n", pc, (void *) pc);
++ printk(" [%016lx] %pA\n", pc, (void *) pc);
+ #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ if ((pc + 8UL) == (unsigned long) &return_to_handler) {
+ int index = tsk->curr_ret_stack;
+ if (tsk->ret_stack && index >= graph) {
+ pc = tsk->ret_stack[index - graph].ret;
+- printk(" [%016lx] %pS\n", pc, (void *) pc);
++ printk(" [%016lx] %pA\n", pc, (void *) pc);
+ graph++;
+ }
+ }
+@@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
+ return (struct reg_window *) (fp + STACK_BIAS);
+ }
+
++extern void gr_handle_kernel_exploit(void);
++
+ void die_if_kernel(char *str, struct pt_regs *regs)
+ {
+ static int die_counter;
+@@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
+ while (rw &&
+ count++ < 30 &&
+ kstack_valid(tp, (unsigned long) rw)) {
+- printk("Caller[%016lx]: %pS\n", rw->ins[7],
++ printk("Caller[%016lx]: %pA\n", rw->ins[7],
+ (void *) rw->ins[7]);
+
+ rw = kernel_stack_up(rw);
+@@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
+ }
+ user_instruction_dump ((unsigned int __user *) regs->tpc);
+ }
+- if (regs->tstate & TSTATE_PRIV)
++ if (regs->tstate & TSTATE_PRIV) {
++ gr_handle_kernel_exploit();
+ do_exit(SIGKILL);
++ }
+ do_exit(SIGSEGV);
+ }
+ EXPORT_SYMBOL(die_if_kernel);
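The lvl == 6 branches added to bad_trap() and bad_trap_tl1() are the receiving end of the tvs instructions planted in the atomic and rwlock code: a software trap 6 from kernel mode means a reference count just wrapped, so the event is reported via pax_report_refcount_overflow() before die_if_kernel() (which now also invokes the gr_handle_kernel_exploit() hook) kills the offender. The bug class this closes, in a hypothetical driver:

    /* Hypothetical object with the classic refcount-overflow UAF:
     * if an attacker can force extra obj_get() calls, a 32-bit count
     * wraps through zero and obj_put() frees the object while it is
     * still referenced.  With REFCOUNT, the wrapping increment traps
     * instead of going negative. */
    struct obj { atomic_t refs; };

    static void obj_get(struct obj *o) { atomic_inc(&o->refs); }

    static void obj_put(struct obj *o)
    {
            if (atomic_dec_and_test(&o->refs))
                    kfree(o);               /* premature free after a wrap */
    }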
+diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
+index 76e4ac1..78f8bb1 100644
+--- a/arch/sparc/kernel/unaligned_64.c
++++ b/arch/sparc/kernel/unaligned_64.c
+@@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs *regs)
+ static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
+
+ if (__ratelimit(&ratelimit)) {
+- printk("Kernel unaligned access at TPC[%lx] %pS\n",
++ printk("Kernel unaligned access at TPC[%lx] %pA\n",
+ regs->tpc, (void *) regs->tpc);
+ }
+ }
+diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
+index a3fc437..fea9957 100644
+--- a/arch/sparc/lib/Makefile
++++ b/arch/sparc/lib/Makefile
+@@ -2,7 +2,7 @@
+ #
+
+ asflags-y := -ansi -DST_DIV0=0x02
+-ccflags-y := -Werror
++#ccflags-y := -Werror
+
+ lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
+ lib-$(CONFIG_SPARC32) += memcpy.o memset.o
+diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
+index 59186e0..f747d7a 100644
+--- a/arch/sparc/lib/atomic_64.S
++++ b/arch/sparc/lib/atomic_64.S
+@@ -18,7 +18,12 @@
+ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+ 1: lduw [%o1], %g1
+- add %g1, %o0, %g7
++ addcc %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %icc, 6
++#endif
++
+ cas [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %icc, BACKOFF_LABEL(2f, 1b)
+@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
+ 2: BACKOFF_SPIN(%o2, %o3, 1b)
+ .size atomic_add, .-atomic_add
+
++ .globl atomic_add_unchecked
++ .type atomic_add_unchecked,#function
++atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
++ BACKOFF_SETUP(%o2)
++1: lduw [%o1], %g1
++ add %g1, %o0, %g7
++ cas [%o1], %g1, %g7
++ cmp %g1, %g7
++ bne,pn %icc, 2f
++ nop
++ retl
++ nop
++2: BACKOFF_SPIN(%o2, %o3, 1b)
++ .size atomic_add_unchecked, .-atomic_add_unchecked
++
+ .globl atomic_sub
+ .type atomic_sub,#function
+ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+ 1: lduw [%o1], %g1
+- sub %g1, %o0, %g7
++ subcc %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %icc, 6
++#endif
++
+ cas [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %icc, BACKOFF_LABEL(2f, 1b)
+@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
+ 2: BACKOFF_SPIN(%o2, %o3, 1b)
+ .size atomic_sub, .-atomic_sub
+
++ .globl atomic_sub_unchecked
++ .type atomic_sub_unchecked,#function
++atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
++ BACKOFF_SETUP(%o2)
++1: lduw [%o1], %g1
++ sub %g1, %o0, %g7
++ cas [%o1], %g1, %g7
++ cmp %g1, %g7
++ bne,pn %icc, 2f
++ nop
++ retl
++ nop
++2: BACKOFF_SPIN(%o2, %o3, 1b)
++ .size atomic_sub_unchecked, .-atomic_sub_unchecked
++
+ .globl atomic_add_ret
+ .type atomic_add_ret,#function
+ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+ 1: lduw [%o1], %g1
+- add %g1, %o0, %g7
++ addcc %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %icc, 6
++#endif
++
+ cas [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %icc, BACKOFF_LABEL(2f, 1b)
+@@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
+ 2: BACKOFF_SPIN(%o2, %o3, 1b)
+ .size atomic_add_ret, .-atomic_add_ret
+
++ .globl atomic_add_ret_unchecked
++ .type atomic_add_ret_unchecked,#function
++atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
++ BACKOFF_SETUP(%o2)
++1: lduw [%o1], %g1
++ addcc %g1, %o0, %g7
++ cas [%o1], %g1, %g7
++ cmp %g1, %g7
++ bne,pn %icc, 2f
++ add %g7, %o0, %g7
++ sra %g7, 0, %o0
++ retl
++ nop
++2: BACKOFF_SPIN(%o2, %o3, 1b)
++ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
++
+ .globl atomic_sub_ret
+ .type atomic_sub_ret,#function
+ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+ 1: lduw [%o1], %g1
+- sub %g1, %o0, %g7
++ subcc %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %icc, 6
++#endif
++
+ cas [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %icc, BACKOFF_LABEL(2f, 1b)
+@@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
+ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+ 1: ldx [%o1], %g1
+- add %g1, %o0, %g7
++ addcc %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %xcc, 6
++#endif
++
+ casx [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
+@@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
+ 2: BACKOFF_SPIN(%o2, %o3, 1b)
+ .size atomic64_add, .-atomic64_add
+
++ .globl atomic64_add_unchecked
++ .type atomic64_add_unchecked,#function
++atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
++ BACKOFF_SETUP(%o2)
++1: ldx [%o1], %g1
++ addcc %g1, %o0, %g7
++ casx [%o1], %g1, %g7
++ cmp %g1, %g7
++ bne,pn %xcc, 2f
++ nop
++ retl
++ nop
++2: BACKOFF_SPIN(%o2, %o3, 1b)
++ .size atomic64_add_unchecked, .-atomic64_add_unchecked
++
+ .globl atomic64_sub
+ .type atomic64_sub,#function
+ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+ 1: ldx [%o1], %g1
+- sub %g1, %o0, %g7
++ subcc %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %xcc, 6
++#endif
++
+ casx [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
+@@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
+ 2: BACKOFF_SPIN(%o2, %o3, 1b)
+ .size atomic64_sub, .-atomic64_sub
+
++ .globl atomic64_sub_unchecked
++ .type atomic64_sub_unchecked,#function
++atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
++ BACKOFF_SETUP(%o2)
++1: ldx [%o1], %g1
++ subcc %g1, %o0, %g7
++ casx [%o1], %g1, %g7
++ cmp %g1, %g7
++ bne,pn %xcc, 2f
++ nop
++ retl
++ nop
++2: BACKOFF_SPIN(%o2, %o3, 1b)
++ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
++
+ .globl atomic64_add_ret
+ .type atomic64_add_ret,#function
+ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+ 1: ldx [%o1], %g1
+- add %g1, %o0, %g7
++ addcc %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %xcc, 6
++#endif
++
+ casx [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
+@@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
+ 2: BACKOFF_SPIN(%o2, %o3, 1b)
+ .size atomic64_add_ret, .-atomic64_add_ret
+
++ .globl atomic64_add_ret_unchecked
++ .type atomic64_add_ret_unchecked,#function
++atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
++ BACKOFF_SETUP(%o2)
++1: ldx [%o1], %g1
++ addcc %g1, %o0, %g7
++ casx [%o1], %g1, %g7
++ cmp %g1, %g7
++ bne,pn %xcc, 2f
++ add %g7, %o0, %g7
++ mov %g7, %o0
++ retl
++ nop
++2: BACKOFF_SPIN(%o2, %o3, 1b)
++ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
++
+ .globl atomic64_sub_ret
+ .type atomic64_sub_ret,#function
+ atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+ 1: ldx [%o1], %g1
+- sub %g1, %o0, %g7
++ subcc %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %xcc, 6
++#endif
++
+ casx [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
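The *_unchecked entry points deliberately keep the plain add/sub, with no condition codes and no trap: they back the atomic_unchecked_t type that the generic part of the patch introduces for counters allowed to wrap, such as statistics. A usage sketch, assuming the accessor names used elsewhere in the patch:

    /* Sketch: a free-running statistic where wraparound is harmless,
     * so the overflow trap is opted out of via the _unchecked API. */
    static atomic_unchecked_t rx_dropped = ATOMIC_INIT(0);

    static void note_drop(void)
    {
            atomic_inc_unchecked(&rx_dropped);
    }

The ksyms.c hunk below exports these so modules can use them too.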
+diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
+index 1b30bb3..b4a16c7 100644
+--- a/arch/sparc/lib/ksyms.c
++++ b/arch/sparc/lib/ksyms.c
+@@ -142,12 +142,18 @@ EXPORT_SYMBOL(__downgrade_write);
+
+ /* Atomic counter implementation. */
+ EXPORT_SYMBOL(atomic_add);
++EXPORT_SYMBOL(atomic_add_unchecked);
+ EXPORT_SYMBOL(atomic_add_ret);
++EXPORT_SYMBOL(atomic_add_ret_unchecked);
+ EXPORT_SYMBOL(atomic_sub);
++EXPORT_SYMBOL(atomic_sub_unchecked);
+ EXPORT_SYMBOL(atomic_sub_ret);
+ EXPORT_SYMBOL(atomic64_add);
++EXPORT_SYMBOL(atomic64_add_unchecked);
+ EXPORT_SYMBOL(atomic64_add_ret);
++EXPORT_SYMBOL(atomic64_add_ret_unchecked);
+ EXPORT_SYMBOL(atomic64_sub);
++EXPORT_SYMBOL(atomic64_sub_unchecked);
+ EXPORT_SYMBOL(atomic64_sub_ret);
+
+ /* Atomic bit operations. */
+diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
+index 301421c..e2535d1 100644
+--- a/arch/sparc/mm/Makefile
++++ b/arch/sparc/mm/Makefile
+@@ -2,7 +2,7 @@
+ #
+
+ asflags-y := -ansi
+-ccflags-y := -Werror
++#ccflags-y := -Werror
+
+ obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
+ obj-y += fault_$(BITS).o
+diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
+index 8023fd7..c8e89e9 100644
+--- a/arch/sparc/mm/fault_32.c
++++ b/arch/sparc/mm/fault_32.c
+@@ -21,6 +21,9 @@
+ #include <linux/perf_event.h>
+ #include <linux/interrupt.h>
+ #include <linux/kdebug.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <linux/compiler.h>
+
+ #include <asm/system.h>
+ #include <asm/page.h>
+@@ -208,6 +211,268 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
+ return safe_compute_effective_address(regs, insn);
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++#ifdef CONFIG_PAX_DLRESOLVE
++static void pax_emuplt_close(struct vm_area_struct *vma)
++{
++ vma->vm_mm->call_dl_resolve = 0UL;
++}
++
++static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++ unsigned int *kaddr;
++
++ vmf->page = alloc_page(GFP_HIGHUSER);
++ if (!vmf->page)
++ return VM_FAULT_OOM;
++
++ kaddr = kmap(vmf->page);
++ memset(kaddr, 0, PAGE_SIZE);
++ kaddr[0] = 0x9DE3BFA8U; /* save */
++ flush_dcache_page(vmf->page);
++ kunmap(vmf->page);
++ return VM_FAULT_MAJOR;
++}
++
++static const struct vm_operations_struct pax_vm_ops = {
++ .close = pax_emuplt_close,
++ .fault = pax_emuplt_fault
++};
++
++static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
++{
++ int ret;
++
++ INIT_LIST_HEAD(&vma->anon_vma_chain);
++ vma->vm_mm = current->mm;
++ vma->vm_start = addr;
++ vma->vm_end = addr + PAGE_SIZE;
++ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
++ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
++ vma->vm_ops = &pax_vm_ops;
++
++ ret = insert_vm_struct(current->mm, vma);
++ if (ret)
++ return ret;
++
++ ++current->mm->total_vm;
++ return 0;
++}
++#endif
++
++/*
++ * PaX: decide what to do with offenders (regs->pc = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when patched PLT trampoline was detected
++ * 3 when unpatched PLT trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++ int err;
++
++ do { /* PaX: patched PLT emulation #1 */
++ unsigned int sethi1, sethi2, jmpl;
++
++ err = get_user(sethi1, (unsigned int *)regs->pc);
++ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
++ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
++ (sethi2 & 0xFFC00000U) == 0x03000000U &&
++ (jmpl & 0xFFFFE000U) == 0x81C06000U)
++ {
++ unsigned int addr;
++
++ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
++ addr = regs->u_regs[UREG_G1];
++ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
++ regs->pc = addr;
++ regs->npc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ { /* PaX: patched PLT emulation #2 */
++ unsigned int ba;
++
++ err = get_user(ba, (unsigned int *)regs->pc);
++
++ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
++ unsigned int addr;
++
++ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
++ regs->pc = addr;
++ regs->npc = addr+4;
++ return 2;
++ }
++ }
++
++ do { /* PaX: patched PLT emulation #3 */
++ unsigned int sethi, jmpl, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->pc);
++ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
++ err |= get_user(nop, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
++ nop == 0x01000000U)
++ {
++ unsigned int addr;
++
++ addr = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G1] = addr;
++ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
++ regs->pc = addr;
++ regs->npc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation step 1 */
++ unsigned int sethi, ba, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->pc);
++ err |= get_user(ba, (unsigned int *)(regs->pc+4));
++ err |= get_user(nop, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
++ nop == 0x01000000U)
++ {
++ unsigned int addr, save, call;
++
++ if ((ba & 0xFFC00000U) == 0x30800000U)
++ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
++ else
++ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
++
++ err = get_user(save, (unsigned int *)addr);
++ err |= get_user(call, (unsigned int *)(addr+4));
++ err |= get_user(nop, (unsigned int *)(addr+8));
++ if (err)
++ break;
++
++#ifdef CONFIG_PAX_DLRESOLVE
++ if (save == 0x9DE3BFA8U &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ nop == 0x01000000U)
++ {
++ struct vm_area_struct *vma;
++ unsigned long call_dl_resolve;
++
++ down_read(&current->mm->mmap_sem);
++ call_dl_resolve = current->mm->call_dl_resolve;
++ up_read(&current->mm->mmap_sem);
++ if (likely(call_dl_resolve))
++ goto emulate;
++
++ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
++
++ down_write(&current->mm->mmap_sem);
++ if (current->mm->call_dl_resolve) {
++ call_dl_resolve = current->mm->call_dl_resolve;
++ up_write(&current->mm->mmap_sem);
++ if (vma)
++ kmem_cache_free(vm_area_cachep, vma);
++ goto emulate;
++ }
++
++ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
++ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
++ up_write(&current->mm->mmap_sem);
++ if (vma)
++ kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ if (pax_insert_vma(vma, call_dl_resolve)) {
++ up_write(&current->mm->mmap_sem);
++ kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ current->mm->call_dl_resolve = call_dl_resolve;
++ up_write(&current->mm->mmap_sem);
++
++emulate:
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ regs->pc = call_dl_resolve;
++ regs->npc = addr+4;
++ return 3;
++ }
++#endif
++
++ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
++ if ((save & 0xFFC00000U) == 0x05000000U &&
++ (call & 0xFFFFE000U) == 0x85C0A000U &&
++ nop == 0x01000000U)
++ {
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G2] = addr + 4;
++ addr = (save & 0x003FFFFFU) << 10;
++ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
++ regs->pc = addr;
++ regs->npc = addr+4;
++ return 3;
++ }
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation step 2 */
++ unsigned int save, call, nop;
++
++ err = get_user(save, (unsigned int *)(regs->pc-4));
++ err |= get_user(call, (unsigned int *)regs->pc);
++ err |= get_user(nop, (unsigned int *)(regs->pc+4));
++ if (err)
++ break;
++
++ if (save == 0x9DE3BFA8U &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ nop == 0x01000000U)
++ {
++ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
++
++ regs->u_regs[UREG_RETPC] = regs->pc;
++ regs->pc = dl_resolve;
++ regs->npc = dl_resolve+4;
++ return 3;
++ }
++ } while (0);
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 8; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
+ int text_fault)
+ {
+@@ -280,6 +545,24 @@ good_area:
+ if(!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ } else {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
++ up_read(&mm->mmap_sem);
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 2:
++ case 3:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
++ do_group_exit(SIGKILL);
++ }
++#endif
++
+ /* Allow reads even for write-only mappings */
+ if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
+ goto bad_area;
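The magic constants in pax_handle_fetch_fault() are SPARC instruction-format matches: (insn & 0xFFC00000) == 0x03000000 recognizes sethi %hi(imm22), %g1, whose 22-bit immediate supplies bits 31..10 of a value; 0x30800000 under the same mask is ba,a (branch always, 22-bit displacement); and (insn & 0xFFFFE000) == 0x81C06000 is jmpl %g1 + simm13, %g0, whose low 13 bits are a signed immediate. The or/xor/add idiom used throughout is a branch-free sign extension; written out:

    /* Decoding sketch for the patterns matched above (SPARC V8 formats). */
    static unsigned int sethi_value(unsigned int insn)
    {
            return (insn & 0x003FFFFFU) << 10;      /* imm22 -> bits 31..10 */
    }

    static int simm13(unsigned int insn)
    {
            int v = insn & 0x1FFF;                  /* low 13 bits */
            return (v ^ 0x1000) - 0x1000;           /* sign-extend bit 12 */
    }

This is why the emulation can compute the PLT target purely from the faulting instruction bytes and redirect pc/npc without ever making the page executable.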
+diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
+index 504c062..6fcb9c6 100644
+--- a/arch/sparc/mm/fault_64.c
++++ b/arch/sparc/mm/fault_64.c
+@@ -21,6 +21,9 @@
+ #include <linux/kprobes.h>
+ #include <linux/kdebug.h>
+ #include <linux/percpu.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <linux/compiler.h>
+
+ #include <asm/page.h>
+ #include <asm/pgtable.h>
+@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
+ printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
+ regs->tpc);
+ printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
+- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
++ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
+ printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
+ dump_stack();
+ unhandled_fault(regs->tpc, current, regs);
+@@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
+ show_regs(regs);
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++#ifdef CONFIG_PAX_DLRESOLVE
++static void pax_emuplt_close(struct vm_area_struct *vma)
++{
++ vma->vm_mm->call_dl_resolve = 0UL;
++}
++
++static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++ unsigned int *kaddr;
++
++ vmf->page = alloc_page(GFP_HIGHUSER);
++ if (!vmf->page)
++ return VM_FAULT_OOM;
++
++ kaddr = kmap(vmf->page);
++ memset(kaddr, 0, PAGE_SIZE);
++ kaddr[0] = 0x9DE3BFA8U; /* save */
++ flush_dcache_page(vmf->page);
++ kunmap(vmf->page);
++ return VM_FAULT_MAJOR;
++}
++
++static const struct vm_operations_struct pax_vm_ops = {
++ .close = pax_emuplt_close,
++ .fault = pax_emuplt_fault
++};
++
++static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
++{
++ int ret;
++
++ INIT_LIST_HEAD(&vma->anon_vma_chain);
++ vma->vm_mm = current->mm;
++ vma->vm_start = addr;
++ vma->vm_end = addr + PAGE_SIZE;
++ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
++ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
++ vma->vm_ops = &pax_vm_ops;
++
++ ret = insert_vm_struct(current->mm, vma);
++ if (ret)
++ return ret;
++
++ ++current->mm->total_vm;
++ return 0;
++}
++#endif
++
++/*
++ * PaX: decide what to do with offenders (regs->tpc = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when patched PLT trampoline was detected
++ * 3 when unpatched PLT trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++ int err;
++
++ do { /* PaX: patched PLT emulation #1 */
++ unsigned int sethi1, sethi2, jmpl;
++
++ err = get_user(sethi1, (unsigned int *)regs->tpc);
++ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
++ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
++
++ if (err)
++ break;
++
++ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
++ (sethi2 & 0xFFC00000U) == 0x03000000U &&
++ (jmpl & 0xFFFFE000U) == 0x81C06000U)
++ {
++ unsigned long addr;
++
++ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
++ addr = regs->u_regs[UREG_G1];
++ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ { /* PaX: patched PLT emulation #2 */
++ unsigned int ba;
++
++ err = get_user(ba, (unsigned int *)regs->tpc);
++
++ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
++ unsigned long addr;
++
++ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ }
++
++ do { /* PaX: patched PLT emulation #3 */
++ unsigned int sethi, jmpl, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
++ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++
++ addr = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G1] = addr;
++ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #4 */
++ unsigned int sethi, mov1, call, mov2;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
++ err |= get_user(call, (unsigned int *)(regs->tpc+8));
++ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ mov1 == 0x8210000FU &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ mov2 == 0x9E100001U)
++ {
++ unsigned long addr;
++
++ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
++ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #5 */
++ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
++ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
++ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
++ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
++ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
++ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
++ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ (sethi1 & 0xFFC00000U) == 0x03000000U &&
++ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
++ (or1 & 0xFFFFE000U) == 0x82106000U &&
++ (or2 & 0xFFFFE000U) == 0x8A116000U &&
++ sllx == 0x83287020U &&
++ jmpl == 0x81C04005U &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++
++ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
++ regs->u_regs[UREG_G1] <<= 32;
++ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
++ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #6 */
++ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
++ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
++ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
++ err |= get_user(or, (unsigned int *)(regs->tpc+16));
++ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
++ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ (sethi1 & 0xFFC00000U) == 0x03000000U &&
++ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
++ sllx == 0x83287020U &&
++ (or & 0xFFFFE000U) == 0x8A116000U &&
++ jmpl == 0x81C04005U &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++
++ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G1] <<= 32;
++ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
++ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation step 1 */
++ unsigned int sethi, ba, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
++ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++ unsigned int save, call;
++ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
++
++ if ((ba & 0xFFC00000U) == 0x30800000U)
++ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
++ else
++ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ err = get_user(save, (unsigned int *)addr);
++ err |= get_user(call, (unsigned int *)(addr+4));
++ err |= get_user(nop, (unsigned int *)(addr+8));
++ if (err)
++ break;
++
++#ifdef CONFIG_PAX_DLRESOLVE
++ if (save == 0x9DE3BFA8U &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ nop == 0x01000000U)
++ {
++ struct vm_area_struct *vma;
++ unsigned long call_dl_resolve;
++
++ down_read(&current->mm->mmap_sem);
++ call_dl_resolve = current->mm->call_dl_resolve;
++ up_read(&current->mm->mmap_sem);
++ if (likely(call_dl_resolve))
++ goto emulate;
++
++ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
++
++ down_write(&current->mm->mmap_sem);
++ if (current->mm->call_dl_resolve) {
++ call_dl_resolve = current->mm->call_dl_resolve;
++ up_write(&current->mm->mmap_sem);
++ if (vma)
++ kmem_cache_free(vm_area_cachep, vma);
++ goto emulate;
++ }
++
++ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
++ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
++ up_write(&current->mm->mmap_sem);
++ if (vma)
++ kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ if (pax_insert_vma(vma, call_dl_resolve)) {
++ up_write(&current->mm->mmap_sem);
++ kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ current->mm->call_dl_resolve = call_dl_resolve;
++ up_write(&current->mm->mmap_sem);
++
++emulate:
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ regs->tpc = call_dl_resolve;
++ regs->tnpc = addr+4;
++ return 3;
++ }
++#endif
++
++ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
++ if ((save & 0xFFC00000U) == 0x05000000U &&
++ (call & 0xFFFFE000U) == 0x85C0A000U &&
++ nop == 0x01000000U)
++ {
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G2] = addr + 4;
++ addr = (save & 0x003FFFFFU) << 10;
++ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 3;
++ }
++
++ /* PaX: 64-bit PLT stub */
++ err = get_user(sethi1, (unsigned int *)addr);
++ err |= get_user(sethi2, (unsigned int *)(addr+4));
++ err |= get_user(or1, (unsigned int *)(addr+8));
++ err |= get_user(or2, (unsigned int *)(addr+12));
++ err |= get_user(sllx, (unsigned int *)(addr+16));
++ err |= get_user(add, (unsigned int *)(addr+20));
++ err |= get_user(jmpl, (unsigned int *)(addr+24));
++ err |= get_user(nop, (unsigned int *)(addr+28));
++ if (err)
++ break;
++
++ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
++ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
++ (or1 & 0xFFFFE000U) == 0x88112000U &&
++ (or2 & 0xFFFFE000U) == 0x8A116000U &&
++ sllx == 0x89293020U &&
++ add == 0x8A010005U &&
++ jmpl == 0x89C14000U &&
++ nop == 0x01000000U)
++ {
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
++ regs->u_regs[UREG_G4] <<= 32;
++ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
++ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
++ regs->u_regs[UREG_G4] = addr + 24;
++ addr = regs->u_regs[UREG_G5];
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 3;
++ }
++ }
++ } while (0);
++
++#ifdef CONFIG_PAX_DLRESOLVE
++ do { /* PaX: unpatched PLT emulation step 2 */
++ unsigned int save, call, nop;
++
++ err = get_user(save, (unsigned int *)(regs->tpc-4));
++ err |= get_user(call, (unsigned int *)regs->tpc);
++ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
++ if (err)
++ break;
++
++ if (save == 0x9DE3BFA8U &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ nop == 0x01000000U)
++ {
++ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ dl_resolve &= 0xFFFFFFFFUL;
++
++ regs->u_regs[UREG_RETPC] = regs->tpc;
++ regs->tpc = dl_resolve;
++ regs->tnpc = dl_resolve+4;
++ return 3;
++ }
++ } while (0);
++#endif
++
++ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
++ unsigned int sethi, ba, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
++ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ (ba & 0xFFF00000U) == 0x30600000U &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++
++ addr = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G1] = addr;
++ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 8; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
+ {
+ struct mm_struct *mm = current->mm;
+@@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
+ if (!vma)
+ goto bad_area;
+
++#ifdef CONFIG_PAX_PAGEEXEC
++ /* PaX: detect ITLB misses on non-exec pages */
++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
++ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
++ {
++ if (address != regs->tpc)
++ goto good_area;
++
++ up_read(&mm->mmap_sem);
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 2:
++ case 3:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
++ do_group_exit(SIGKILL);
++ }
++#endif
++
+ /* Pure DTLB misses do not tell us whether the fault causing
+ * load/store/atomic was a write or not, it only says that there
+ * was no match. So in such a case we (carefully) read the
+diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
+index 07e1453..0a7d9e9 100644
+--- a/arch/sparc/mm/hugetlbpage.c
++++ b/arch/sparc/mm/hugetlbpage.c
+@@ -67,7 +67,7 @@ full_search:
+ }
+ return -ENOMEM;
+ }
+- if (likely(!vma || addr + len <= vma->vm_start)) {
++ if (likely(check_heap_stack_gap(vma, addr, len))) {
+ /*
+ * Remember the place where we stopped the search:
+ */
+@@ -106,7 +106,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ /* make sure it can fit in the remaining address space */
+ if (likely(addr > len)) {
+ vma = find_vma(mm, addr-len);
+- if (!vma || addr <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, addr - len, len)) {
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr-len);
+ }
+@@ -115,16 +115,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ if (unlikely(mm->mmap_base < len))
+ goto bottomup;
+
+- addr = (mm->mmap_base-len) & HPAGE_MASK;
++ addr = mm->mmap_base - len;
+
+ do {
++ addr &= HPAGE_MASK;
+ /*
+ * Lookup failure means no vma is above this address,
+ * else if new region fits below vma->vm_start,
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
+- if (likely(!vma || addr+len <= vma->vm_start)) {
++ if (likely(check_heap_stack_gap(vma, addr, len))) {
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr);
+ }
+@@ -134,8 +135,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ mm->cached_hole_size = vma->vm_start - addr;
+
+ /* try just below the current vma->vm_start */
+- addr = (vma->vm_start-len) & HPAGE_MASK;
+- } while (likely(len < vma->vm_start));
++ addr = skip_heap_stack_gap(vma, len);
++ } while (!IS_ERR_VALUE(addr));
+
+ bottomup:
+ /*
+@@ -181,8 +182,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+ if (addr) {
+ addr = ALIGN(addr, HPAGE_SIZE);
+ vma = find_vma(mm, addr);
+- if (task_size - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
+ return addr;
+ }
+ if (mm->get_unmapped_area == arch_get_unmapped_area)
+diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
+index 7b00de6..78239f4 100644
+--- a/arch/sparc/mm/init_32.c
++++ b/arch/sparc/mm/init_32.c
+@@ -316,6 +316,9 @@ extern void device_scan(void);
+ pgprot_t PAGE_SHARED __read_mostly;
+ EXPORT_SYMBOL(PAGE_SHARED);
+
++pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
++EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
++
+ void __init paging_init(void)
+ {
+ switch(sparc_cpu_model) {
+@@ -344,17 +347,17 @@ void __init paging_init(void)
+
+ /* Initialize the protection map with non-constant, MMU dependent values. */
+ protection_map[0] = PAGE_NONE;
+- protection_map[1] = PAGE_READONLY;
+- protection_map[2] = PAGE_COPY;
+- protection_map[3] = PAGE_COPY;
++ protection_map[1] = PAGE_READONLY_NOEXEC;
++ protection_map[2] = PAGE_COPY_NOEXEC;
++ protection_map[3] = PAGE_COPY_NOEXEC;
+ protection_map[4] = PAGE_READONLY;
+ protection_map[5] = PAGE_READONLY;
+ protection_map[6] = PAGE_COPY;
+ protection_map[7] = PAGE_COPY;
+ protection_map[8] = PAGE_NONE;
+- protection_map[9] = PAGE_READONLY;
+- protection_map[10] = PAGE_SHARED;
+- protection_map[11] = PAGE_SHARED;
++ protection_map[9] = PAGE_READONLY_NOEXEC;
++ protection_map[10] = PAGE_SHARED_NOEXEC;
++ protection_map[11] = PAGE_SHARED_NOEXEC;
+ protection_map[12] = PAGE_READONLY;
+ protection_map[13] = PAGE_READONLY;
+ protection_map[14] = PAGE_SHARED;
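Besides exporting the new PAGE_SHARED_NOEXEC, the paging_init() hunk switches exactly the protection_map entries whose index lacks the execute bit: the map is indexed by the low four vm_flags bits (bit 0 = VM_READ, bit 1 = VM_WRITE, bit 2 = VM_EXEC, bit 3 = VM_SHARED), so entries 1-3 and 9-11 are the readable/writable-but-not-executable cases that previously still received executable page protections on sparc32. The indexing in C terms:

    #include <stdio.h>

    enum { VM_READ = 1, VM_WRITE = 2, VM_EXEC = 4, VM_SHARED = 8 };

    /* protection_map is indexed by the low four vm_flags bits */
    static unsigned int prot_index(unsigned long vm_flags)
    {
        return vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED);
    }

    int main(void)
    {
        /* a private PROT_READ|PROT_WRITE mapping hits entry 3, one of
         * the entries the hunk above points at a _NOEXEC protection */
        printf("%u\n", prot_index(VM_READ | VM_WRITE));
        return 0;
    }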
+diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
+index cbef74e..c38fead 100644
+--- a/arch/sparc/mm/srmmu.c
++++ b/arch/sparc/mm/srmmu.c
+@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
+ PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
+ BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
+ BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
++ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
++ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
++#endif
++
+ BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
+ page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
+
+diff --git a/arch/um/Makefile b/arch/um/Makefile
+index 7730af6..cce5b19 100644
+--- a/arch/um/Makefile
++++ b/arch/um/Makefile
+@@ -61,6 +61,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
+ $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
+ $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
+
++ifdef CONSTIFY_PLUGIN
++USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
++endif
++
+ #This will adjust *FLAGS accordingly to the platform.
+ include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
+
+diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
+index 6c03acd..a5e0215 100644
+--- a/arch/um/include/asm/kmap_types.h
++++ b/arch/um/include/asm/kmap_types.h
+@@ -23,6 +23,7 @@ enum km_type {
+ KM_IRQ1,
+ KM_SOFTIRQ0,
+ KM_SOFTIRQ1,
++ KM_CLEARPAGE,
+ KM_TYPE_NR
+ };
+
+diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
+index 7cfc3ce..cbd1a58 100644
+--- a/arch/um/include/asm/page.h
++++ b/arch/um/include/asm/page.h
+@@ -14,6 +14,9 @@
+ #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
+ #define PAGE_MASK (~(PAGE_SIZE-1))
+
++#define ktla_ktva(addr) (addr)
++#define ktva_ktla(addr) (addr)
++
+ #ifndef __ASSEMBLY__
+
+ struct page;
+diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
+index c533835..84db18e 100644
+--- a/arch/um/kernel/process.c
++++ b/arch/um/kernel/process.c
+@@ -406,22 +406,6 @@ int singlestepping(void * t)
+ return 2;
+ }
+
+-/*
+- * Only x86 and x86_64 have an arch_align_stack().
+- * All other arches have "#define arch_align_stack(x) (x)"
+- * in their asm/system.h
+- * As this is included in UML from asm-um/system-generic.h,
+- * we can use it to behave as the subarch does.
+- */
+-#ifndef arch_align_stack
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+- sp -= get_random_int() % 8192;
+- return sp & ~0xf;
+-}
+-#endif
+-
+ unsigned long get_wchan(struct task_struct *p)
+ {
+ unsigned long stack_page, sp, ip;
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index efb4294..61bc18c 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -235,7 +235,7 @@ config X86_HT
+
+ config X86_32_LAZY_GS
+ def_bool y
+- depends on X86_32 && !CC_STACKPROTECTOR
++ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
+
+ config ARCH_HWEIGHT_CFLAGS
+ string
+@@ -1022,7 +1022,7 @@ choice
+
+ config NOHIGHMEM
+ bool "off"
+- depends on !X86_NUMAQ
++ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
+ ---help---
+ Linux can use up to 64 Gigabytes of physical memory on x86 systems.
+ However, the address space of 32-bit x86 processors is only 4
+@@ -1059,7 +1059,7 @@ config NOHIGHMEM
+
+ config HIGHMEM4G
+ bool "4GB"
+- depends on !X86_NUMAQ
++ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
+ ---help---
+ Select this if you have a 32-bit processor and between 1 and 4
+ gigabytes of physical RAM.
+@@ -1113,7 +1113,7 @@ config PAGE_OFFSET
+ hex
+ default 0xB0000000 if VMSPLIT_3G_OPT
+ default 0x80000000 if VMSPLIT_2G
+- default 0x78000000 if VMSPLIT_2G_OPT
++ default 0x70000000 if VMSPLIT_2G_OPT
+ default 0x40000000 if VMSPLIT_1G
+ default 0xC0000000
+ depends on X86_32
+@@ -1496,6 +1496,7 @@ config SECCOMP
+
+ config CC_STACKPROTECTOR
+ bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
++ depends on X86_64 || !PAX_MEMORY_UDEREF
+ ---help---
+ This option turns on the -fstack-protector GCC feature. This
+ feature puts, at the beginning of functions, a canary value on
+@@ -1553,6 +1554,7 @@ config KEXEC_JUMP
+ config PHYSICAL_START
+ hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
+ default "0x1000000"
++ range 0x400000 0x40000000
+ ---help---
+ This gives the physical address where the kernel is loaded.
+
+@@ -1616,6 +1618,7 @@ config X86_NEED_RELOCS
+ config PHYSICAL_ALIGN
+ hex "Alignment value to which kernel should be aligned" if X86_32
+ default "0x1000000"
++ range 0x400000 0x1000000 if PAX_KERNEXEC
+ range 0x2000 0x1000000
+ ---help---
+ This value puts the alignment restrictions on physical address
+@@ -1647,9 +1650,10 @@ config HOTPLUG_CPU
+ Say N if you want to disable CPU hotplug.
+
+ config COMPAT_VDSO
+- def_bool y
++ def_bool n
+ prompt "Compat VDSO support"
+ depends on X86_32 || IA32_EMULATION
++ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
+ ---help---
+ Map the 32-bit VDSO to the predictable old-style address too.
+
+diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
+index e3ca7e0..b30b28a 100644
+--- a/arch/x86/Kconfig.cpu
++++ b/arch/x86/Kconfig.cpu
+@@ -341,7 +341,7 @@ config X86_PPRO_FENCE
+
+ config X86_F00F_BUG
+ def_bool y
+- depends on M586MMX || M586TSC || M586 || M486 || M386
++ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
+
+ config X86_INVD_BUG
+ def_bool y
+@@ -365,7 +365,7 @@ config X86_POPAD_OK
+
+ config X86_ALIGNMENT_16
+ def_bool y
+- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
++ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
+
+ config X86_INTEL_USERCOPY
+ def_bool y
+@@ -411,7 +411,7 @@ config X86_CMPXCHG64
+ # generates cmov.
+ config X86_CMOV
+ def_bool y
+- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
++ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
+
+ config X86_MINIMUM_CPU_FAMILY
+ int
+diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
+index bf56e17..05f9891 100644
+--- a/arch/x86/Kconfig.debug
++++ b/arch/x86/Kconfig.debug
+@@ -81,7 +81,7 @@ config X86_PTDUMP
+ config DEBUG_RODATA
+ bool "Write protect kernel read-only data structures"
+ default y
+- depends on DEBUG_KERNEL
++ depends on DEBUG_KERNEL && BROKEN
+ ---help---
+ Mark the kernel read-only data as write-protected in the pagetables,
+ in order to catch accidental (and incorrect) writes to such const
+@@ -99,7 +99,7 @@ config DEBUG_RODATA_TEST
+
+ config DEBUG_SET_MODULE_RONX
+ bool "Set loadable kernel module data as NX and text as RO"
+- depends on MODULES
++ depends on MODULES && BROKEN
+ ---help---
+ This option helps catch unintended modifications to loadable
+ kernel module's text and read-only data. It also prevents execution
+diff --git a/arch/x86/Makefile b/arch/x86/Makefile
+index b02e509..2631e48 100644
+--- a/arch/x86/Makefile
++++ b/arch/x86/Makefile
+@@ -46,6 +46,7 @@ else
+ UTS_MACHINE := x86_64
+ CHECKFLAGS += -D__x86_64__ -m64
+
++ biarch := $(call cc-option,-m64)
+ KBUILD_AFLAGS += -m64
+ KBUILD_CFLAGS += -m64
+
+@@ -195,3 +196,12 @@ define archhelp
+ echo ' FDARGS="..." arguments for the booted kernel'
+ echo ' FDINITRD=file initrd for the booted kernel'
+ endef
++
++define OLD_LD
++
++*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
++*** Please upgrade your binutils to 2.18 or newer
++endef
++
++archprepare:
++ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
+diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
+index 95365a8..52f857b 100644
+--- a/arch/x86/boot/Makefile
++++ b/arch/x86/boot/Makefile
+@@ -63,6 +63,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
+ $(call cc-option, -fno-stack-protector) \
+ $(call cc-option, -mpreferred-stack-boundary=2)
+ KBUILD_CFLAGS += $(call cc-option, -m32)
++ifdef CONSTIFY_PLUGIN
++KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
++endif
+ KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
+ GCOV_PROFILE := n
+
+diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
+index 878e4b9..20537ab 100644
+--- a/arch/x86/boot/bitops.h
++++ b/arch/x86/boot/bitops.h
+@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
+ u8 v;
+ const u32 *p = (const u32 *)addr;
+
+- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
++ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
+ return v;
+ }
+
+@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
+
+ static inline void set_bit(int nr, void *addr)
+ {
+- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
++ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
+ }
+
+ #endif /* BOOT_BITOPS_H */
+diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
+index c7093bd..d4247ffe0 100644
+--- a/arch/x86/boot/boot.h
++++ b/arch/x86/boot/boot.h
+@@ -85,7 +85,7 @@ static inline void io_delay(void)
+ static inline u16 ds(void)
+ {
+ u16 seg;
+- asm("movw %%ds,%0" : "=rm" (seg));
++ asm volatile("movw %%ds,%0" : "=rm" (seg));
+ return seg;
+ }
+
+@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
+ static inline int memcmp(const void *s1, const void *s2, size_t len)
+ {
+ u8 diff;
+- asm("repe; cmpsb; setnz %0"
++ asm volatile("repe; cmpsb; setnz %0"
+ : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
+ return diff;
+ }
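The boot-code hunks in this and the following files all add "volatile" to asm statements whose only apparent effect is producing register outputs. Without it, GCC treats such an asm as a pure function of its inputs and may CSE, hoist, or delete it entirely, which is wrong for instructions with side effects or environment-dependent results (cpuid, rdmsr/wrmsr, segment and control-register reads). A minimal x86-64 demonstration of the hazard:

    #include <stdio.h>

    static unsigned long tsc_nonvolatile(void)
    {
        unsigned int lo, hi;
        /* no volatile: after inlining, two identical asms with the
         * same (empty) inputs may legally be combined into one */
        asm("rdtsc" : "=a" (lo), "=d" (hi));
        return ((unsigned long)hi << 32) | lo;
    }

    static unsigned long tsc_volatile(void)
    {
        unsigned int lo, hi;
        asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
        return ((unsigned long)hi << 32) | lo;
    }

    int main(void)
    {
        /* with -O2 the first difference is allowed to be 0 */
        printf("%lu\n", tsc_nonvolatile() - tsc_nonvolatile());
        printf("%lu\n", tsc_volatile() - tsc_volatile());
        return 0;
    }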
+diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
+index 09664ef..edc5d03 100644
+--- a/arch/x86/boot/compressed/Makefile
++++ b/arch/x86/boot/compressed/Makefile
+@@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
+ KBUILD_CFLAGS += $(cflags-y)
+ KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
+ KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
++ifdef CONSTIFY_PLUGIN
++KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
++endif
+
+ KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
+ GCOV_PROFILE := n
+diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
+index 67a655a..b924059 100644
+--- a/arch/x86/boot/compressed/head_32.S
++++ b/arch/x86/boot/compressed/head_32.S
+@@ -76,7 +76,7 @@ ENTRY(startup_32)
+ notl %eax
+ andl %eax, %ebx
+ #else
+- movl $LOAD_PHYSICAL_ADDR, %ebx
++ movl $____LOAD_PHYSICAL_ADDR, %ebx
+ #endif
+
+ /* Target address to relocate to for decompression */
+@@ -162,7 +162,7 @@ relocated:
+ * and where it was actually loaded.
+ */
+ movl %ebp, %ebx
+- subl $LOAD_PHYSICAL_ADDR, %ebx
++ subl $____LOAD_PHYSICAL_ADDR, %ebx
+ jz 2f /* Nothing to be done if loaded at compiled addr. */
+ /*
+ * Process relocations.
+@@ -170,8 +170,7 @@ relocated:
+
+ 1: subl $4, %edi
+ movl (%edi), %ecx
+- testl %ecx, %ecx
+- jz 2f
++ jecxz 2f
+ addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
+ jmp 1b
+ 2:
+diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
+index 35af09d..99c9676 100644
+--- a/arch/x86/boot/compressed/head_64.S
++++ b/arch/x86/boot/compressed/head_64.S
+@@ -91,7 +91,7 @@ ENTRY(startup_32)
+ notl %eax
+ andl %eax, %ebx
+ #else
+- movl $LOAD_PHYSICAL_ADDR, %ebx
++ movl $____LOAD_PHYSICAL_ADDR, %ebx
+ #endif
+
+ /* Target address to relocate to for decompression */
+@@ -233,7 +233,7 @@ ENTRY(startup_64)
+ notq %rax
+ andq %rax, %rbp
+ #else
+- movq $LOAD_PHYSICAL_ADDR, %rbp
++ movq $____LOAD_PHYSICAL_ADDR, %rbp
+ #endif
+
+ /* Target address to relocate to for decompression */
+diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
+index 3a19d04..7c1d55a 100644
+--- a/arch/x86/boot/compressed/misc.c
++++ b/arch/x86/boot/compressed/misc.c
+@@ -310,7 +310,7 @@ static void parse_elf(void *output)
+ case PT_LOAD:
+ #ifdef CONFIG_RELOCATABLE
+ dest = output;
+- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
++ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
+ #else
+ dest = (void *)(phdr->p_paddr);
+ #endif
+@@ -363,7 +363,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
+ error("Destination address too large");
+ #endif
+ #ifndef CONFIG_RELOCATABLE
+- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
++ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
+ error("Wrong destination address");
+ #endif
+
+diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
+index 89bbf4e..869908e 100644
+--- a/arch/x86/boot/compressed/relocs.c
++++ b/arch/x86/boot/compressed/relocs.c
+@@ -13,8 +13,11 @@
+
+ static void die(char *fmt, ...);
+
++#include "../../../../include/generated/autoconf.h"
++
+ #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+ static Elf32_Ehdr ehdr;
++static Elf32_Phdr *phdr;
+ static unsigned long reloc_count, reloc_idx;
+ static unsigned long *relocs;
+
+@@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
+ }
+ }
+
++static void read_phdrs(FILE *fp)
++{
++ unsigned int i;
++
++ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
++ if (!phdr) {
++ die("Unable to allocate %d program headers\n",
++ ehdr.e_phnum);
++ }
++ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
++ die("Seek to %d failed: %s\n",
++ ehdr.e_phoff, strerror(errno));
++ }
++ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
++ die("Cannot read ELF program headers: %s\n",
++ strerror(errno));
++ }
++	for(i = 0; i < ehdr.e_phnum; i++) {
++ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
++ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
++ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
++ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
++ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
++ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
++ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
++ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
++ }
++
++}
++
+ static void read_shdrs(FILE *fp)
+ {
+- int i;
++ unsigned int i;
+ Elf32_Shdr shdr;
+
+ secs = calloc(ehdr.e_shnum, sizeof(struct section));
+@@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
+
+ static void read_strtabs(FILE *fp)
+ {
+- int i;
++ unsigned int i;
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ struct section *sec = &secs[i];
+ if (sec->shdr.sh_type != SHT_STRTAB) {
+@@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
+
+ static void read_symtabs(FILE *fp)
+ {
+- int i,j;
++ unsigned int i,j;
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ struct section *sec = &secs[i];
+ if (sec->shdr.sh_type != SHT_SYMTAB) {
+@@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
+
+ static void read_relocs(FILE *fp)
+ {
+- int i,j;
++ unsigned int i,j;
++ uint32_t base;
++
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ struct section *sec = &secs[i];
+ if (sec->shdr.sh_type != SHT_REL) {
+@@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
+ die("Cannot read symbol table: %s\n",
+ strerror(errno));
+ }
++ base = 0;
++ for (j = 0; j < ehdr.e_phnum; j++) {
++			if (phdr[j].p_type != PT_LOAD)
++ continue;
++ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
++ continue;
++ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
++ break;
++ }
+ for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
+ Elf32_Rel *rel = &sec->reltab[j];
+- rel->r_offset = elf32_to_cpu(rel->r_offset);
++ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
+ rel->r_info = elf32_to_cpu(rel->r_info);
+ }
+ }
+@@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
+
+ static void print_absolute_symbols(void)
+ {
+- int i;
++ unsigned int i;
+ printf("Absolute symbols\n");
+ printf(" Num: Value Size Type Bind Visibility Name\n");
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ struct section *sec = &secs[i];
+ char *sym_strtab;
+ Elf32_Sym *sh_symtab;
+- int j;
++ unsigned int j;
+
+ if (sec->shdr.sh_type != SHT_SYMTAB) {
+ continue;
+@@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
+
+ static void print_absolute_relocs(void)
+ {
+- int i, printed = 0;
++ unsigned int i, printed = 0;
+
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ struct section *sec = &secs[i];
+ struct section *sec_applies, *sec_symtab;
+ char *sym_strtab;
+ Elf32_Sym *sh_symtab;
+- int j;
++ unsigned int j;
+ if (sec->shdr.sh_type != SHT_REL) {
+ continue;
+ }
+@@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
+
+ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
+ {
+- int i;
++ unsigned int i;
+ /* Walk through the relocations */
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ char *sym_strtab;
+ Elf32_Sym *sh_symtab;
+ struct section *sec_applies, *sec_symtab;
+- int j;
++ unsigned int j;
+ struct section *sec = &secs[i];
+
+ if (sec->shdr.sh_type != SHT_REL) {
+@@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
+ !is_rel_reloc(sym_name(sym_strtab, sym))) {
+ continue;
+ }
++			/* Don't relocate actual per-cpu variables; they are absolute indices, not addresses */
++ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
++ continue;
++
++#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
++ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
++ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
++ continue;
++ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
++ continue;
++ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
++ continue;
++ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
++ continue;
++#endif
++
+ switch (r_type) {
+ case R_386_NONE:
+ case R_386_PC32:
+@@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, const void *vb)
+
+ static void emit_relocs(int as_text)
+ {
+- int i;
++ unsigned int i;
+ /* Count how many relocations I have and allocate space for them. */
+ reloc_count = 0;
+ walk_relocs(count_reloc);
+@@ -665,6 +725,7 @@ int main(int argc, char **argv)
+ fname, strerror(errno));
+ }
+ read_ehdr(fp);
++ read_phdrs(fp);
+ read_shdrs(fp);
+ read_strtabs(fp);
+ read_symtabs(fp);
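read_phdrs() mirrors the existing read_shdrs(): it loads and byte-swaps the program headers so that read_relocs() can rebase every relocation offset by CONFIG_PAGE_OFFSET + p_paddr - p_vaddr of the PT_LOAD segment whose file range contains the relocated section. The emitted relocations then name kernel virtual addresses even under the KERNEXEC layout, where p_vaddr and PAGE_OFFSET + p_paddr no longer coincide. The base computation in isolation (PAGE_OFFSET stands in for the real CONFIG_PAGE_OFFSET):

    #include <elf.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_OFFSET 0xc0000000u /* placeholder for CONFIG_PAGE_OFFSET */

    /* value added to r_offset for a section at file offset sh_offset */
    static uint32_t reloc_base(const Elf32_Phdr *phdr, unsigned int phnum,
                               uint32_t sh_offset)
    {
        for (unsigned int j = 0; j < phnum; j++) {
            if (phdr[j].p_type != PT_LOAD)
                continue;
            if (sh_offset < phdr[j].p_offset ||
                sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
                continue;
            return PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
        }
        return 0;
    }

    int main(void)
    {
        Elf32_Phdr ph = { .p_type = PT_LOAD, .p_offset = 0x1000,
                          .p_filesz = 0x100000, .p_vaddr = 0xc1000000,
                          .p_paddr = 0x2000000 };
        printf("%#x\n", reloc_base(&ph, 1, 0x2000)); /* prints 0x1000000 */
        return 0;
    }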
+diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
+index 4d3ff03..e4972ff 100644
+--- a/arch/x86/boot/cpucheck.c
++++ b/arch/x86/boot/cpucheck.c
+@@ -74,7 +74,7 @@ static int has_fpu(void)
+ u16 fcw = -1, fsw = -1;
+ u32 cr0;
+
+- asm("movl %%cr0,%0" : "=r" (cr0));
++ asm volatile("movl %%cr0,%0" : "=r" (cr0));
+ if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
+ cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
+ asm volatile("movl %0,%%cr0" : : "r" (cr0));
+@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
+ {
+ u32 f0, f1;
+
+- asm("pushfl ; "
++ asm volatile("pushfl ; "
+ "pushfl ; "
+ "popl %0 ; "
+ "movl %0,%1 ; "
+@@ -115,7 +115,7 @@ static void get_flags(void)
+ set_bit(X86_FEATURE_FPU, cpu.flags);
+
+ if (has_eflag(X86_EFLAGS_ID)) {
+- asm("cpuid"
++ asm volatile("cpuid"
+ : "=a" (max_intel_level),
+ "=b" (cpu_vendor[0]),
+ "=d" (cpu_vendor[1]),
+@@ -124,7 +124,7 @@ static void get_flags(void)
+
+ if (max_intel_level >= 0x00000001 &&
+ max_intel_level <= 0x0000ffff) {
+- asm("cpuid"
++ asm volatile("cpuid"
+ : "=a" (tfms),
+ "=c" (cpu.flags[4]),
+ "=d" (cpu.flags[0])
+@@ -136,7 +136,7 @@ static void get_flags(void)
+ cpu.model += ((tfms >> 16) & 0xf) << 4;
+ }
+
+- asm("cpuid"
++ asm volatile("cpuid"
+ : "=a" (max_amd_level)
+ : "a" (0x80000000)
+ : "ebx", "ecx", "edx");
+@@ -144,7 +144,7 @@ static void get_flags(void)
+ if (max_amd_level >= 0x80000001 &&
+ max_amd_level <= 0x8000ffff) {
+ u32 eax = 0x80000001;
+- asm("cpuid"
++ asm volatile("cpuid"
+ : "+a" (eax),
+ "=c" (cpu.flags[6]),
+ "=d" (cpu.flags[1])
+@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
+ u32 ecx = MSR_K7_HWCR;
+ u32 eax, edx;
+
+- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
++ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
+ eax &= ~(1 << 15);
+- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
++ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
+
+ get_flags(); /* Make sure it really did something */
+ err = check_flags();
+@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
+ u32 ecx = MSR_VIA_FCR;
+ u32 eax, edx;
+
+- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
++ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
+ eax |= (1<<1)|(1<<7);
+- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
++ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
+
+ set_bit(X86_FEATURE_CX8, cpu.flags);
+ err = check_flags();
+@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
+ u32 eax, edx;
+ u32 level = 1;
+
+- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
+- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
+- asm("cpuid"
++ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
++ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
++ asm volatile("cpuid"
+ : "+a" (level), "=d" (cpu.flags[0])
+ : : "ecx", "ebx");
+- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
++ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
+
+ err = check_flags();
+ }
+diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
+index bdb4d45..0476680 100644
+--- a/arch/x86/boot/header.S
++++ b/arch/x86/boot/header.S
+@@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
+ # single linked list of
+ # struct setup_data
+
+-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
++pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
+
+ #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
+ #define VO_INIT_SIZE (VO__end - VO__text)
+diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
+index db75d07..8e6d0af 100644
+--- a/arch/x86/boot/memory.c
++++ b/arch/x86/boot/memory.c
+@@ -19,7 +19,7 @@
+
+ static int detect_memory_e820(void)
+ {
+- int count = 0;
++ unsigned int count = 0;
+ struct biosregs ireg, oreg;
+ struct e820entry *desc = boot_params.e820_map;
+ static struct e820entry buf; /* static so it is zeroed */
+diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
+index 11e8c6e..fdbb1ed 100644
+--- a/arch/x86/boot/video-vesa.c
++++ b/arch/x86/boot/video-vesa.c
+@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
+
+ boot_params.screen_info.vesapm_seg = oreg.es;
+ boot_params.screen_info.vesapm_off = oreg.di;
++ boot_params.screen_info.vesapm_size = oreg.cx;
+ }
+
+ /*
+diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
+index 43eda28..5ab5fdb 100644
+--- a/arch/x86/boot/video.c
++++ b/arch/x86/boot/video.c
+@@ -96,7 +96,7 @@ static void store_mode_params(void)
+ static unsigned int get_entry(void)
+ {
+ char entry_buf[4];
+- int i, len = 0;
++ unsigned int i, len = 0;
+ int key;
+ unsigned int v;
+
+diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
+index 5b577d5..3c1fed4 100644
+--- a/arch/x86/crypto/aes-x86_64-asm_64.S
++++ b/arch/x86/crypto/aes-x86_64-asm_64.S
+@@ -8,6 +8,8 @@
+ * including this sentence is retained in full.
+ */
+
++#include <asm/alternative-asm.h>
++
+ .extern crypto_ft_tab
+ .extern crypto_it_tab
+ .extern crypto_fl_tab
+@@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
+ je B192; \
+ leaq 32(r9),r9;
+
++#define ret pax_force_retaddr 0, 1; ret
++
+ #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
+ movq r1,r2; \
+ movq r3,r4; \
+diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
+index be6d9e3..21fbbca 100644
+--- a/arch/x86/crypto/aesni-intel_asm.S
++++ b/arch/x86/crypto/aesni-intel_asm.S
+@@ -31,6 +31,7 @@
+
+ #include <linux/linkage.h>
+ #include <asm/inst.h>
++#include <asm/alternative-asm.h>
+
+ #ifdef __x86_64__
+ .data
+@@ -1436,7 +1437,9 @@ _return_T_done_decrypt:
+ pop %r14
+ pop %r13
+ pop %r12
++ pax_force_retaddr 0, 1
+ ret
++ENDPROC(aesni_gcm_dec)
+
+
+ /*****************************************************************************
+@@ -1699,7 +1702,9 @@ _return_T_done_encrypt:
+ pop %r14
+ pop %r13
+ pop %r12
++ pax_force_retaddr 0, 1
+ ret
++ENDPROC(aesni_gcm_enc)
+
+ #endif
+
+@@ -1714,6 +1719,7 @@ _key_expansion_256a:
+ pxor %xmm1, %xmm0
+ movaps %xmm0, (TKEYP)
+ add $0x10, TKEYP
++ pax_force_retaddr_bts
+ ret
+
+ .align 4
+@@ -1738,6 +1744,7 @@ _key_expansion_192a:
+ shufps $0b01001110, %xmm2, %xmm1
+ movaps %xmm1, 0x10(TKEYP)
+ add $0x20, TKEYP
++ pax_force_retaddr_bts
+ ret
+
+ .align 4
+@@ -1757,6 +1764,7 @@ _key_expansion_192b:
+
+ movaps %xmm0, (TKEYP)
+ add $0x10, TKEYP
++ pax_force_retaddr_bts
+ ret
+
+ .align 4
+@@ -1769,6 +1777,7 @@ _key_expansion_256b:
+ pxor %xmm1, %xmm2
+ movaps %xmm2, (TKEYP)
+ add $0x10, TKEYP
++ pax_force_retaddr_bts
+ ret
+
+ /*
+@@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
+ #ifndef __x86_64__
+ popl KEYP
+ #endif
++ pax_force_retaddr 0, 1
+ ret
++ENDPROC(aesni_set_key)
+
+ /*
+ * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
+@@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
+ popl KLEN
+ popl KEYP
+ #endif
++ pax_force_retaddr 0, 1
+ ret
++ENDPROC(aesni_enc)
+
+ /*
+ * _aesni_enc1: internal ABI
+@@ -1959,6 +1972,7 @@ _aesni_enc1:
+ AESENC KEY STATE
+ movaps 0x70(TKEYP), KEY
+ AESENCLAST KEY STATE
++ pax_force_retaddr_bts
+ ret
+
+ /*
+@@ -2067,6 +2081,7 @@ _aesni_enc4:
+ AESENCLAST KEY STATE2
+ AESENCLAST KEY STATE3
+ AESENCLAST KEY STATE4
++ pax_force_retaddr_bts
+ ret
+
+ /*
+@@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
+ popl KLEN
+ popl KEYP
+ #endif
++ pax_force_retaddr 0, 1
+ ret
++ENDPROC(aesni_dec)
+
+ /*
+ * _aesni_dec1: internal ABI
+@@ -2146,6 +2163,7 @@ _aesni_dec1:
+ AESDEC KEY STATE
+ movaps 0x70(TKEYP), KEY
+ AESDECLAST KEY STATE
++ pax_force_retaddr_bts
+ ret
+
+ /*
+@@ -2254,6 +2272,7 @@ _aesni_dec4:
+ AESDECLAST KEY STATE2
+ AESDECLAST KEY STATE3
+ AESDECLAST KEY STATE4
++ pax_force_retaddr_bts
+ ret
+
+ /*
+@@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
+ popl KEYP
+ popl LEN
+ #endif
++ pax_force_retaddr 0, 1
+ ret
++ENDPROC(aesni_ecb_enc)
+
+ /*
+ * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
+@@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
+ popl KEYP
+ popl LEN
+ #endif
++ pax_force_retaddr 0, 1
+ ret
++ENDPROC(aesni_ecb_dec)
+
+ /*
+ * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
+@@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
+ popl LEN
+ popl IVP
+ #endif
++ pax_force_retaddr 0, 1
+ ret
++ENDPROC(aesni_cbc_enc)
+
+ /*
+ * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
+@@ -2498,7 +2523,9 @@ ENTRY(aesni_cbc_dec)
+ popl LEN
+ popl IVP
+ #endif
++ pax_force_retaddr 0, 1
+ ret
++ENDPROC(aesni_cbc_dec)
+
+ #ifdef __x86_64__
+ .align 16
+@@ -2524,6 +2551,7 @@ _aesni_inc_init:
+ mov $1, TCTR_LOW
+ MOVQ_R64_XMM TCTR_LOW INC
+ MOVQ_R64_XMM CTR TCTR_LOW
++ pax_force_retaddr_bts
+ ret
+
+ /*
+@@ -2552,6 +2580,7 @@ _aesni_inc:
+ .Linc_low:
+ movaps CTR, IV
+ PSHUFB_XMM BSWAP_MASK IV
++ pax_force_retaddr_bts
+ ret
+
+ /*
+@@ -2612,5 +2641,7 @@ ENTRY(aesni_ctr_enc)
+ .Lctr_enc_ret:
+ movups IV, (IVP)
+ .Lctr_enc_just_ret:
++ pax_force_retaddr 0, 1
+ ret
++ENDPROC(aesni_ctr_enc)
+ #endif
+diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
+index 391d245..67f35c2 100644
+--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
++++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
+@@ -20,6 +20,8 @@
+ *
+ */
+
++#include <asm/alternative-asm.h>
++
+ .file "blowfish-x86_64-asm.S"
+ .text
+
+@@ -151,9 +153,11 @@ __blowfish_enc_blk:
+ jnz __enc_xor;
+
+ write_block();
++ pax_force_retaddr 0, 1
+ ret;
+ __enc_xor:
+ xor_block();
++ pax_force_retaddr 0, 1
+ ret;
+
+ .align 8
+@@ -188,6 +192,7 @@ blowfish_dec_blk:
+
+ movq %r11, %rbp;
+
++ pax_force_retaddr 0, 1
+ ret;
+
+ /**********************************************************************
+@@ -342,6 +347,7 @@ __blowfish_enc_blk_4way:
+
+ popq %rbx;
+ popq %rbp;
++ pax_force_retaddr 0, 1
+ ret;
+
+ __enc_xor4:
+@@ -349,6 +355,7 @@ __enc_xor4:
+
+ popq %rbx;
+ popq %rbp;
++ pax_force_retaddr 0, 1
+ ret;
+
+ .align 8
+@@ -386,5 +393,6 @@ blowfish_dec_blk_4way:
+ popq %rbx;
+ popq %rbp;
+
++ pax_force_retaddr 0, 1
+ ret;
+
+diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
+index 6214a9b..1f4fc9a 100644
+--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
++++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
+@@ -1,3 +1,5 @@
++#include <asm/alternative-asm.h>
++
+ # enter ECRYPT_encrypt_bytes
+ .text
+ .p2align 5
+@@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
+ add %r11,%rsp
+ mov %rdi,%rax
+ mov %rsi,%rdx
++ pax_force_retaddr 0, 1
+ ret
+ # bytesatleast65:
+ ._bytesatleast65:
+@@ -891,6 +894,7 @@ ECRYPT_keysetup:
+ add %r11,%rsp
+ mov %rdi,%rax
+ mov %rsi,%rdx
++ pax_force_retaddr
+ ret
+ # enter ECRYPT_ivsetup
+ .text
+@@ -917,4 +921,5 @@ ECRYPT_ivsetup:
+ add %r11,%rsp
+ mov %rdi,%rax
+ mov %rsi,%rdx
++ pax_force_retaddr
+ ret
+diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
+index b2c2f57..8470cab 100644
+--- a/arch/x86/crypto/sha1_ssse3_asm.S
++++ b/arch/x86/crypto/sha1_ssse3_asm.S
+@@ -28,6 +28,8 @@
+ * (at your option) any later version.
+ */
+
++#include <asm/alternative-asm.h>
++
+ #define CTX %rdi // arg1
+ #define BUF %rsi // arg2
+ #define CNT %rdx // arg3
+@@ -104,6 +106,7 @@
+ pop %r12
+ pop %rbp
+ pop %rbx
++ pax_force_retaddr 0, 1
+ ret
+
+ .size \name, .-\name
+diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
+index 5b012a2..36d5364 100644
+--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
++++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
+@@ -20,6 +20,8 @@
+ *
+ */
+
++#include <asm/alternative-asm.h>
++
+ .file "twofish-x86_64-asm-3way.S"
+ .text
+
+@@ -260,6 +262,7 @@ __twofish_enc_blk_3way:
+ popq %r13;
+ popq %r14;
+ popq %r15;
++ pax_force_retaddr 0, 1
+ ret;
+
+ __enc_xor3:
+@@ -271,6 +274,7 @@ __enc_xor3:
+ popq %r13;
+ popq %r14;
+ popq %r15;
++ pax_force_retaddr 0, 1
+ ret;
+
+ .global twofish_dec_blk_3way
+@@ -312,5 +316,6 @@ twofish_dec_blk_3way:
+ popq %r13;
+ popq %r14;
+ popq %r15;
++ pax_force_retaddr 0, 1
+ ret;
+
+diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
+index 7bcf3fc..f53832f 100644
+--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
++++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
+@@ -21,6 +21,7 @@
+ .text
+
+ #include <asm/asm-offsets.h>
++#include <asm/alternative-asm.h>
+
+ #define a_offset 0
+ #define b_offset 4
+@@ -268,6 +269,7 @@ twofish_enc_blk:
+
+ popq R1
+ movq $1,%rax
++ pax_force_retaddr 0, 1
+ ret
+
+ twofish_dec_blk:
+@@ -319,4 +321,5 @@ twofish_dec_blk:
+
+ popq R1
+ movq $1,%rax
++ pax_force_retaddr 0, 1
+ ret
+diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
+index fd84387..0b4af7d 100644
+--- a/arch/x86/ia32/ia32_aout.c
++++ b/arch/x86/ia32/ia32_aout.c
+@@ -162,6 +162,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
+ unsigned long dump_start, dump_size;
+ struct user32 dump;
+
++ memset(&dump, 0, sizeof(dump));
++
+ fs = get_fs();
+ set_fs(KERNEL_DS);
+ has_dumped = 1;
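The single added memset() is an information-leak fix: struct user32 dump lives on the kernel stack, and not every byte of it (padding included) is written before the structure ends up in the core file, so whatever previously occupied that stack memory would otherwise be disclosed. The same bug class in miniature:

    #include <stdio.h>
    #include <string.h>

    struct record {  /* illustrative only, not the kernel's user32 */
        char tag;    /* 3 padding bytes typically follow           */
        int  value;
    };

    int main(void)
    {
        struct record r;
        memset(&r, 0, sizeof(r)); /* the fix: zero padding and any
                                     fields left unset further down */
        r.tag = 'x';
        r.value = 1;
        /* fwrite() stands in for the dump-to-file path; without the
         * memset the padding bytes carry stale stack contents */
        fwrite(&r, sizeof(r), 1, stdout);
        return 0;
    }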
+diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
+index 6557769..ef6ae89 100644
+--- a/arch/x86/ia32/ia32_signal.c
++++ b/arch/x86/ia32/ia32_signal.c
+@@ -169,7 +169,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
+ }
+ seg = get_fs();
+ set_fs(KERNEL_DS);
+- ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
++ ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
+ set_fs(seg);
+ if (ret >= 0 && uoss_ptr) {
+ if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
+@@ -370,7 +370,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
+ */
+ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
+ size_t frame_size,
+- void **fpstate)
++ void __user **fpstate)
+ {
+ unsigned long sp;
+
+@@ -391,7 +391,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
+
+ if (used_math()) {
+ sp = sp - sig_xstate_ia32_size;
+- *fpstate = (struct _fpstate_ia32 *) sp;
++ *fpstate = (struct _fpstate_ia32 __user *) sp;
+ if (save_i387_xstate_ia32(*fpstate) < 0)
+ return (void __user *) -1L;
+ }
+@@ -399,7 +399,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
+ sp -= frame_size;
+ /* Align the stack pointer according to the i386 ABI,
+ * i.e. so that on function entry ((sp + 4) & 15) == 0. */
+- sp = ((sp + 4) & -16ul) - 4;
++ sp = ((sp - 12) & -16ul) - 4;
+ return (void __user *) sp;
+ }
+
+@@ -457,7 +457,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
+ * These are actually not used anymore, but left because some
+ * gdb versions depend on them as a marker.
+ */
+- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
++ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
+ } put_user_catch(err);
+
+ if (err)
+@@ -499,7 +499,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+ 0xb8,
+ __NR_ia32_rt_sigreturn,
+ 0x80cd,
+- 0,
++ 0
+ };
+
+ frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
+@@ -529,16 +529,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+
+ if (ka->sa.sa_flags & SA_RESTORER)
+ restorer = ka->sa.sa_restorer;
++ else if (current->mm->context.vdso)
++ /* Return stub is in 32bit vsyscall page */
++ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
+ else
+- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
+- rt_sigreturn);
++ restorer = &frame->retcode;
+ put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
+
+ /*
+ * Not actually used anymore, but left because some gdb
+ * versions need it.
+ */
+- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
++ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
+ } put_user_catch(err);
+
+ if (err)
+diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
+index a6253ec..4ad2120 100644
+--- a/arch/x86/ia32/ia32entry.S
++++ b/arch/x86/ia32/ia32entry.S
+@@ -13,7 +13,9 @@
+ #include <asm/thread_info.h>
+ #include <asm/segment.h>
+ #include <asm/irqflags.h>
++#include <asm/pgtable.h>
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+
+ /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
+ #include <linux/elf-em.h>
+@@ -95,6 +97,32 @@ ENTRY(native_irq_enable_sysexit)
+ ENDPROC(native_irq_enable_sysexit)
+ #endif
+
++ .macro pax_enter_kernel_user
++ pax_set_fptr_mask
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ call pax_enter_kernel_user
++#endif
++ .endm
++
++ .macro pax_exit_kernel_user
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ call pax_exit_kernel_user
++#endif
++#ifdef CONFIG_PAX_RANDKSTACK
++ pushq %rax
++ pushq %r11
++ call pax_randomize_kstack
++ popq %r11
++ popq %rax
++#endif
++ .endm
++
++.macro pax_erase_kstack
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++ call pax_erase_kstack
++#endif
++.endm
++
+ /*
+ * 32bit SYSENTER instruction entry.
+ *
+@@ -121,12 +149,6 @@ ENTRY(ia32_sysenter_target)
+ CFI_REGISTER rsp,rbp
+ SWAPGS_UNSAFE_STACK
+ movq PER_CPU_VAR(kernel_stack), %rsp
+- addq $(KERNEL_STACK_OFFSET),%rsp
+- /*
+- * No need to follow this irqs on/off section: the syscall
+- * disabled irqs, here we enable it straight after entry:
+- */
+- ENABLE_INTERRUPTS(CLBR_NONE)
+ movl %ebp,%ebp /* zero extension */
+ pushq_cfi $__USER32_DS
+ /*CFI_REL_OFFSET ss,0*/
+@@ -134,25 +156,39 @@ ENTRY(ia32_sysenter_target)
+ CFI_REL_OFFSET rsp,0
+ pushfq_cfi
+ /*CFI_REL_OFFSET rflags,0*/
+- movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
+- CFI_REGISTER rip,r10
++ orl $X86_EFLAGS_IF,(%rsp)
++ GET_THREAD_INFO(%r11)
++ movl TI_sysenter_return(%r11), %r11d
++ CFI_REGISTER rip,r11
+ pushq_cfi $__USER32_CS
+ /*CFI_REL_OFFSET cs,0*/
+ movl %eax, %eax
+- pushq_cfi %r10
++ pushq_cfi %r11
+ CFI_REL_OFFSET rip,0
+ pushq_cfi %rax
+ cld
+ SAVE_ARGS 0,1,0
++ pax_enter_kernel_user
++ /*
++ * No need to follow this irqs on/off section: the syscall
++ * disabled irqs, here we enable it straight after entry:
++ */
++ ENABLE_INTERRUPTS(CLBR_NONE)
+ /* no need to do an access_ok check here because rbp has been
+ 32bit zero extended */
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ mov $PAX_USER_SHADOW_BASE,%r11
++ add %r11,%rbp
++#endif
++
+ 1: movl (%rbp),%ebp
+ .section __ex_table,"a"
+ .quad 1b,ia32_badarg
+ .previous
+- GET_THREAD_INFO(%r10)
+- orl $TS_COMPAT,TI_status(%r10)
+- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
++ GET_THREAD_INFO(%r11)
++ orl $TS_COMPAT,TI_status(%r11)
++ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
+ CFI_REMEMBER_STATE
+ jnz sysenter_tracesys
+ cmpq $(IA32_NR_syscalls-1),%rax
+@@ -162,13 +198,15 @@ sysenter_do_call:
+ sysenter_dispatch:
+ call *ia32_sys_call_table(,%rax,8)
+ movq %rax,RAX-ARGOFFSET(%rsp)
+- GET_THREAD_INFO(%r10)
++ GET_THREAD_INFO(%r11)
+ DISABLE_INTERRUPTS(CLBR_NONE)
+ TRACE_IRQS_OFF
+- testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
++ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
+ jnz sysexit_audit
+ sysexit_from_sys_call:
+- andl $~TS_COMPAT,TI_status(%r10)
++ pax_exit_kernel_user
++ pax_erase_kstack
++ andl $~TS_COMPAT,TI_status(%r11)
+ /* clear IF, that popfq doesn't enable interrupts early */
+ andl $~0x200,EFLAGS-R11(%rsp)
+ movl RIP-R11(%rsp),%edx /* User %eip */
+@@ -194,6 +232,9 @@ sysexit_from_sys_call:
+ movl %eax,%esi /* 2nd arg: syscall number */
+ movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
+ call audit_syscall_entry
++
++ pax_erase_kstack
++
+ movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
+ cmpq $(IA32_NR_syscalls-1),%rax
+ ja ia32_badsys
+@@ -205,7 +246,7 @@ sysexit_from_sys_call:
+ .endm
+
+ .macro auditsys_exit exit
+- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
++ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
+ jnz ia32_ret_from_sys_call
+ TRACE_IRQS_ON
+ sti
+@@ -215,12 +256,12 @@ sysexit_from_sys_call:
+ movzbl %al,%edi /* zero-extend that into %edi */
+ inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
+ call audit_syscall_exit
+- GET_THREAD_INFO(%r10)
++ GET_THREAD_INFO(%r11)
+ movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */
+ movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
+ cli
+ TRACE_IRQS_OFF
+- testl %edi,TI_flags(%r10)
++ testl %edi,TI_flags(%r11)
+ jz \exit
+ CLEAR_RREGS -ARGOFFSET
+ jmp int_with_check
+@@ -238,7 +279,7 @@ sysexit_audit:
+
+ sysenter_tracesys:
+ #ifdef CONFIG_AUDITSYSCALL
+- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
++ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
+ jz sysenter_auditsys
+ #endif
+ SAVE_REST
+@@ -246,6 +287,9 @@ sysenter_tracesys:
+ movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
+ movq %rsp,%rdi /* &pt_regs -> arg1 */
+ call syscall_trace_enter
++
++ pax_erase_kstack
++
+ LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
+ RESTORE_REST
+ cmpq $(IA32_NR_syscalls-1),%rax
+@@ -277,19 +321,20 @@ ENDPROC(ia32_sysenter_target)
+ ENTRY(ia32_cstar_target)
+ CFI_STARTPROC32 simple
+ CFI_SIGNAL_FRAME
+- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
++ CFI_DEF_CFA rsp,0
+ CFI_REGISTER rip,rcx
+ /*CFI_REGISTER rflags,r11*/
+ SWAPGS_UNSAFE_STACK
+ movl %esp,%r8d
+ CFI_REGISTER rsp,r8
+ movq PER_CPU_VAR(kernel_stack),%rsp
++ SAVE_ARGS 8*6,0,0
++ pax_enter_kernel_user
+ /*
+ * No need to follow this irqs on/off section: the syscall
+ * disabled irqs and here we enable it straight after entry:
+ */
+ ENABLE_INTERRUPTS(CLBR_NONE)
+- SAVE_ARGS 8,0,0
+ movl %eax,%eax /* zero extension */
+ movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
+ movq %rcx,RIP-ARGOFFSET(%rsp)
+@@ -305,13 +350,19 @@ ENTRY(ia32_cstar_target)
+ /* no need to do an access_ok check here because r8 has been
+ 32bit zero extended */
+ /* hardware stack frame is complete now */
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ mov $PAX_USER_SHADOW_BASE,%r11
++ add %r11,%r8
++#endif
++
+ 1: movl (%r8),%r9d
+ .section __ex_table,"a"
+ .quad 1b,ia32_badarg
+ .previous
+- GET_THREAD_INFO(%r10)
+- orl $TS_COMPAT,TI_status(%r10)
+- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
++ GET_THREAD_INFO(%r11)
++ orl $TS_COMPAT,TI_status(%r11)
++ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
+ CFI_REMEMBER_STATE
+ jnz cstar_tracesys
+ cmpq $IA32_NR_syscalls-1,%rax
+@@ -321,13 +372,15 @@ cstar_do_call:
+ cstar_dispatch:
+ call *ia32_sys_call_table(,%rax,8)
+ movq %rax,RAX-ARGOFFSET(%rsp)
+- GET_THREAD_INFO(%r10)
++ GET_THREAD_INFO(%r11)
+ DISABLE_INTERRUPTS(CLBR_NONE)
+ TRACE_IRQS_OFF
+- testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
++ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
+ jnz sysretl_audit
+ sysretl_from_sys_call:
+- andl $~TS_COMPAT,TI_status(%r10)
++ pax_exit_kernel_user
++ pax_erase_kstack
++ andl $~TS_COMPAT,TI_status(%r11)
+ RESTORE_ARGS 0,-ARG_SKIP,0,0,0
+ movl RIP-ARGOFFSET(%rsp),%ecx
+ CFI_REGISTER rip,rcx
+@@ -355,7 +408,7 @@ sysretl_audit:
+
+ cstar_tracesys:
+ #ifdef CONFIG_AUDITSYSCALL
+- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
++ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
+ jz cstar_auditsys
+ #endif
+ xchgl %r9d,%ebp
+@@ -364,6 +417,9 @@ cstar_tracesys:
+ movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
+ movq %rsp,%rdi /* &pt_regs -> arg1 */
+ call syscall_trace_enter
++
++ pax_erase_kstack
++
+ LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
+ RESTORE_REST
+ xchgl %ebp,%r9d
+@@ -409,20 +465,21 @@ ENTRY(ia32_syscall)
+ CFI_REL_OFFSET rip,RIP-RIP
+ PARAVIRT_ADJUST_EXCEPTION_FRAME
+ SWAPGS
+- /*
+- * No need to follow this irqs on/off section: the syscall
+- * disabled irqs and here we enable it straight after entry:
+- */
+- ENABLE_INTERRUPTS(CLBR_NONE)
+ movl %eax,%eax
+ pushq_cfi %rax
+ cld
+ /* note the registers are not zero extended to the sf.
+ this could be a problem. */
+ SAVE_ARGS 0,1,0
+- GET_THREAD_INFO(%r10)
+- orl $TS_COMPAT,TI_status(%r10)
+- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
++ pax_enter_kernel_user
++ /*
++ * No need to follow this irqs on/off section: the syscall
++ * disabled irqs and here we enable it straight after entry:
++ */
++ ENABLE_INTERRUPTS(CLBR_NONE)
++ GET_THREAD_INFO(%r11)
++ orl $TS_COMPAT,TI_status(%r11)
++ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
+ jnz ia32_tracesys
+ cmpq $(IA32_NR_syscalls-1),%rax
+ ja ia32_badsys
+@@ -441,6 +498,9 @@ ia32_tracesys:
+ movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
+ movq %rsp,%rdi /* &pt_regs -> arg1 */
+ call syscall_trace_enter
++
++ pax_erase_kstack
++
+ LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
+ RESTORE_REST
+ cmpq $(IA32_NR_syscalls-1),%rax
+@@ -455,6 +515,7 @@ ia32_badsys:
+
+ quiet_ni_syscall:
+ movq $-ENOSYS,%rax
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+
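Two recurring changes run through this file: thread-info accesses move from %r10 to %r11 (under the KERNEXEC plugin's OR method, %r10 is reserved for the function-pointer mask loaded by pax_set_fptr_mask), and each direct dereference of a userland pointer (%rbp, %r8) is first rebased by PAX_USER_SHADOW_BASE, because with UDEREF the kernel reaches userland memory only through a shifted shadow mapping. The fix-up is a plain offset; modeled in C with an illustrative base value:

    #include <stdint.h>
    #include <stdio.h>

    /* illustrative placeholder; the actual value comes from the
     * patch's amd64 memory-layout headers */
    #define PAX_USER_SHADOW_BASE (1ULL << 47)

    /* what "mov $PAX_USER_SHADOW_BASE,%r11; add %r11,%rbp" computes */
    static uint64_t uderef_shadow(uint64_t user_ptr)
    {
        return user_ptr + PAX_USER_SHADOW_BASE;
    }

    int main(void)
    {
        printf("%#llx\n",
               (unsigned long long)uderef_shadow(0x7ffffffe000ULL));
        return 0;
    }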
+diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
+index f6f5c53..b358b28 100644
+--- a/arch/x86/ia32/sys_ia32.c
++++ b/arch/x86/ia32/sys_ia32.c
+@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
+ */
+ static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
+ {
+- typeof(ubuf->st_uid) uid = 0;
+- typeof(ubuf->st_gid) gid = 0;
++ typeof(((struct stat64 *)0)->st_uid) uid = 0;
++ typeof(((struct stat64 *)0)->st_gid) gid = 0;
+ SET_UID(uid, stat->uid);
+ SET_GID(gid, stat->gid);
+ if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
+@@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
+ }
+ set_fs(KERNEL_DS);
+ ret = sys_rt_sigprocmask(how,
+- set ? (sigset_t __user *)&s : NULL,
+- oset ? (sigset_t __user *)&s : NULL,
++ set ? (sigset_t __force_user *)&s : NULL,
++ oset ? (sigset_t __force_user *)&s : NULL,
+ sigsetsize);
+ set_fs(old_fs);
+ if (ret)
+@@ -332,7 +332,7 @@ asmlinkage long sys32_alarm(unsigned int seconds)
+ return alarm_setitimer(seconds);
+ }
+
+-asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
++asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
+ int options)
+ {
+ return compat_sys_wait4(pid, stat_addr, options, NULL);
+@@ -353,7 +353,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
+ mm_segment_t old_fs = get_fs();
+
+ set_fs(KERNEL_DS);
+- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
++ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
+ set_fs(old_fs);
+ if (put_compat_timespec(&t, interval))
+ return -EFAULT;
+@@ -369,7 +369,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
+ mm_segment_t old_fs = get_fs();
+
+ set_fs(KERNEL_DS);
+- ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
++ ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
+ set_fs(old_fs);
+ if (!ret) {
+ switch (_NSIG_WORDS) {
+@@ -394,7 +394,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
+ if (copy_siginfo_from_user32(&info, uinfo))
+ return -EFAULT;
+ set_fs(KERNEL_DS);
+- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
++ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
+ set_fs(old_fs);
+ return ret;
+ }
+@@ -439,7 +439,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
+ return -EFAULT;
+
+ set_fs(KERNEL_DS);
+- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
++ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
+ count);
+ set_fs(old_fs);
+
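These sys_ia32.c hunks are sparse-annotation fixes around the set_fs(KERNEL_DS) pattern: once the address limit has been raised, passing a kernel-stack buffer where a __user pointer is expected is deliberate, and __force_user makes the address-space cast explicit instead of silent. The cp_stat64 change likewise avoids applying typeof to a dereference of a __user-qualified pointer, which trips the noderef attribute. A hedged sketch of the checker annotations involved (the patch's real definitions live in its compiler.h changes):

    /* only sparse (__CHECKER__) sees the attributes */
    #ifdef __CHECKER__
    # define __user       __attribute__((noderef, address_space(1)))
    # define __force      __attribute__((force))
    # define __force_user __force __user
    #else
    # define __user
    # define __force
    # define __force_user
    #endif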
+diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
+index 091508b..e245ff2 100644
+--- a/arch/x86/include/asm/alternative-asm.h
++++ b/arch/x86/include/asm/alternative-asm.h
+@@ -4,10 +4,10 @@
+
+ #ifdef CONFIG_SMP
+ .macro LOCK_PREFIX
+-1: lock
++672: lock
+ .section .smp_locks,"a"
+ .balign 4
+- .long 1b - .
++ .long 672b - .
+ .previous
+ .endm
+ #else
+@@ -15,6 +15,45 @@
+ .endm
+ #endif
+
++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
++ .macro pax_force_retaddr_bts rip=0
++ btsq $63,\rip(%rsp)
++ .endm
++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
++ .macro pax_force_retaddr rip=0, reload=0
++ btsq $63,\rip(%rsp)
++ .endm
++ .macro pax_force_fptr ptr
++ btsq $63,\ptr
++ .endm
++ .macro pax_set_fptr_mask
++ .endm
++#endif
++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
++ .macro pax_force_retaddr rip=0, reload=0
++ .if \reload
++ pax_set_fptr_mask
++ .endif
++ orq %r10,\rip(%rsp)
++ .endm
++ .macro pax_force_fptr ptr
++ orq %r10,\ptr
++ .endm
++ .macro pax_set_fptr_mask
++ movabs $0x8000000000000000,%r10
++ .endm
++#endif
++#else
++ .macro pax_force_retaddr rip=0, reload=0
++ .endm
++ .macro pax_force_fptr ptr
++ .endm
++ .macro pax_force_retaddr_bts rip=0
++ .endm
++ .macro pax_set_fptr_mask
++ .endm
++#endif
++
+ .macro altinstruction_entry orig alt feature orig_len alt_len
+ .long \orig - .
+ .long \alt - .
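The new macros are the KERNEXEC plugin's return-address protection. On amd64 every kernel text address has bit 63 set, so "btsq $63,(%rsp)" (or "orq %r10,(%rsp)" with the 0x8000000000000000 mask that pax_set_fptr_mask preloads under the OR method) re-asserts that bit on the saved return address right before ret. A return address overwritten with a userland pointer is thereby forced out of userland: the result is either a legitimate kernel address or a non-canonical one that faults. The transformation itself is trivial to model:

    #include <stdint.h>
    #include <stdio.h>

    /* what pax_force_retaddr applies to the saved return address */
    static uint64_t force_kernel_half(uint64_t retaddr)
    {
        return retaddr | (1ULL << 63); /* btsq $63 / orq with the mask */
    }

    int main(void)
    {
        /* a smuggled userland address no longer points at userland */
        printf("%#llx\n", (unsigned long long)
               force_kernel_half(0x00007f0012345678ULL));
        return 0;
    }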
+diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
+index 37ad100..7d47faa 100644
+--- a/arch/x86/include/asm/alternative.h
++++ b/arch/x86/include/asm/alternative.h
+@@ -89,7 +89,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
+ ".section .discard,\"aw\",@progbits\n" \
+ " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
+ ".previous\n" \
+- ".section .altinstr_replacement, \"ax\"\n" \
++ ".section .altinstr_replacement, \"a\"\n" \
+ "663:\n\t" newinstr "\n664:\n" /* replacement */ \
+ ".previous"
+
+diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
+index 1a6c09a..fec2432 100644
+--- a/arch/x86/include/asm/apic.h
++++ b/arch/x86/include/asm/apic.h
+@@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
+
+ #ifdef CONFIG_X86_LOCAL_APIC
+
+-extern unsigned int apic_verbosity;
++extern int apic_verbosity;
+ extern int local_apic_timer_c2_ok;
+
+ extern int disable_apic;
+diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
+index 20370c6..a2eb9b0 100644
+--- a/arch/x86/include/asm/apm.h
++++ b/arch/x86/include/asm/apm.h
+@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
+ __asm__ __volatile__(APM_DO_ZERO_SEGS
+ "pushl %%edi\n\t"
+ "pushl %%ebp\n\t"
+- "lcall *%%cs:apm_bios_entry\n\t"
++ "lcall *%%ss:apm_bios_entry\n\t"
+ "setc %%al\n\t"
+ "popl %%ebp\n\t"
+ "popl %%edi\n\t"
+@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
+ __asm__ __volatile__(APM_DO_ZERO_SEGS
+ "pushl %%edi\n\t"
+ "pushl %%ebp\n\t"
+- "lcall *%%cs:apm_bios_entry\n\t"
++ "lcall *%%ss:apm_bios_entry\n\t"
+ "setc %%bl\n\t"
+ "popl %%ebp\n\t"
+ "popl %%edi\n\t"
+diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
+index 58cb6d4..ca9010d 100644
+--- a/arch/x86/include/asm/atomic.h
++++ b/arch/x86/include/asm/atomic.h
+@@ -22,7 +22,18 @@
+ */
+ static inline int atomic_read(const atomic_t *v)
+ {
+- return (*(volatile int *)&(v)->counter);
++ return (*(volatile const int *)&(v)->counter);
++}
++
++/**
++ * atomic_read_unchecked - read atomic variable
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically reads the value of @v.
++ */
++static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
++{
++ return (*(volatile const int *)&(v)->counter);
+ }
+
+ /**
+@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
+ }
+
+ /**
++ * atomic_set_unchecked - set atomic variable
++ * @v: pointer of type atomic_unchecked_t
++ * @i: required value
++ *
++ * Atomically sets the value of @v to @i.
++ */
++static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
++{
++ v->counter = i;
++}
++
++/**
+ * atomic_add - add integer to atomic variable
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
+ */
+ static inline void atomic_add(int i, atomic_t *v)
+ {
+- asm volatile(LOCK_PREFIX "addl %1,%0"
++ asm volatile(LOCK_PREFIX "addl %1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "subl %1,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "+m" (v->counter)
++ : "ir" (i));
++}
++
++/**
++ * atomic_add_unchecked - add integer to atomic variable
++ * @i: integer value to add
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically adds @i to @v.
++ */
++static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "addl %1,%0\n"
+ : "+m" (v->counter)
+ : "ir" (i));
+ }
+@@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
+ */
+ static inline void atomic_sub(int i, atomic_t *v)
+ {
+- asm volatile(LOCK_PREFIX "subl %1,%0"
++ asm volatile(LOCK_PREFIX "subl %1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "addl %1,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "+m" (v->counter)
++ : "ir" (i));
++}
++
++/**
++ * atomic_sub_unchecked - subtract integer from atomic variable
++ * @i: integer value to subtract
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically subtracts @i from @v.
++ */
++static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "subl %1,%0\n"
+ : "+m" (v->counter)
+ : "ir" (i));
+ }
+@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
++ asm volatile(LOCK_PREFIX "subl %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "addl %2,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sete %1\n"
+ : "+m" (v->counter), "=qm" (c)
+ : "ir" (i) : "memory");
+ return c;
+@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
+ */
+ static inline void atomic_inc(atomic_t *v)
+ {
+- asm volatile(LOCK_PREFIX "incl %0"
++ asm volatile(LOCK_PREFIX "incl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "decl %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "+m" (v->counter));
++}
++
++/**
++ * atomic_inc_unchecked - increment atomic variable
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically increments @v by 1.
++ */
++static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "incl %0\n"
+ : "+m" (v->counter));
+ }
+
+@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
+ */
+ static inline void atomic_dec(atomic_t *v)
+ {
+- asm volatile(LOCK_PREFIX "decl %0"
++ asm volatile(LOCK_PREFIX "decl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "incl %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "+m" (v->counter));
++}
++
++/**
++ * atomic_dec_unchecked - decrement atomic variable
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically decrements @v by 1.
++ */
++static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "decl %0\n"
+ : "+m" (v->counter));
+ }
+
+@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "decl %0; sete %1"
++ asm volatile(LOCK_PREFIX "decl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "incl %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sete %1\n"
+ : "+m" (v->counter), "=qm" (c)
+ : : "memory");
+ return c != 0;
+@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "incl %0; sete %1"
++ asm volatile(LOCK_PREFIX "incl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "decl %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sete %1\n"
++ : "+m" (v->counter), "=qm" (c)
++ : : "memory");
++ return c != 0;
++}
++
++/**
++ * atomic_inc_and_test_unchecked - increment and test
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically increments @v by 1
++ * and returns true if the result is zero, or false for all
++ * other cases.
++ */
++static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
++{
++ unsigned char c;
++
++ asm volatile(LOCK_PREFIX "incl %0\n"
++ "sete %1\n"
+ : "+m" (v->counter), "=qm" (c)
+ : : "memory");
+ return c != 0;
+@@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
++ asm volatile(LOCK_PREFIX "addl %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "subl %2,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sets %1\n"
+ : "+m" (v->counter), "=qm" (c)
+ : "ir" (i) : "memory");
+ return c;
+@@ -179,7 +341,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
+ goto no_xadd;
+ #endif
+ /* Modern 486+ processor */
+- return i + xadd(&v->counter, i);
++ return i + xadd_check_overflow(&v->counter, i);
+
+ #ifdef CONFIG_M386
+ no_xadd: /* Legacy 386 processor */
+@@ -192,6 +354,34 @@ no_xadd: /* Legacy 386 processor */
+ }
+
+ /**
++ * atomic_add_return_unchecked - add integer and return
++ * @i: integer value to add
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically adds @i to @v and returns @i + @v
++ */
++static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
++{
++#ifdef CONFIG_M386
++ int __i;
++ unsigned long flags;
++ if (unlikely(boot_cpu_data.x86 <= 3))
++ goto no_xadd;
++#endif
++ /* Modern 486+ processor */
++ return i + xadd(&v->counter, i);
++
++#ifdef CONFIG_M386
++no_xadd: /* Legacy 386 processor */
++ raw_local_irq_save(flags);
++ __i = atomic_read_unchecked(v);
++ atomic_set_unchecked(v, i + __i);
++ raw_local_irq_restore(flags);
++ return i + __i;
++#endif
++}
++
++/**
+ * atomic_sub_return - subtract integer and return
+ * @v: pointer of type atomic_t
+ * @i: integer value to subtract
+@@ -204,6 +394,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
+ }
+
+ #define atomic_inc_return(v) (atomic_add_return(1, v))
++static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
++{
++ return atomic_add_return_unchecked(1, v);
++}
+ #define atomic_dec_return(v) (atomic_sub_return(1, v))
+
+ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+@@ -211,11 +405,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+ return cmpxchg(&v->counter, old, new);
+ }
+
++static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
++{
++ return cmpxchg(&v->counter, old, new);
++}
++
+ static inline int atomic_xchg(atomic_t *v, int new)
+ {
+ return xchg(&v->counter, new);
+ }
+
++static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
++{
++ return xchg(&v->counter, new);
++}
++
+ /**
+ * __atomic_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic_t
+@@ -227,12 +431,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
+ */
+ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+ {
+- int c, old;
++ int c, old, new;
+ c = atomic_read(v);
+ for (;;) {
+- if (unlikely(c == (u)))
++ if (unlikely(c == u))
+ break;
+- old = atomic_cmpxchg((v), c, c + (a));
++
++ asm volatile("addl %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "subl %2,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "=r" (new)
++ : "0" (c), "ir" (a));
++
++ old = atomic_cmpxchg(v, c, new);
+ if (likely(old == c))
+ break;
+ c = old;
+@@ -240,6 +457,48 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+ return c;
+ }
+
++/**
++ * atomic_inc_not_zero_hint - increment if not zero
++ * @v: pointer of type atomic_t
++ * @hint: probable value of the atomic before the increment
++ *
++ * This version of atomic_inc_not_zero() takes a hint of the probable
++ * value of the atomic. This lets the processor avoid reading the memory
++ * before doing the atomic read/modify/write cycle, lowering the number
++ * of bus transactions on some arches.
++ *
++ * Returns: 0 if the increment was not done, non-zero otherwise.
++ */
++#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
++static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
++{
++ int val, c = hint, new;
++
++ /* sanity test, should be removed by compiler if hint is a constant */
++ if (!hint)
++ return __atomic_add_unless(v, 1, 0);
++
++ do {
++ asm volatile("incl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "decl %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "=r" (new)
++ : "0" (c));
++
++ val = atomic_cmpxchg(v, c, new);
++ if (val == c)
++ return 1;
++ c = val;
++ } while (c);
++
++ return 0;
++}
+
+ /*
+ * atomic_dec_if_positive - decrement by 1 if old value positive
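
The hardened operations added throughout this header all follow one idiom: perform the locked arithmetic, "jno" over the recovery code if the signed result did not overflow, otherwise undo the operation and raise the overflow exception with "int $4"; the _ASM_EXTABLE(0b, 0b) entry lets the kernel's fixup path resume at the same label once the PaX handler has reported the refcount overflow. A stripped-down sketch of the idiom (an illustration, not part of the patch; assumes GCC-style inline asm on x86, and in plain user space the trap would simply kill the process instead of being reported):

#include <stdio.h>

/* Sketch of the PAX_REFCOUNT increment: on signed overflow the
 * counter is restored and "int $4" (#OF) is raised. */
static inline void checked_inc(int *counter)
{
	asm volatile("lock; incl %0\n"
		     "jno 0f\n"          /* no overflow: skip recovery */
		     "lock; decl %0\n"   /* overflow: undo the increment */
		     "int $4\n"          /* raise the overflow exception */
		     "0:\n"
		     : "+m" (*counter));
}

int main(void)
{
	int refs = 0;
	checked_inc(&refs);
	printf("%d\n", refs);	/* prints 1 */
	return 0;
}

The *_unchecked variants exist precisely to opt out of this trap for counters that are allowed to wrap, such as the statistics counters converted later in this patch.
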
+diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
+index 24098aa..1e37723 100644
+--- a/arch/x86/include/asm/atomic64_32.h
++++ b/arch/x86/include/asm/atomic64_32.h
+@@ -12,6 +12,14 @@ typedef struct {
+ u64 __aligned(8) counter;
+ } atomic64_t;
+
++#ifdef CONFIG_PAX_REFCOUNT
++typedef struct {
++ u64 __aligned(8) counter;
++} atomic64_unchecked_t;
++#else
++typedef atomic64_t atomic64_unchecked_t;
++#endif
++
+ #define ATOMIC64_INIT(val) { (val) }
+
+ #ifdef CONFIG_X86_CMPXCHG64
+@@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
+ }
+
+ /**
++ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
++ * @v: pointer to type atomic64_unchecked_t
++ * @o: expected value
++ * @n: new value
++ *
++ * Atomically sets @v to @n if it was equal to @o and returns
++ * the old value.
++ */
++
++static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
++{
++ return cmpxchg64(&v->counter, o, n);
++}
++
++/**
+ * atomic64_xchg - xchg atomic64 variable
+ * @v: pointer to type atomic64_t
+ * @n: value to assign
+@@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64_t *v, long long i)
+ }
+
+ /**
++ * atomic64_set_unchecked - set atomic64 variable
++ * @v: pointer to type atomic64_unchecked_t
++ * @i: value to assign
++ *
++ * Atomically sets the value of @v to @i.
++ */
++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
++{
++ unsigned high = (unsigned)(i >> 32);
++ unsigned low = (unsigned)i;
++ asm volatile(ATOMIC64_ALTERNATIVE(set)
++ : "+b" (low), "+c" (high)
++ : "S" (v)
++ : "eax", "edx", "memory"
++ );
++}
++
++/**
+ * atomic64_read - read atomic64 variable
+ * @v: pointer to type atomic64_t
+ *
+@@ -93,6 +134,22 @@ static inline long long atomic64_read(atomic64_t *v)
+ }
+
+ /**
++ * atomic64_read_unchecked - read atomic64 variable
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically reads the value of @v and returns it.
++ */
++static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
++{
++ long long r;
++ asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
++ : "=A" (r), "+c" (v)
++ : : "memory"
++ );
++ return r;
++}
++
++/**
+ * atomic64_add_return - add and return
+ * @i: integer value to add
+ * @v: pointer to type atomic64_t
+@@ -108,6 +165,22 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
+ return i;
+ }
+
++/**
++ * atomic64_add_return_unchecked - add and return
++ * @i: integer value to add
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically adds @i to @v and returns @i + *@v
++ */
++static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
++{
++ asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
++ : "+A" (i), "+c" (v)
++ : : "memory"
++ );
++ return i;
++}
++
+ /*
+ * Other variants with different arithmetic operators:
+ */
+@@ -131,6 +204,17 @@ static inline long long atomic64_inc_return(atomic64_t *v)
+ return a;
+ }
+
++static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
++{
++ long long a;
++ asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
++ : "=A" (a)
++ : "S" (v)
++ : "memory", "ecx"
++ );
++ return a;
++}
++
+ static inline long long atomic64_dec_return(atomic64_t *v)
+ {
+ long long a;
+@@ -159,6 +243,22 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
+ }
+
+ /**
++ * atomic64_add_unchecked - add integer to atomic64 variable
++ * @i: integer value to add
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically adds @i to @v.
++ */
++static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
++{
++ asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
++ : "+A" (i), "+c" (v)
++ : : "memory"
++ );
++ return i;
++}
++
++/**
+ * atomic64_sub - subtract the atomic64 variable
+ * @i: integer value to subtract
+ * @v: pointer to type atomic64_t
+diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
+index 0e1cbfc..5623683 100644
+--- a/arch/x86/include/asm/atomic64_64.h
++++ b/arch/x86/include/asm/atomic64_64.h
+@@ -18,7 +18,19 @@
+ */
+ static inline long atomic64_read(const atomic64_t *v)
+ {
+- return (*(volatile long *)&(v)->counter);
++ return (*(volatile const long *)&(v)->counter);
++}
++
++/**
++ * atomic64_read_unchecked - read atomic64 variable
++ * @v: pointer of type atomic64_unchecked_t
++ *
++ * Atomically reads the value of @v.
++ * Doesn't imply a read memory barrier.
++ */
++static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
++{
++ return (*(volatile const long *)&(v)->counter);
+ }
+
+ /**
+@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
+ }
+
+ /**
++ * atomic64_set_unchecked - set atomic64 variable
++ * @v: pointer to type atomic64_unchecked_t
++ * @i: required value
++ *
++ * Atomically sets the value of @v to @i.
++ */
++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
++{
++ v->counter = i;
++}
++
++/**
+ * atomic64_add - add integer to atomic64 variable
+ * @i: integer value to add
+ * @v: pointer to type atomic64_t
+@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
+ */
+ static inline void atomic64_add(long i, atomic64_t *v)
+ {
++ asm volatile(LOCK_PREFIX "addq %1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "subq %1,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "=m" (v->counter)
++ : "er" (i), "m" (v->counter));
++}
++
++/**
++ * atomic64_add_unchecked - add integer to atomic64 variable
++ * @i: integer value to add
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically adds @i to @v.
++ */
++static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
++{
+ asm volatile(LOCK_PREFIX "addq %1,%0"
+ : "=m" (v->counter)
+ : "er" (i), "m" (v->counter));
+@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
+ */
+ static inline void atomic64_sub(long i, atomic64_t *v)
+ {
+- asm volatile(LOCK_PREFIX "subq %1,%0"
++ asm volatile(LOCK_PREFIX "subq %1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "addq %1,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "=m" (v->counter)
++ : "er" (i), "m" (v->counter));
++}
++
++/**
++ * atomic64_sub_unchecked - subtract the atomic64 variable
++ * @i: integer value to subtract
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically subtracts @i from @v.
++ */
++static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "subq %1,%0\n"
+ : "=m" (v->counter)
+ : "er" (i), "m" (v->counter));
+ }
+@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
++ asm volatile(LOCK_PREFIX "subq %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "addq %2,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sete %1\n"
+ : "=m" (v->counter), "=qm" (c)
+ : "er" (i), "m" (v->counter) : "memory");
+ return c;
+@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
+ */
+ static inline void atomic64_inc(atomic64_t *v)
+ {
++ asm volatile(LOCK_PREFIX "incq %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "decq %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "=m" (v->counter)
++ : "m" (v->counter));
++}
++
++/**
++ * atomic64_inc_unchecked - increment atomic64 variable
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically increments @v by 1.
++ */
++static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
++{
+ asm volatile(LOCK_PREFIX "incq %0"
+ : "=m" (v->counter)
+ : "m" (v->counter));
+@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
+ */
+ static inline void atomic64_dec(atomic64_t *v)
+ {
+- asm volatile(LOCK_PREFIX "decq %0"
++ asm volatile(LOCK_PREFIX "decq %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "incq %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "=m" (v->counter)
++ : "m" (v->counter));
++}
++
++/**
++ * atomic64_dec_unchecked - decrement atomic64 variable
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically decrements @v by 1.
++ */
++static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "decq %0\n"
+ : "=m" (v->counter)
+ : "m" (v->counter));
+ }
+@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "decq %0; sete %1"
++ asm volatile(LOCK_PREFIX "decq %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "incq %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sete %1\n"
+ : "=m" (v->counter), "=qm" (c)
+ : "m" (v->counter) : "memory");
+ return c != 0;
+@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "incq %0; sete %1"
++ asm volatile(LOCK_PREFIX "incq %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "decq %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sete %1\n"
+ : "=m" (v->counter), "=qm" (c)
+ : "m" (v->counter) : "memory");
+ return c != 0;
+@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
++ asm volatile(LOCK_PREFIX "addq %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "subq %2,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sets %1\n"
+ : "=m" (v->counter), "=qm" (c)
+ : "er" (i), "m" (v->counter) : "memory");
+ return c;
+@@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
+ */
+ static inline long atomic64_add_return(long i, atomic64_t *v)
+ {
++ return i + xadd_check_overflow(&v->counter, i);
++}
++
++/**
++ * atomic64_add_return_unchecked - add and return
++ * @i: integer value to add
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically adds @i to @v and returns @i + @v
++ */
++static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
++{
+ return i + xadd(&v->counter, i);
+ }
+
+@@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
+ }
+
+ #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
++static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
++{
++ return atomic64_add_return_unchecked(1, v);
++}
+ #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
+
+ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
+@@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
+ return cmpxchg(&v->counter, old, new);
+ }
+
++static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
++{
++ return cmpxchg(&v->counter, old, new);
++}
++
+ static inline long atomic64_xchg(atomic64_t *v, long new)
+ {
+ return xchg(&v->counter, new);
+@@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
+ */
+ static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
+ {
+- long c, old;
++ long c, old, new;
+ c = atomic64_read(v);
+ for (;;) {
+- if (unlikely(c == (u)))
++ if (unlikely(c == u))
+ break;
+- old = atomic64_cmpxchg((v), c, c + (a));
++
++ asm volatile("add %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "sub %2,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "=r" (new)
++ : "0" (c), "ir" (a));
++
++ old = atomic64_cmpxchg(v, c, new);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+- return c != (u);
++ return c != u;
+ }
+
+ #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
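
Note the shape of the rewritten add_unless loop (the 32-bit header carries the same change in __atomic_add_unless): the new value is computed with an overflow-checked register add before cmpxchg commits it, so a wrapped sum can never reach memory. In rough C, with the GCC builtin standing in for the "add; jno; sub; int $4" sequence (a sketch only, not what the patch compiles to):

/* Hedged sketch of the patched atomic64_add_unless() loop. */
static int add_unless_sketch(atomic64_t *v, long a, long u)
{
	long c = atomic64_read(v), old, new;

	for (;;) {
		if (unlikely(c == u))
			break;
		if (__builtin_add_overflow(c, a, &new))
			BUG();	/* the asm version raises #OF here */
		old = atomic64_cmpxchg(v, c, new);
		if (likely(old == c))
			break;
		c = old;
	}
	return c != u;
}
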
+diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
+index 1775d6e..b65017f 100644
+--- a/arch/x86/include/asm/bitops.h
++++ b/arch/x86/include/asm/bitops.h
+@@ -38,7 +38,7 @@
+ * a mask operation on a byte.
+ */
+ #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
+-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
++#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
+ #define CONST_MASK(nr) (1 << ((nr) & 7))
+
+ /**
+diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
+index 5e1a2ee..c9f9533 100644
+--- a/arch/x86/include/asm/boot.h
++++ b/arch/x86/include/asm/boot.h
+@@ -11,10 +11,15 @@
+ #include <asm/pgtable_types.h>
+
+ /* Physical address where kernel should be loaded. */
+-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
++#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
+ + (CONFIG_PHYSICAL_ALIGN - 1)) \
+ & ~(CONFIG_PHYSICAL_ALIGN - 1))
+
++#ifndef __ASSEMBLY__
++extern unsigned char __LOAD_PHYSICAL_ADDR[];
++#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
++#endif
++
+ /* Minimum kernel alignment, as a power of two */
+ #ifdef CONFIG_X86_64
+ #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
+diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
+index 48f99f1..d78ebf9 100644
+--- a/arch/x86/include/asm/cache.h
++++ b/arch/x86/include/asm/cache.h
+@@ -5,12 +5,13 @@
+
+ /* L1 cache line size */
+ #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #define __read_mostly __attribute__((__section__(".data..read_mostly")))
++#define __read_only __attribute__((__section__(".data..read_only")))
+
+ #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
+-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
++#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
+
+ #ifdef CONFIG_X86_VSMP
+ #ifdef CONFIG_SMP
+diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
+index 4e12668..501d239 100644
+--- a/arch/x86/include/asm/cacheflush.h
++++ b/arch/x86/include/asm/cacheflush.h
+@@ -26,7 +26,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
+ unsigned long pg_flags = pg->flags & _PGMT_MASK;
+
+ if (pg_flags == _PGMT_DEFAULT)
+- return -1;
++ return ~0UL;
+ else if (pg_flags == _PGMT_WC)
+ return _PAGE_CACHE_WC;
+ else if (pg_flags == _PGMT_UC_MINUS)
+diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
+index 46fc474..b02b0f9 100644
+--- a/arch/x86/include/asm/checksum_32.h
++++ b/arch/x86/include/asm/checksum_32.h
+@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
+ int len, __wsum sum,
+ int *src_err_ptr, int *dst_err_ptr);
+
++asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
++ int len, __wsum sum,
++ int *src_err_ptr, int *dst_err_ptr);
++
++asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
++ int len, __wsum sum,
++ int *src_err_ptr, int *dst_err_ptr);
++
+ /*
+ * Note: when you get a NULL pointer exception here this means someone
+ * passed in an incorrect kernel address to one of these functions.
+@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
+ int *err_ptr)
+ {
+ might_sleep();
+- return csum_partial_copy_generic((__force void *)src, dst,
++ return csum_partial_copy_generic_from_user((__force void *)src, dst,
+ len, sum, err_ptr, NULL);
+ }
+
+@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
+ {
+ might_sleep();
+ if (access_ok(VERIFY_WRITE, dst, len))
+- return csum_partial_copy_generic(src, (__force void *)dst,
++ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
+ len, sum, NULL, err_ptr);
+
+ if (len)
+diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
+index 5d3acdf..6447a02 100644
+--- a/arch/x86/include/asm/cmpxchg.h
++++ b/arch/x86/include/asm/cmpxchg.h
+@@ -14,6 +14,8 @@ extern void __cmpxchg_wrong_size(void)
+ __compiletime_error("Bad argument size for cmpxchg");
+ extern void __xadd_wrong_size(void)
+ __compiletime_error("Bad argument size for xadd");
++extern void __xadd_check_overflow_wrong_size(void)
++ __compiletime_error("Bad argument size for xadd_check_overflow");
+
+ /*
+ * Constants for operation sizes. On 32-bit, the 64-bit size is set to
+@@ -195,6 +197,34 @@ extern void __xadd_wrong_size(void)
+ __ret; \
+ })
+
++#define __xadd_check_overflow(ptr, inc, lock) \
++ ({ \
++ __typeof__ (*(ptr)) __ret = (inc); \
++ switch (sizeof(*(ptr))) { \
++ case __X86_CASE_L: \
++ asm volatile (lock "xaddl %0, %1\n" \
++ "jno 0f\n" \
++ "mov %0,%1\n" \
++ "int $4\n0:\n" \
++ _ASM_EXTABLE(0b, 0b) \
++ : "+r" (__ret), "+m" (*(ptr)) \
++ : : "memory", "cc"); \
++ break; \
++ case __X86_CASE_Q: \
++ asm volatile (lock "xaddq %q0, %1\n" \
++ "jno 0f\n" \
++ "mov %0,%1\n" \
++ "int $4\n0:\n" \
++ _ASM_EXTABLE(0b, 0b) \
++ : "+r" (__ret), "+m" (*(ptr)) \
++ : : "memory", "cc"); \
++ break; \
++ default: \
++ __xadd_check_overflow_wrong_size(); \
++ } \
++ __ret; \
++ })
++
+ /*
+ * xadd() adds "inc" to "*ptr" and atomically returns the previous
+ * value of "*ptr".
+@@ -207,4 +237,6 @@ extern void __xadd_wrong_size(void)
+ #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
+ #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
+
++#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
++
+ #endif /* ASM_X86_CMPXCHG_H */
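
xadd_check_overflow() is the fetch-and-add form of the same check: after xadd the register operand holds the previous value, so on overflow the "mov %0,%1" writes that previous value back over the wrapped sum before trapping. The restore is not atomic, which is acceptable on a path that ends in a fatal report. A plain-C approximation (a sketch; the builtins are stand-ins for the macro's inline asm):

/* Atomic fetch-and-add with overflow check and best-effort restore. */
static long xadd_check_overflow_sketch(long *ptr, long inc)
{
	long old = __atomic_fetch_add(ptr, inc, __ATOMIC_SEQ_CST);
	long sum;

	if (__builtin_add_overflow(old, inc, &sum)) {
		*ptr = old;		/* best-effort restore, like "mov %0,%1" */
		__builtin_trap();	/* stands in for "int $4" */
	}
	return old;	/* callers compute i + old, as atomic_add_return() does */
}
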
+diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
+index f3444f7..051a196 100644
+--- a/arch/x86/include/asm/cpufeature.h
++++ b/arch/x86/include/asm/cpufeature.h
+@@ -363,7 +363,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
+ ".section .discard,\"aw\",@progbits\n"
+ " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
+ ".previous\n"
+- ".section .altinstr_replacement,\"ax\"\n"
++ ".section .altinstr_replacement,\"a\"\n"
+ "3: movb $1,%0\n"
+ "4:\n"
+ ".previous\n"
+diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
+index 41935fa..3b40db8 100644
+--- a/arch/x86/include/asm/desc.h
++++ b/arch/x86/include/asm/desc.h
+@@ -4,6 +4,7 @@
+ #include <asm/desc_defs.h>
+ #include <asm/ldt.h>
+ #include <asm/mmu.h>
++#include <asm/pgtable.h>
+
+ #include <linux/smp.h>
+
+@@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
+
+ desc->type = (info->read_exec_only ^ 1) << 1;
+ desc->type |= info->contents << 2;
++ desc->type |= info->seg_not_present ^ 1;
+
+ desc->s = 1;
+ desc->dpl = 0x3;
+@@ -34,17 +36,12 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
+ }
+
+ extern struct desc_ptr idt_descr;
+-extern gate_desc idt_table[];
+-
+-struct gdt_page {
+- struct desc_struct gdt[GDT_ENTRIES];
+-} __attribute__((aligned(PAGE_SIZE)));
+-
+-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
++extern gate_desc idt_table[256];
+
++extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
+ static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
+ {
+- return per_cpu(gdt_page, cpu).gdt;
++ return cpu_gdt_table[cpu];
+ }
+
+ #ifdef CONFIG_X86_64
+@@ -69,8 +66,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
+ unsigned long base, unsigned dpl, unsigned flags,
+ unsigned short seg)
+ {
+- gate->a = (seg << 16) | (base & 0xffff);
+- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
++ gate->gate.offset_low = base;
++ gate->gate.seg = seg;
++ gate->gate.reserved = 0;
++ gate->gate.type = type;
++ gate->gate.s = 0;
++ gate->gate.dpl = dpl;
++ gate->gate.p = 1;
++ gate->gate.offset_high = base >> 16;
+ }
+
+ #endif
+@@ -115,12 +118,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
+
+ static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
+ {
++ pax_open_kernel();
+ memcpy(&idt[entry], gate, sizeof(*gate));
++ pax_close_kernel();
+ }
+
+ static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
+ {
++ pax_open_kernel();
+ memcpy(&ldt[entry], desc, 8);
++ pax_close_kernel();
+ }
+
+ static inline void
+@@ -134,7 +141,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
+ default: size = sizeof(*gdt); break;
+ }
+
++ pax_open_kernel();
+ memcpy(&gdt[entry], desc, size);
++ pax_close_kernel();
+ }
+
+ static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
+@@ -207,7 +216,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
+
+ static inline void native_load_tr_desc(void)
+ {
++ pax_open_kernel();
+ asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
++ pax_close_kernel();
+ }
+
+ static inline void native_load_gdt(const struct desc_ptr *dtr)
+@@ -244,8 +255,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
+ struct desc_struct *gdt = get_cpu_gdt_table(cpu);
+ unsigned int i;
+
++ pax_open_kernel();
+ for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
+ gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
++ pax_close_kernel();
+ }
+
+ #define _LDT_empty(info) \
+@@ -307,7 +320,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
+ desc->limit = (limit >> 16) & 0xf;
+ }
+
+-static inline void _set_gate(int gate, unsigned type, void *addr,
++static inline void _set_gate(int gate, unsigned type, const void *addr,
+ unsigned dpl, unsigned ist, unsigned seg)
+ {
+ gate_desc s;
+@@ -326,7 +339,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
+ * Pentium F0 0F bugfix can have resulted in the mapped
+ * IDT being write-protected.
+ */
+-static inline void set_intr_gate(unsigned int n, void *addr)
++static inline void set_intr_gate(unsigned int n, const void *addr)
+ {
+ BUG_ON((unsigned)n > 0xFF);
+ _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
+@@ -356,19 +369,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
+ /*
+ * This routine sets up an interrupt gate at descriptor privilege level 3.
+ */
+-static inline void set_system_intr_gate(unsigned int n, void *addr)
++static inline void set_system_intr_gate(unsigned int n, const void *addr)
+ {
+ BUG_ON((unsigned)n > 0xFF);
+ _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
+ }
+
+-static inline void set_system_trap_gate(unsigned int n, void *addr)
++static inline void set_system_trap_gate(unsigned int n, const void *addr)
+ {
+ BUG_ON((unsigned)n > 0xFF);
+ _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
+ }
+
+-static inline void set_trap_gate(unsigned int n, void *addr)
++static inline void set_trap_gate(unsigned int n, const void *addr)
+ {
+ BUG_ON((unsigned)n > 0xFF);
+ _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
+@@ -377,19 +390,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
+ static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
+ {
+ BUG_ON((unsigned)n > 0xFF);
+- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
++ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
+ }
+
+-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
++static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
+ {
+ BUG_ON((unsigned)n > 0xFF);
+ _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
+ }
+
+-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
++static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
+ {
+ BUG_ON((unsigned)n > 0xFF);
+ _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
+ }
+
++#ifdef CONFIG_X86_32
++static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
++{
++ struct desc_struct d;
++
++ if (likely(limit))
++ limit = (limit - 1UL) >> PAGE_SHIFT;
++ pack_descriptor(&d, base, limit, 0xFB, 0xC);
++ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
++}
++#endif
++
+ #endif /* _ASM_X86_DESC_H */
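
set_user_cs() is the knob the rest of the patch turns to track the user code segment: pack_descriptor() is given type 0xFB (present, DPL 3, readable code) and flags 0xC (4 KiB granularity, 32-bit operand size), so the limit is first converted to whole pages with (limit - 1) >> PAGE_SHIFT. The switch_mm() changes later in this patch invoke it whenever the incoming mm's executable region differs, essentially:

	if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
	             prev->context.user_cs_limit != next->context.user_cs_limit))
		set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
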
+diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
+index 278441f..b95a174 100644
+--- a/arch/x86/include/asm/desc_defs.h
++++ b/arch/x86/include/asm/desc_defs.h
+@@ -31,6 +31,12 @@ struct desc_struct {
+ unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
+ unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
+ };
++ struct {
++ u16 offset_low;
++ u16 seg;
++ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
++ unsigned offset_high: 16;
++ } gate;
+ };
+ } __attribute__((packed));
+
+diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
+index 908b969..a1f4eb4 100644
+--- a/arch/x86/include/asm/e820.h
++++ b/arch/x86/include/asm/e820.h
+@@ -69,7 +69,7 @@ struct e820map {
+ #define ISA_START_ADDRESS 0xa0000
+ #define ISA_END_ADDRESS 0x100000
+
+-#define BIOS_BEGIN 0x000a0000
++#define BIOS_BEGIN 0x000c0000
+ #define BIOS_END 0x00100000
+
+ #define BIOS_ROM_BASE 0xffe00000
+diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
+index 5f962df..7289f09 100644
+--- a/arch/x86/include/asm/elf.h
++++ b/arch/x86/include/asm/elf.h
+@@ -238,7 +238,25 @@ extern int force_personality32;
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
++#ifdef CONFIG_PAX_SEGMEXEC
++#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
++#else
+ #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
++#endif
++
++#ifdef CONFIG_PAX_ASLR
++#ifdef CONFIG_X86_32
++#define PAX_ELF_ET_DYN_BASE 0x10000000UL
++
++#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
++#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
++#else
++#define PAX_ELF_ET_DYN_BASE 0x400000UL
++
++#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
++#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
++#endif
++#endif
+
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this CPU supports. This could be done in user space,
+@@ -291,9 +309,7 @@ do { \
+
+ #define ARCH_DLINFO \
+ do { \
+- if (vdso_enabled) \
+- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
+- (unsigned long)current->mm->context.vdso); \
++ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
+ } while (0)
+
+ #define AT_SYSINFO 32
+@@ -304,7 +320,7 @@ do { \
+
+ #endif /* !CONFIG_X86_32 */
+
+-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
++#define VDSO_CURRENT_BASE (current->mm->context.vdso)
+
+ #define VDSO_ENTRY \
+ ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
+@@ -318,9 +334,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
+ extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
+ #define compat_arch_setup_additional_pages syscall32_setup_pages
+
+-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
+-#define arch_randomize_brk arch_randomize_brk
+-
+ /*
+ * True on X86_32 or when emulating IA32 on X86_64
+ */
+diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
+index cc70c1c..d96d011 100644
+--- a/arch/x86/include/asm/emergency-restart.h
++++ b/arch/x86/include/asm/emergency-restart.h
+@@ -15,6 +15,6 @@ enum reboot_type {
+
+ extern enum reboot_type reboot_type;
+
+-extern void machine_emergency_restart(void);
++extern void machine_emergency_restart(void) __noreturn;
+
+ #endif /* _ASM_X86_EMERGENCY_RESTART_H */
+diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
+index d09bb03..4ea4194 100644
+--- a/arch/x86/include/asm/futex.h
++++ b/arch/x86/include/asm/futex.h
+@@ -12,16 +12,18 @@
+ #include <asm/system.h>
+
+ #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
++ typecheck(u32 __user *, uaddr); \
+ asm volatile("1:\t" insn "\n" \
+ "2:\t.section .fixup,\"ax\"\n" \
+ "3:\tmov\t%3, %1\n" \
+ "\tjmp\t2b\n" \
+ "\t.previous\n" \
+ _ASM_EXTABLE(1b, 3b) \
+- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
++ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
+ : "i" (-EFAULT), "0" (oparg), "1" (0))
+
+ #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
++ typecheck(u32 __user *, uaddr); \
+ asm volatile("1:\tmovl %2, %0\n" \
+ "\tmovl\t%0, %3\n" \
+ "\t" insn "\n" \
+@@ -34,7 +36,7 @@
+ _ASM_EXTABLE(1b, 4b) \
+ _ASM_EXTABLE(2b, 4b) \
+ : "=&a" (oldval), "=&r" (ret), \
+- "+m" (*uaddr), "=&r" (tem) \
++ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
+ : "r" (oparg), "i" (-EFAULT), "1" (0))
+
+ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+
+ switch (op) {
+ case FUTEX_OP_SET:
+- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
++ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ADD:
+- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
++ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
+ uaddr, oparg);
+ break;
+ case FUTEX_OP_OR:
+@@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ return -EFAULT;
+
+- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
++ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
+ "2:\t.section .fixup, \"ax\"\n"
+ "3:\tmov %3, %0\n"
+ "\tjmp 2b\n"
+ "\t.previous\n"
+ _ASM_EXTABLE(1b, 3b)
+- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
++ : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
+ : "i" (-EFAULT), "r" (newval), "1" (oldval)
+ : "memory"
+ );
+diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
+index eb92a6e..b98b2f4 100644
+--- a/arch/x86/include/asm/hw_irq.h
++++ b/arch/x86/include/asm/hw_irq.h
+@@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
+ extern void enable_IO_APIC(void);
+
+ /* Statistics */
+-extern atomic_t irq_err_count;
+-extern atomic_t irq_mis_count;
++extern atomic_unchecked_t irq_err_count;
++extern atomic_unchecked_t irq_mis_count;
+
+ /* EISA */
+ extern void eisa_set_level_irq(unsigned int irq);
+diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
+index c9e09ea..73888df 100644
+--- a/arch/x86/include/asm/i387.h
++++ b/arch/x86/include/asm/i387.h
+@@ -92,6 +92,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
+ {
+ int err;
+
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
++ fx = (struct i387_fxsave_struct __user *)((void *)fx + PAX_USER_SHADOW_BASE);
++#endif
++
+ /* See comment in fxsave() below. */
+ #ifdef CONFIG_AS_FXSAVEQ
+ asm volatile("1: fxrstorq %[fx]\n\t"
+@@ -121,6 +126,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
+ {
+ int err;
+
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
++ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
++#endif
++
+ /*
+ * Clear the bytes not touched by the fxsave and reserved
+ * for the SW usage.
+@@ -213,13 +223,8 @@ static inline void fpu_fxsave(struct fpu *fpu)
+ #endif /* CONFIG_X86_64 */
+
+ /* We need a safe address that is cheap to find and that is already
+- in L1 during context switch. The best choices are unfortunately
+- different for UP and SMP */
+-#ifdef CONFIG_SMP
+-#define safe_address (__per_cpu_offset[0])
+-#else
+-#define safe_address (kstat_cpu(0).cpustat.user)
+-#endif
++ in L1 during context switch. */
++#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
+
+ /*
+ * These must be called with preempt disabled
+@@ -312,7 +317,7 @@ static inline void kernel_fpu_begin(void)
+ struct thread_info *me = current_thread_info();
+ preempt_disable();
+ if (me->status & TS_USEDFPU)
+- __save_init_fpu(me->task);
++ __save_init_fpu(current);
+ else
+ clts();
+ }
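
The two PAX_USER_SHADOW_BASE fixups above encode the amd64 UDEREF convention: kernel code reaches user memory through a high shadow alias, so a raw user pointer below the shadow base is rebased before fxsave/fxrstor dereference it. Factored out, the idiom looks like this (a sketch; the patch open-codes it at each call site):

#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
static inline void __user *uderef_shadow(void __user *ptr)
{
	if ((unsigned long)ptr < PAX_USER_SHADOW_BASE)
		ptr = (void __user *)((unsigned long)ptr + PAX_USER_SHADOW_BASE);
	return ptr;
}
#endif
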
+diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
+index d8e8eef..99f81ae 100644
+--- a/arch/x86/include/asm/io.h
++++ b/arch/x86/include/asm/io.h
+@@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
+
+ #include <linux/vmalloc.h>
+
++#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
++static inline int valid_phys_addr_range(unsigned long addr, size_t count)
++{
++ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
++}
++
++static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
++{
++ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
++}
++
+ /*
+ * Convert a virtual cached pointer to an uncached pointer
+ */
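
Both helpers bound /dev/mem-style access by the CPU's reported physical address width instead of trusting the caller: the last byte of the request is rounded up to a page frame number and compared against the highest frame the CPU can address. As a worked example (illustrative figures: x86_phys_bits == 36, PAGE_SHIFT == 12), a request ending at 0x100000000 rounds to pfn 0x100000, which is below 1 << 24, so it is allowed; anything reaching 1 << 36 is refused.

/* Same check with the address width as an explicit parameter. */
static int phys_range_ok(unsigned long addr, size_t count, unsigned int phys_bits)
{
	unsigned long last_pfn = (addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT;

	return last_pfn < (1ULL << (phys_bits - PAGE_SHIFT));
}
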
+diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
+index bba3cf8..06bc8da 100644
+--- a/arch/x86/include/asm/irqflags.h
++++ b/arch/x86/include/asm/irqflags.h
+@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
+ sti; \
+ sysexit
+
++#define GET_CR0_INTO_RDI mov %cr0, %rdi
++#define SET_RDI_INTO_CR0 mov %rdi, %cr0
++#define GET_CR3_INTO_RDI mov %cr3, %rdi
++#define SET_RDI_INTO_CR3 mov %rdi, %cr3
++
+ #else
+ #define INTERRUPT_RETURN iret
+ #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
+diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
+index 5478825..839e88c 100644
+--- a/arch/x86/include/asm/kprobes.h
++++ b/arch/x86/include/asm/kprobes.h
+@@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
+ #define RELATIVEJUMP_SIZE 5
+ #define RELATIVECALL_OPCODE 0xe8
+ #define RELATIVE_ADDR_SIZE 4
+-#define MAX_STACK_SIZE 64
+-#define MIN_STACK_SIZE(ADDR) \
+- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
+- THREAD_SIZE - (unsigned long)(ADDR))) \
+- ? (MAX_STACK_SIZE) \
+- : (((unsigned long)current_thread_info()) + \
+- THREAD_SIZE - (unsigned long)(ADDR)))
++#define MAX_STACK_SIZE 64UL
++#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
+
+ #define flush_insn_slot(p) do { } while (0)
+
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index b4973f4..7c4d3fc 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -459,7 +459,7 @@ struct kvm_arch {
+ unsigned int n_requested_mmu_pages;
+ unsigned int n_max_mmu_pages;
+ unsigned int indirect_shadow_pages;
+- atomic_t invlpg_counter;
++ atomic_unchecked_t invlpg_counter;
+ struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
+ /*
+ * Hash table of struct kvm_mmu_page.
+@@ -638,7 +638,7 @@ struct kvm_x86_ops {
+ int (*check_intercept)(struct kvm_vcpu *vcpu,
+ struct x86_instruction_info *info,
+ enum x86_intercept_stage stage);
+-};
++} __do_const;
+
+ struct kvm_arch_async_pf {
+ u32 token;
+diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
+index 9cdae5d..300d20f 100644
+--- a/arch/x86/include/asm/local.h
++++ b/arch/x86/include/asm/local.h
+@@ -18,26 +18,58 @@ typedef struct {
+
+ static inline void local_inc(local_t *l)
+ {
+- asm volatile(_ASM_INC "%0"
++ asm volatile(_ASM_INC "%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ _ASM_DEC "%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ : "+m" (l->a.counter));
+ }
+
+ static inline void local_dec(local_t *l)
+ {
+- asm volatile(_ASM_DEC "%0"
++ asm volatile(_ASM_DEC "%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ _ASM_INC "%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ : "+m" (l->a.counter));
+ }
+
+ static inline void local_add(long i, local_t *l)
+ {
+- asm volatile(_ASM_ADD "%1,%0"
++ asm volatile(_ASM_ADD "%1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ _ASM_SUB "%1,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ : "+m" (l->a.counter)
+ : "ir" (i));
+ }
+
+ static inline void local_sub(long i, local_t *l)
+ {
+- asm volatile(_ASM_SUB "%1,%0"
++ asm volatile(_ASM_SUB "%1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ _ASM_ADD "%1,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ : "+m" (l->a.counter)
+ : "ir" (i));
+ }
+@@ -55,7 +87,16 @@ static inline int local_sub_and_test(long i, local_t *l)
+ {
+ unsigned char c;
+
+- asm volatile(_ASM_SUB "%2,%0; sete %1"
++ asm volatile(_ASM_SUB "%2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ _ASM_ADD "%2,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sete %1\n"
+ : "+m" (l->a.counter), "=qm" (c)
+ : "ir" (i) : "memory");
+ return c;
+@@ -73,7 +114,16 @@ static inline int local_dec_and_test(local_t *l)
+ {
+ unsigned char c;
+
+- asm volatile(_ASM_DEC "%0; sete %1"
++ asm volatile(_ASM_DEC "%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ _ASM_INC "%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sete %1\n"
+ : "+m" (l->a.counter), "=qm" (c)
+ : : "memory");
+ return c != 0;
+@@ -91,7 +141,16 @@ static inline int local_inc_and_test(local_t *l)
+ {
+ unsigned char c;
+
+- asm volatile(_ASM_INC "%0; sete %1"
++ asm volatile(_ASM_INC "%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ _ASM_DEC "%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sete %1\n"
+ : "+m" (l->a.counter), "=qm" (c)
+ : : "memory");
+ return c != 0;
+@@ -110,7 +169,16 @@ static inline int local_add_negative(long i, local_t *l)
+ {
+ unsigned char c;
+
+- asm volatile(_ASM_ADD "%2,%0; sets %1"
++ asm volatile(_ASM_ADD "%2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ _ASM_SUB "%2,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sets %1\n"
+ : "+m" (l->a.counter), "=qm" (c)
+ : "ir" (i) : "memory");
+ return c;
+@@ -133,7 +201,15 @@ static inline long local_add_return(long i, local_t *l)
+ #endif
+ /* Modern 486+ processor */
+ __i = i;
+- asm volatile(_ASM_XADD "%0, %1;"
++ asm volatile(_ASM_XADD "%0, %1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ _ASM_MOV "%0,%1\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ : "+r" (i), "+m" (l->a.counter)
+ : : "memory");
+ return i + __i;
+diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
+index 593e51d..fa69c9a 100644
+--- a/arch/x86/include/asm/mman.h
++++ b/arch/x86/include/asm/mman.h
+@@ -5,4 +5,14 @@
+
+ #include <asm-generic/mman.h>
+
++#ifdef __KERNEL__
++#ifndef __ASSEMBLY__
++#ifdef CONFIG_X86_32
++#define arch_mmap_check i386_mmap_check
++int i386_mmap_check(unsigned long addr, unsigned long len,
++ unsigned long flags);
++#endif
++#endif
++#endif
++
+ #endif /* _ASM_X86_MMAN_H */
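
arch_mmap_check is a pre-existing architecture hook: when it is defined, the generic mmap path calls it before establishing a mapping, so this #define routes every 32-bit mmap through i386_mmap_check(), which the patch implements elsewhere to police the SEGMEXEC address-space split. The caller side has roughly this shape (recalled from the generic mm code, not quoted from this patch):

	/* in the generic mmap path */
	error = arch_mmap_check(addr, len, flags);
	if (error)
		return error;
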
+diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
+index 5f55e69..e20bfb1 100644
+--- a/arch/x86/include/asm/mmu.h
++++ b/arch/x86/include/asm/mmu.h
+@@ -9,7 +9,7 @@
+ * we put the segment information here.
+ */
+ typedef struct {
+- void *ldt;
++ struct desc_struct *ldt;
+ int size;
+
+ #ifdef CONFIG_X86_64
+@@ -18,7 +18,19 @@ typedef struct {
+ #endif
+
+ struct mutex lock;
+- void *vdso;
++ unsigned long vdso;
++
++#ifdef CONFIG_X86_32
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++ unsigned long user_cs_base;
++ unsigned long user_cs_limit;
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
++ cpumask_t cpu_user_cs_mask;
++#endif
++
++#endif
++#endif
+ } mm_context_t;
+
+ #ifdef CONFIG_SMP
+diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
+index 6902152..399f3a2 100644
+--- a/arch/x86/include/asm/mmu_context.h
++++ b/arch/x86/include/asm/mmu_context.h
+@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
+
+ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+ {
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ unsigned int i;
++ pgd_t *pgd;
++
++ pax_open_kernel();
++ pgd = get_cpu_pgd(smp_processor_id());
++ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
++ set_pgd_batched(pgd+i, native_make_pgd(0));
++ pax_close_kernel();
++#endif
++
+ #ifdef CONFIG_SMP
+ if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
+ percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
+@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+ {
+ unsigned cpu = smp_processor_id();
++#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
++ int tlbstate = TLBSTATE_OK;
++#endif
+
+ if (likely(prev != next)) {
+ #ifdef CONFIG_SMP
++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
++ tlbstate = percpu_read(cpu_tlbstate.state);
++#endif
+ percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+ percpu_write(cpu_tlbstate.active_mm, next);
+ #endif
+ cpumask_set_cpu(cpu, mm_cpumask(next));
+
+ /* Re-load page tables */
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ pax_open_kernel();
++ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
++ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
++ pax_close_kernel();
++ load_cr3(get_cpu_pgd(cpu));
++#else
+ load_cr3(next->pgd);
++#endif
+
+ /* stop flush ipis for the previous mm */
+ cpumask_clear_cpu(cpu, mm_cpumask(prev));
+@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ */
+ if (unlikely(prev->context.ldt != next->context.ldt))
+ load_LDT_nolock(&next->context);
+- }
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
++ if (!(__supported_pte_mask & _PAGE_NX)) {
++ smp_mb__before_clear_bit();
++ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
++ smp_mb__after_clear_bit();
++ cpu_set(cpu, next->context.cpu_user_cs_mask);
++ }
++#endif
++
++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
++ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
++ prev->context.user_cs_limit != next->context.user_cs_limit))
++ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
+ #ifdef CONFIG_SMP
++ else if (unlikely(tlbstate != TLBSTATE_OK))
++ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
++#endif
++#endif
++
++ }
+ else {
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ pax_open_kernel();
++ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
++ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
++ pax_close_kernel();
++ load_cr3(get_cpu_pgd(cpu));
++#endif
++
++#ifdef CONFIG_SMP
+ percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+ BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
+
+@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ * tlb flush IPI delivery. We must reload CR3
+ * to make sure to use no freed page tables.
+ */
++
++#ifndef CONFIG_PAX_PER_CPU_PGD
+ load_cr3(next->pgd);
++#endif
++
+ load_LDT_nolock(&next->context);
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
++ if (!(__supported_pte_mask & _PAGE_NX))
++ cpu_set(cpu, next->context.cpu_user_cs_mask);
++#endif
++
++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
++#endif
++ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
++#endif
++
+ }
++#endif
+ }
+-#endif
+ }
+
+ #define activate_mm(prev, next) \
+diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
+index 9eae775..c914fea 100644
+--- a/arch/x86/include/asm/module.h
++++ b/arch/x86/include/asm/module.h
+@@ -5,6 +5,7 @@
+
+ #ifdef CONFIG_X86_64
+ /* X86_64 does not define MODULE_PROC_FAMILY */
++#define MODULE_PROC_FAMILY ""
+ #elif defined CONFIG_M386
+ #define MODULE_PROC_FAMILY "386 "
+ #elif defined CONFIG_M486
+@@ -59,8 +60,20 @@
+ #error unknown processor family
+ #endif
+
+-#ifdef CONFIG_X86_32
+-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
++#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
++#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
++#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
++#else
++#define MODULE_PAX_KERNEXEC ""
+ #endif
+
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++#define MODULE_PAX_UDEREF "UDEREF "
++#else
++#define MODULE_PAX_UDEREF ""
++#endif
++
++#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
++
+ #endif /* _ASM_X86_MODULE_H */
+diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
+index 7639dbf..e08a58c 100644
+--- a/arch/x86/include/asm/page_64_types.h
++++ b/arch/x86/include/asm/page_64_types.h
+@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
+
+ /* duplicated to the one in bootmem.h */
+ extern unsigned long max_pfn;
+-extern unsigned long phys_base;
++extern const unsigned long phys_base;
+
+ extern unsigned long __phys_addr(unsigned long);
+ #define __phys_reloc_hide(x) (x)
+diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
+index a7d2db9..edb023e 100644
+--- a/arch/x86/include/asm/paravirt.h
++++ b/arch/x86/include/asm/paravirt.h
+@@ -667,6 +667,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
+ val);
+ }
+
++static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
++{
++ pgdval_t val = native_pgd_val(pgd);
++
++ if (sizeof(pgdval_t) > sizeof(long))
++ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
++ val, (u64)val >> 32);
++ else
++ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
++ val);
++}
++
+ static inline void pgd_clear(pgd_t *pgdp)
+ {
+ set_pgd(pgdp, __pgd(0));
+@@ -748,6 +760,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
+ pv_mmu_ops.set_fixmap(idx, phys, flags);
+ }
+
++#ifdef CONFIG_PAX_KERNEXEC
++static inline unsigned long pax_open_kernel(void)
++{
++ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
++}
++
++static inline unsigned long pax_close_kernel(void)
++{
++ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
++}
++#else
++static inline unsigned long pax_open_kernel(void) { return 0; }
++static inline unsigned long pax_close_kernel(void) { return 0; }
++#endif
++
+ #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
+
+ static inline int arch_spin_is_locked(struct arch_spinlock *lock)
+@@ -964,7 +991,7 @@ extern void default_banner(void);
+
+ #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
+ #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
+-#define PARA_INDIRECT(addr) *%cs:addr
++#define PARA_INDIRECT(addr) *%ss:addr
+ #endif
+
+ #define INTERRUPT_RETURN \
+@@ -1041,6 +1068,21 @@ extern void default_banner(void);
+ PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
+ CLBR_NONE, \
+ jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
++
++#define GET_CR0_INTO_RDI \
++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
++ mov %rax,%rdi
++
++#define SET_RDI_INTO_CR0 \
++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
++
++#define GET_CR3_INTO_RDI \
++ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
++ mov %rax,%rdi
++
++#define SET_RDI_INTO_CR3 \
++ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
++
+ #endif /* CONFIG_X86_32 */
+
+ #endif /* __ASSEMBLY__ */
+diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
+index 8e8b9a4..f07d725 100644
+--- a/arch/x86/include/asm/paravirt_types.h
++++ b/arch/x86/include/asm/paravirt_types.h
+@@ -84,20 +84,20 @@ struct pv_init_ops {
+ */
+ unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
+ unsigned long addr, unsigned len);
+-};
++} __no_const;
+
+
+ struct pv_lazy_ops {
+ /* Set deferred update mode, used for batching operations. */
+ void (*enter)(void);
+ void (*leave)(void);
+-};
++} __no_const;
+
+ struct pv_time_ops {
+ unsigned long long (*sched_clock)(void);
+ unsigned long long (*steal_clock)(int cpu);
+ unsigned long (*get_tsc_khz)(void);
+-};
++} __no_const;
+
+ struct pv_cpu_ops {
+ /* hooks for various privileged instructions */
+@@ -193,7 +193,7 @@ struct pv_cpu_ops {
+
+ void (*start_context_switch)(struct task_struct *prev);
+ void (*end_context_switch)(struct task_struct *next);
+-};
++} __no_const;
+
+ struct pv_irq_ops {
+ /*
+@@ -224,7 +224,7 @@ struct pv_apic_ops {
+ unsigned long start_eip,
+ unsigned long start_esp);
+ #endif
+-};
++} __no_const;
+
+ struct pv_mmu_ops {
+ unsigned long (*read_cr2)(void);
+@@ -313,6 +313,7 @@ struct pv_mmu_ops {
+ struct paravirt_callee_save make_pud;
+
+ void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
++ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
+ #endif /* PAGETABLE_LEVELS == 4 */
+ #endif /* PAGETABLE_LEVELS >= 3 */
+
+@@ -324,6 +325,12 @@ struct pv_mmu_ops {
+ an mfn. We can tell which is which from the index. */
+ void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
+ phys_addr_t phys, pgprot_t flags);
++
++#ifdef CONFIG_PAX_KERNEXEC
++ unsigned long (*pax_open_kernel)(void);
++ unsigned long (*pax_close_kernel)(void);
++#endif
++
+ };
+
+ struct arch_spinlock;
+@@ -334,7 +341,7 @@ struct pv_lock_ops {
+ void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
+ int (*spin_trylock)(struct arch_spinlock *lock);
+ void (*spin_unlock)(struct arch_spinlock *lock);
+-};
++} __no_const;
+
+ /* This contains all the paravirt structures: we get a convenient
+ * number for each function using the offset which we use to indicate
+diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
+index b4389a4..b7ff22c 100644
+--- a/arch/x86/include/asm/pgalloc.h
++++ b/arch/x86/include/asm/pgalloc.h
+@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
+ pmd_t *pmd, pte_t *pte)
+ {
+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
++ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
++}
++
++static inline void pmd_populate_user(struct mm_struct *mm,
++ pmd_t *pmd, pte_t *pte)
++{
++ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
+ set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
+ }
+
+diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
+index 98391db..8f6984e 100644
+--- a/arch/x86/include/asm/pgtable-2level.h
++++ b/arch/x86/include/asm/pgtable-2level.h
+@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
+
+ static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
+ {
++ pax_open_kernel();
+ *pmdp = pmd;
++ pax_close_kernel();
+ }
+
+ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
+diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
+index effff47..f9e4035 100644
+--- a/arch/x86/include/asm/pgtable-3level.h
++++ b/arch/x86/include/asm/pgtable-3level.h
+@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
+
+ static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
+ {
++ pax_open_kernel();
+ set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
++ pax_close_kernel();
+ }
+
+ static inline void native_set_pud(pud_t *pudp, pud_t pud)
+ {
++ pax_open_kernel();
+ set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
++ pax_close_kernel();
+ }
+
+ /*
+diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
+index 18601c8..3d716d1 100644
+--- a/arch/x86/include/asm/pgtable.h
++++ b/arch/x86/include/asm/pgtable.h
+@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
+
+ #ifndef __PAGETABLE_PUD_FOLDED
+ #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
++#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
+ #define pgd_clear(pgd) native_pgd_clear(pgd)
+ #endif
+
+@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
+
+ #define arch_end_context_switch(prev) do {} while(0)
+
++#define pax_open_kernel() native_pax_open_kernel()
++#define pax_close_kernel() native_pax_close_kernel()
+ #endif /* CONFIG_PARAVIRT */
+
++#define __HAVE_ARCH_PAX_OPEN_KERNEL
++#define __HAVE_ARCH_PAX_CLOSE_KERNEL
++
++#ifdef CONFIG_PAX_KERNEXEC
++static inline unsigned long native_pax_open_kernel(void)
++{
++ unsigned long cr0;
++
++ preempt_disable();
++ barrier();
++ cr0 = read_cr0() ^ X86_CR0_WP;
++ BUG_ON(unlikely(cr0 & X86_CR0_WP));
++ write_cr0(cr0);
++ return cr0 ^ X86_CR0_WP;
++}
++
++static inline unsigned long native_pax_close_kernel(void)
++{
++ unsigned long cr0;
++
++ cr0 = read_cr0() ^ X86_CR0_WP;
++ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
++ write_cr0(cr0);
++ barrier();
++ preempt_enable_no_resched();
++ return cr0 ^ X86_CR0_WP;
++}
++#else
++static inline unsigned long native_pax_open_kernel(void) { return 0; }
++static inline unsigned long native_pax_close_kernel(void) { return 0; }
++#endif
++
+ /*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
++static inline int pte_user(pte_t pte)
++{
++ return pte_val(pte) & _PAGE_USER;
++}
++
+ static inline int pte_dirty(pte_t pte)
+ {
+ return pte_flags(pte) & _PAGE_DIRTY;
+@@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
+ return pte_clear_flags(pte, _PAGE_RW);
+ }
+
++static inline pte_t pte_mkread(pte_t pte)
++{
++ return __pte(pte_val(pte) | _PAGE_USER);
++}
++
+ static inline pte_t pte_mkexec(pte_t pte)
+ {
+- return pte_clear_flags(pte, _PAGE_NX);
++#ifdef CONFIG_X86_PAE
++ if (__supported_pte_mask & _PAGE_NX)
++ return pte_clear_flags(pte, _PAGE_NX);
++ else
++#endif
++ return pte_set_flags(pte, _PAGE_USER);
++}
++
++static inline pte_t pte_exprotect(pte_t pte)
++{
++#ifdef CONFIG_X86_PAE
++ if (__supported_pte_mask & _PAGE_NX)
++ return pte_set_flags(pte, _PAGE_NX);
++ else
++#endif
++ return pte_clear_flags(pte, _PAGE_USER);
+ }
+
+ static inline pte_t pte_mkdirty(pte_t pte)
+@@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
+ #endif
+
+ #ifndef __ASSEMBLY__
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
++static inline pgd_t *get_cpu_pgd(unsigned int cpu)
++{
++ return cpu_pgd[cpu];
++}
++#endif
++
+ #include <linux/mm_types.h>
+
+ static inline int pte_none(pte_t pte)
+@@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
+
+ static inline int pgd_bad(pgd_t pgd)
+ {
+- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
++ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
+ }
+
+ static inline int pgd_none(pgd_t pgd)
+@@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
+ * pgd_offset() returns a (pgd_t *)
+ * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
+ */
+-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
++#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
++#endif
++
+ /*
+ * a shortcut which implies the use of the kernel's pgd, instead
+ * of a process's
+@@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
+ #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
+ #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
+
++#ifdef CONFIG_X86_32
++#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
++#else
++#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
++#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
++#else
++#define PAX_USER_SHADOW_BASE (_AC(0,UL))
++#endif
++
++#endif
++
+ #ifndef __ASSEMBLY__
+
+ extern int direct_gbpages;
+@@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+ * dst and src can be on the same page, but the range must not overlap,
+ * and must not cross a page boundary.
+ */
+-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
++static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
+ {
+- memcpy(dst, src, count * sizeof(pgd_t));
++ pax_open_kernel();
++ while (count--)
++ *dst++ = *src++;
++ pax_close_kernel();
+ }
+
++#ifdef CONFIG_PAX_PER_CPU_PGD
++extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
++#endif
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
++#else
++static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
++#endif
+
+ #include <asm-generic/pgtable.h>
+ #endif /* __ASSEMBLY__ */
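native_pax_open_kernel() above clears CR0.WP with preemption disabled so KERNEXEC can briefly write to read-only kernel mappings, and native_pax_close_kernel() restores it; the XOR/BUG_ON dance guarantees the two calls are strictly paired and never nested. A userspace model of just that pairing logic, with CR0 replaced by a plain variable (an assumption for illustration; the real code uses the privileged read_cr0()/write_cr0() plus preempt_disable()):

    #include <assert.h>
    #include <stdio.h>

    #define X86_CR0_WP (1UL << 16)

    static unsigned long fake_cr0 = X86_CR0_WP; /* stands in for the real CR0 */

    static unsigned long read_cr0(void)    { return fake_cr0; }
    static void write_cr0(unsigned long v) { fake_cr0 = v; }

    static unsigned long open_kernel(void)
    {
        unsigned long cr0 = read_cr0() ^ X86_CR0_WP;
        assert(!(cr0 & X86_CR0_WP));  /* WP must have been set: no nesting */
        write_cr0(cr0);
        return cr0 ^ X86_CR0_WP;      /* the pre-toggle value */
    }

    static unsigned long close_kernel(void)
    {
        unsigned long cr0 = read_cr0() ^ X86_CR0_WP;
        assert(cr0 & X86_CR0_WP);     /* WP must have been clear: open came first */
        write_cr0(cr0);
        return cr0 ^ X86_CR0_WP;
    }

    int main(void)
    {
        open_kernel();   /* ...patch a read-only kernel page here... */
        close_kernel();
        printf("WP restored: %lu\n", !!(fake_cr0 & X86_CR0_WP));
        return 0;
    }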
+diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
+index 0c92113..34a77c6 100644
+--- a/arch/x86/include/asm/pgtable_32.h
++++ b/arch/x86/include/asm/pgtable_32.h
+@@ -25,9 +25,6 @@
+ struct mm_struct;
+ struct vm_area_struct;
+
+-extern pgd_t swapper_pg_dir[1024];
+-extern pgd_t initial_page_table[1024];
+-
+ static inline void pgtable_cache_init(void) { }
+ static inline void check_pgt_cache(void) { }
+ void paging_init(void);
+@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
+ # include <asm/pgtable-2level.h>
+ #endif
+
++extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
++extern pgd_t initial_page_table[PTRS_PER_PGD];
++#ifdef CONFIG_X86_PAE
++extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
++#endif
++
+ #if defined(CONFIG_HIGHPTE)
+ #define pte_offset_map(dir, address) \
+ ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
+@@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
+ /* Clear a kernel PTE and flush it from the TLB */
+ #define kpte_clear_flush(ptep, vaddr) \
+ do { \
++ pax_open_kernel(); \
+ pte_clear(&init_mm, (vaddr), (ptep)); \
++ pax_close_kernel(); \
+ __flush_tlb_one((vaddr)); \
+ } while (0)
+
+@@ -74,6 +79,9 @@ do { \
+
+ #endif /* !__ASSEMBLY__ */
+
++#define HAVE_ARCH_UNMAPPED_AREA
++#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
++
+ /*
+ * kern_addr_valid() is (1) for FLATMEM and (0) for
+ * SPARSEMEM and DISCONTIGMEM
+diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
+index ed5903b..c7fe163 100644
+--- a/arch/x86/include/asm/pgtable_32_types.h
++++ b/arch/x86/include/asm/pgtable_32_types.h
+@@ -8,7 +8,7 @@
+ */
+ #ifdef CONFIG_X86_PAE
+ # include <asm/pgtable-3level_types.h>
+-# define PMD_SIZE (1UL << PMD_SHIFT)
++# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
+ # define PMD_MASK (~(PMD_SIZE - 1))
+ #else
+ # include <asm/pgtable-2level_types.h>
+@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
+ # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
+ #endif
+
++#ifdef CONFIG_PAX_KERNEXEC
++#ifndef __ASSEMBLY__
++extern unsigned char MODULES_EXEC_VADDR[];
++extern unsigned char MODULES_EXEC_END[];
++#endif
++#include <asm/boot.h>
++#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
++#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
++#else
++#define ktla_ktva(addr) (addr)
++#define ktva_ktla(addr) (addr)
++#endif
++
+ #define MODULES_VADDR VMALLOC_START
+ #define MODULES_END VMALLOC_END
+ #define MODULES_LEN (MODULES_VADDR - MODULES_END)
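Under 32-bit KERNEXEC, ktla_ktva()/ktva_ktla() convert between the address the kernel text is linked at and its executable alias by a constant offset, so they must be exact inverses. A quick round-trip check (LOAD_PHYSICAL_ADDR and PAGE_OFFSET are Kconfig-dependent; the values below are common i386 defaults assumed for illustration):

    #include <assert.h>
    #include <stdio.h>

    #define PAGE_OFFSET        0xc0000000UL /* default CONFIG_PAGE_OFFSET */
    #define LOAD_PHYSICAL_ADDR 0x1000000UL  /* default 16 MiB load address */

    #define ktla_ktva(addr) ((addr) + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
    #define ktva_ktla(addr) ((addr) - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)

    int main(void)
    {
        unsigned long text = 0x00100000UL;  /* some linked text address */

        assert(ktva_ktla(ktla_ktva(text)) == text); /* exact inverse pair */
        printf("ktla %#lx -> ktva %#lx\n", text, ktla_ktva(text));
        return 0;
    }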
+diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
+index 975f709..107976d 100644
+--- a/arch/x86/include/asm/pgtable_64.h
++++ b/arch/x86/include/asm/pgtable_64.h
+@@ -16,10 +16,14 @@
+
+ extern pud_t level3_kernel_pgt[512];
+ extern pud_t level3_ident_pgt[512];
++extern pud_t level3_vmalloc_start_pgt[512];
++extern pud_t level3_vmalloc_end_pgt[512];
++extern pud_t level3_vmemmap_pgt[512];
++extern pud_t level2_vmemmap_pgt[512];
+ extern pmd_t level2_kernel_pgt[512];
+ extern pmd_t level2_fixmap_pgt[512];
+-extern pmd_t level2_ident_pgt[512];
+-extern pgd_t init_level4_pgt[];
++extern pmd_t level2_ident_pgt[512*2];
++extern pgd_t init_level4_pgt[512];
+
+ #define swapper_pg_dir init_level4_pgt
+
+@@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
+
+ static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
+ {
++ pax_open_kernel();
+ *pmdp = pmd;
++ pax_close_kernel();
+ }
+
+ static inline void native_pmd_clear(pmd_t *pmd)
+@@ -107,6 +113,13 @@ static inline void native_pud_clear(pud_t *pud)
+
+ static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
+ {
++ pax_open_kernel();
++ *pgdp = pgd;
++ pax_close_kernel();
++}
++
++static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
++{
+ *pgdp = pgd;
+ }
+
+diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
+index 766ea16..5b96cb3 100644
+--- a/arch/x86/include/asm/pgtable_64_types.h
++++ b/arch/x86/include/asm/pgtable_64_types.h
+@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
+ #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
+ #define MODULES_END _AC(0xffffffffff000000, UL)
+ #define MODULES_LEN (MODULES_END - MODULES_VADDR)
++#define MODULES_EXEC_VADDR MODULES_VADDR
++#define MODULES_EXEC_END MODULES_END
++
++#define ktla_ktva(addr) (addr)
++#define ktva_ktla(addr) (addr)
+
+ #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
+diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
+index 013286a..8b42f4f 100644
+--- a/arch/x86/include/asm/pgtable_types.h
++++ b/arch/x86/include/asm/pgtable_types.h
+@@ -16,13 +16,12 @@
+ #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
+ #define _PAGE_BIT_PAT 7 /* on 4KB pages */
+ #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
+-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
++#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
+ #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
+ #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
+ #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
+-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
+-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
+-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
++#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
++#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
+ #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
+
+ /* If _PAGE_BIT_PRESENT is clear, we use these: */
+@@ -40,7 +39,6 @@
+ #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
+ #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
+ #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
+-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
+ #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
+ #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
+ #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
+@@ -57,8 +55,10 @@
+
+ #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
+ #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
+-#else
++#elif defined(CONFIG_KMEMCHECK)
+ #define _PAGE_NX (_AT(pteval_t, 0))
++#else
++#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
+ #endif
+
+ #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
+@@ -96,6 +96,9 @@
+ #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
+ _PAGE_ACCESSED)
+
++#define PAGE_READONLY_NOEXEC PAGE_READONLY
++#define PAGE_SHARED_NOEXEC PAGE_SHARED
++
+ #define __PAGE_KERNEL_EXEC \
+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
+ #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
+@@ -106,7 +109,7 @@
+ #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
+ #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
+ #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
+-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
++#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
+ #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
+ #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
+ #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
+@@ -168,8 +171,8 @@
+ * bits are combined, this will allow the user to access the high address mapped
+ * VDSO in the presence of CONFIG_COMPAT_VDSO
+ */
+-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
+-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
++#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
++#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
+ #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
+ #endif
+
+@@ -207,7 +210,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
+ {
+ return native_pgd_val(pgd) & PTE_FLAGS_MASK;
+ }
++#endif
+
++#if PAGETABLE_LEVELS == 3
++#include <asm-generic/pgtable-nopud.h>
++#endif
++
++#if PAGETABLE_LEVELS == 2
++#include <asm-generic/pgtable-nopmd.h>
++#endif
++
++#ifndef __ASSEMBLY__
+ #if PAGETABLE_LEVELS > 3
+ typedef struct { pudval_t pud; } pud_t;
+
+@@ -221,8 +234,6 @@ static inline pudval_t native_pud_val(pud_t pud)
+ return pud.pud;
+ }
+ #else
+-#include <asm-generic/pgtable-nopud.h>
+-
+ static inline pudval_t native_pud_val(pud_t pud)
+ {
+ return native_pgd_val(pud.pgd);
+@@ -242,8 +253,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
+ return pmd.pmd;
+ }
+ #else
+-#include <asm-generic/pgtable-nopmd.h>
+-
+ static inline pmdval_t native_pmd_val(pmd_t pmd)
+ {
+ return native_pgd_val(pmd.pud.pgd);
+@@ -283,7 +292,6 @@ typedef struct page *pgtable_t;
+
+ extern pteval_t __supported_pte_mask;
+ extern void set_nx(void);
+-extern int nx_enabled;
+
+ #define pgprot_writecombine pgprot_writecombine
+ extern pgprot_t pgprot_writecombine(pgprot_t prot);
+diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
+index b650435..eefa566 100644
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -268,7 +268,7 @@ struct tss_struct {
+
+ } ____cacheline_aligned;
+
+-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
++extern struct tss_struct init_tss[NR_CPUS];
+
+ /*
+ * Save the original ist values for checking stack pointers during debugging
+@@ -860,11 +860,18 @@ static inline void spin_lock_prefetch(const void *x)
+ */
+ #define TASK_SIZE PAGE_OFFSET
+ #define TASK_SIZE_MAX TASK_SIZE
++
++#ifdef CONFIG_PAX_SEGMEXEC
++#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
++#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
++#else
+ #define STACK_TOP TASK_SIZE
+-#define STACK_TOP_MAX STACK_TOP
++#endif
++
++#define STACK_TOP_MAX TASK_SIZE
+
+ #define INIT_THREAD { \
+- .sp0 = sizeof(init_stack) + (long)&init_stack, \
++ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
+ .vm86_info = NULL, \
+ .sysenter_cs = __KERNEL_CS, \
+ .io_bitmap_ptr = NULL, \
+@@ -878,7 +885,7 @@ static inline void spin_lock_prefetch(const void *x)
+ */
+ #define INIT_TSS { \
+ .x86_tss = { \
+- .sp0 = sizeof(init_stack) + (long)&init_stack, \
++ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
+ .ss0 = __KERNEL_DS, \
+ .ss1 = __KERNEL_CS, \
+ .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
+@@ -889,11 +896,7 @@ static inline void spin_lock_prefetch(const void *x)
+ extern unsigned long thread_saved_pc(struct task_struct *tsk);
+
+ #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
+-#define KSTK_TOP(info) \
+-({ \
+- unsigned long *__ptr = (unsigned long *)(info); \
+- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
+-})
++#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
+
+ /*
+ * The below -8 is to reserve 8 bytes on top of the ring0 stack.
+@@ -908,7 +911,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
+ #define task_pt_regs(task) \
+ ({ \
+ struct pt_regs *__regs__; \
+- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
++ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
+ __regs__ - 1; \
+ })
+
+@@ -918,13 +921,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
+ /*
+ * User space process size. 47bits minus one guard page.
+ */
+-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
++#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
+
+ /* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+ #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
+- 0xc0000000 : 0xFFFFe000)
++ 0xc0000000 : 0xFFFFf000)
+
+ #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
+ IA32_PAGE_OFFSET : TASK_SIZE_MAX)
+@@ -935,11 +938,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
+ #define STACK_TOP_MAX TASK_SIZE_MAX
+
+ #define INIT_THREAD { \
+- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
++ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
+ }
+
+ #define INIT_TSS { \
+- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
++ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
+ }
+
+ /*
+@@ -961,6 +964,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
+ */
+ #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
+
++#ifdef CONFIG_PAX_SEGMEXEC
++#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
++#endif
++
+ #define KSTK_EIP(task) (task_pt_regs(task)->ip)
+
+ /* Get/set a process' ability to use the timestamp counter instruction */
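Under SEGMEXEC the user address space is halved so the upper half can hold the execute-only mirror of the lower half, which is why STACK_TOP (and SEGMEXEC_TASK_UNMAPPED_BASE just above) are chosen per task from the pax_flags. A sketch of that selection (the MF_PAX_SEGMEXEC value below is a made-up stand-in; only the halving matters):

    #include <stdio.h>

    #define TASK_SIZE          0xc0000000UL /* 3 GB user space, i386 default */
    #define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
    #define MF_PAX_SEGMEXEC    0x01UL       /* illustrative flag value */

    static unsigned long stack_top(unsigned long pax_flags)
    {
        return (pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE : TASK_SIZE;
    }

    int main(void)
    {
        printf("normal   STACK_TOP = %#lx\n", stack_top(0));
        printf("segmexec STACK_TOP = %#lx\n", stack_top(MF_PAX_SEGMEXEC));
        return 0;
    }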
+diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
+index 3566454..4bdfb8c 100644
+--- a/arch/x86/include/asm/ptrace.h
++++ b/arch/x86/include/asm/ptrace.h
+@@ -156,28 +156,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
+ }
+
+ /*
+- * user_mode_vm(regs) determines whether a register set came from user mode.
++ * user_mode(regs) determines whether a register set came from user mode.
+ * This is true if V8086 mode was enabled OR if the register set was from
+ * protected mode with RPL-3 CS value. This tricky test checks that with
+ * one comparison. Many places in the kernel can bypass this full check
+- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
++ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
++ * be used.
+ */
+-static inline int user_mode(struct pt_regs *regs)
++static inline int user_mode_novm(struct pt_regs *regs)
+ {
+ #ifdef CONFIG_X86_32
+ return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
+ #else
+- return !!(regs->cs & 3);
++ return !!(regs->cs & SEGMENT_RPL_MASK);
+ #endif
+ }
+
+-static inline int user_mode_vm(struct pt_regs *regs)
++static inline int user_mode(struct pt_regs *regs)
+ {
+ #ifdef CONFIG_X86_32
+ return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
+ USER_RPL;
+ #else
+- return user_mode(regs);
++ return user_mode_novm(regs);
+ #endif
+ }
+
+@@ -193,15 +194,16 @@ static inline int v8086_mode(struct pt_regs *regs)
+ #ifdef CONFIG_X86_64
+ static inline bool user_64bit_mode(struct pt_regs *regs)
+ {
++ unsigned long cs = regs->cs & 0xffff;
+ #ifndef CONFIG_PARAVIRT
+ /*
+ * On non-paravirt systems, this is the only long mode CPL 3
+ * selector. We do not allow long mode selectors in the LDT.
+ */
+- return regs->cs == __USER_CS;
++ return cs == __USER_CS;
+ #else
+ /* Headers are too twisted for this to go in paravirt.h. */
+- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
++ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
+ #endif
+ }
+ #endif
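The ptrace.h hunk swaps the names so that user_mode() is now the full check and user_mode_novm() the fast one. The full check folds the CS RPL bits and the EFLAGS VM bit into a single comparison; the trick can be exercised standalone (selector and flag values are the standard x86 ones):

    #include <stdio.h>

    #define SEGMENT_RPL_MASK 0x3UL
    #define USER_RPL         0x3UL
    #define X86_VM_MASK      (1UL << 17) /* EFLAGS.VM */

    /* True for RPL-3 code segments OR virtual-8086 mode, in one compare. */
    static int user_mode(unsigned long cs, unsigned long eflags)
    {
        return ((cs & SEGMENT_RPL_MASK) | (eflags & X86_VM_MASK)) >= USER_RPL;
    }

    int main(void)
    {
        printf("kernel (CS RPL 0): %d\n", user_mode(0x10, 0));
        printf("user   (CS RPL 3): %d\n", user_mode(0x73, 0));
        printf("v8086  (RPL 0+VM): %d\n", user_mode(0x10, X86_VM_MASK));
        return 0;
    }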
+diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
+index 92f29706..a79cbbb 100644
+--- a/arch/x86/include/asm/reboot.h
++++ b/arch/x86/include/asm/reboot.h
+@@ -6,19 +6,19 @@
+ struct pt_regs;
+
+ struct machine_ops {
+- void (*restart)(char *cmd);
+- void (*halt)(void);
+- void (*power_off)(void);
++ void (* __noreturn restart)(char *cmd);
++ void (* __noreturn halt)(void);
++ void (* __noreturn power_off)(void);
+ void (*shutdown)(void);
+ void (*crash_shutdown)(struct pt_regs *);
+- void (*emergency_restart)(void);
+-};
++ void (* __noreturn emergency_restart)(void);
++} __no_const;
+
+ extern struct machine_ops machine_ops;
+
+ void native_machine_crash_shutdown(struct pt_regs *regs);
+ void native_machine_shutdown(void);
+-void machine_real_restart(unsigned int type);
++void machine_real_restart(unsigned int type) __noreturn;
+ /* These must match dispatch_table in reboot_32.S */
+ #define MRR_BIOS 0
+ #define MRR_APM 1
+diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
+index 2dbe4a7..ce1db00 100644
+--- a/arch/x86/include/asm/rwsem.h
++++ b/arch/x86/include/asm/rwsem.h
+@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
+ {
+ asm volatile("# beginning down_read\n\t"
+ LOCK_PREFIX _ASM_INC "(%1)\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX _ASM_DEC "(%1)\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ /* adds 0x00000001 */
+ " jns 1f\n"
+ " call call_rwsem_down_read_failed\n"
+@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
+ "1:\n\t"
+ " mov %1,%2\n\t"
+ " add %3,%2\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "sub %3,%2\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ " jle 2f\n\t"
+ LOCK_PREFIX " cmpxchg %2,%0\n\t"
+ " jnz 1b\n\t"
+@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
+ long tmp;
+ asm volatile("# beginning down_write\n\t"
+ LOCK_PREFIX " xadd %1,(%2)\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "mov %1,(%2)\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ /* adds 0xffff0001, returns the old value */
+ " test %1,%1\n\t"
+ /* was the count 0 before? */
+@@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
+ long tmp;
+ asm volatile("# beginning __up_read\n\t"
+ LOCK_PREFIX " xadd %1,(%2)\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "mov %1,(%2)\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ /* subtracts 1, returns the old value */
+ " jns 1f\n\t"
+ " call call_rwsem_wake\n" /* expects old value in %edx */
+@@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
+ long tmp;
+ asm volatile("# beginning __up_write\n\t"
+ LOCK_PREFIX " xadd %1,(%2)\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "mov %1,(%2)\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ /* subtracts 0xffff0001, returns the old value */
+ " jns 1f\n\t"
+ " call call_rwsem_wake\n" /* expects old value in %edx */
+@@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
+ {
+ asm volatile("# beginning __downgrade_write\n\t"
+ LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ /*
+ * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
+ * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
+@@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
+ */
+ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
+ {
+- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
++ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX _ASM_SUB "%1,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ : "+m" (sem->count)
+ : "er" (delta));
+ }
+@@ -204,7 +260,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
+ */
+ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
+ {
+- return delta + xadd(&sem->count, delta);
++ return delta + xadd_check_overflow(&sem->count, delta);
+ }
+
+ #endif /* __KERNEL__ */
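Each locked arithmetic op in rwsem.h gains the PAX_REFCOUNT pattern: perform the operation, jno past the fixup when the overflow flag is clear, otherwise undo the operation and execute int $4 so the overflow trap fires (the _ASM_EXTABLE entries let execution resume afterwards). A userspace x86-64 sketch of the same detect-and-roll-back idea, with the trap replaced by a flag (assumes GCC/Clang inline asm on x86-64 Linux):

    #include <assert.h>
    #include <limits.h>
    #include <stdio.h>

    /* Atomically increment *v; on signed overflow, roll back and report. */
    static int checked_inc(long *v)
    {
        int overflowed = 0;

        asm volatile("lock incq %0\n\t"
                     "jno 1f\n\t"        /* no overflow: skip the fixup */
                     "lock decq %0\n\t"  /* undo, as the patch does */
                     "movl $1, %1\n"     /* stand-in for int $4 */
                     "1:"
                     : "+m" (*v), "+r" (overflowed)
                     : : "memory", "cc");
        return overflowed;
    }

    int main(void)
    {
        long v = LONG_MAX;

        assert(checked_inc(&v) == 1 && v == LONG_MAX); /* caught, undone */
        v = 0;
        assert(checked_inc(&v) == 0 && v == 1);        /* normal path */
        puts("overflow detected and rolled back");
        return 0;
    }

The rollback means a detected overflow leaves the counter saturated at its old value instead of silently wrapping.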
+diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
+index 5e64171..f58957e 100644
+--- a/arch/x86/include/asm/segment.h
++++ b/arch/x86/include/asm/segment.h
+@@ -64,10 +64,15 @@
+ * 26 - ESPFIX small SS
+ * 27 - per-cpu [ offset to per-cpu data area ]
+ * 28 - stack_canary-20 [ for stack protector ]
+- * 29 - unused
+- * 30 - unused
++ * 29 - PCI BIOS CS
++ * 30 - PCI BIOS DS
+ * 31 - TSS for double fault handler
+ */
++#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
++#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
++#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
++#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
++
+ #define GDT_ENTRY_TLS_MIN 6
+ #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
+
+@@ -79,6 +84,8 @@
+
+ #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
+
++#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
++
+ #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
+
+ #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
+@@ -104,6 +111,12 @@
+ #define __KERNEL_STACK_CANARY 0
+ #endif
+
++#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
++#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
++
++#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
++#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
++
+ #define GDT_ENTRY_DOUBLEFAULT_TSS 31
+
+ /*
+@@ -141,7 +154,7 @@
+ */
+
+ /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
+-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
++#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
+
+
+ #else
+@@ -165,6 +178,8 @@
+ #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
+ #define __USER32_DS __USER_DS
+
++#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
++
+ #define GDT_ENTRY_TSS 8 /* needs two entries */
+ #define GDT_ENTRY_LDT 10 /* needs two entries */
+ #define GDT_ENTRY_TLS_MIN 12
+@@ -185,6 +200,7 @@
+ #endif
+
+ #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
++#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
+ #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
+ #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
+ #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
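The new GDT entries follow the usual selector arithmetic: a selector is the descriptor index shifted left by 3, with bit 2 selecting GDT vs LDT and bits 0-1 the RPL, hence __PCIBIOS_CS = GDT_ENTRY_PCIBIOS_CS * 8 for ring 0 and the trailing +3 on user selectors. A tiny encoder (GDT_ENTRY_KERNEL_BASE is 12 in the 32-bit layout, which matches the "29 - PCI BIOS CS" comment above):

    #include <stdio.h>

    static unsigned selector(unsigned idx, unsigned ldt, unsigned rpl)
    {
        return (idx << 3) | ((ldt & 1) << 2) | (rpl & 3);
    }

    int main(void)
    {
        unsigned pcibios_cs = 12 + 17; /* GDT_ENTRY_KERNEL_BASE + 17 = 29 */

        printf("__PCIBIOS_CS = %#x\n", selector(pcibios_cs, 0, 0)); /* 29*8 */
        printf("__USER_CS    = %#x\n", selector(14, 0, 3));         /* 14*8+3 */
        return 0;
    }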
+diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
+index 73b11bc..d4a3b63 100644
+--- a/arch/x86/include/asm/smp.h
++++ b/arch/x86/include/asm/smp.h
+@@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
+ /* cpus sharing the last level cache: */
+ DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
+ DECLARE_PER_CPU(u16, cpu_llc_id);
+-DECLARE_PER_CPU(int, cpu_number);
++DECLARE_PER_CPU(unsigned int, cpu_number);
+
+ static inline struct cpumask *cpu_sibling_mask(int cpu)
+ {
+@@ -77,7 +77,7 @@ struct smp_ops {
+
+ void (*send_call_func_ipi)(const struct cpumask *mask);
+ void (*send_call_func_single_ipi)(int cpu);
+-};
++} __no_const;
+
+ /* Globals due to paravirt */
+ extern void set_cpu_sibling_map(int cpu);
+@@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitdata;
+ extern int safe_smp_processor_id(void);
+
+ #elif defined(CONFIG_X86_64_SMP)
+-#define raw_smp_processor_id() (percpu_read(cpu_number))
+-
+-#define stack_smp_processor_id() \
+-({ \
+- struct thread_info *ti; \
+- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
+- ti->cpu; \
+-})
++#define raw_smp_processor_id() (percpu_read(cpu_number))
++#define stack_smp_processor_id() raw_smp_processor_id()
+ #define safe_smp_processor_id() smp_processor_id()
+
+ #endif
+diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
+index 972c260..43ab1fd 100644
+--- a/arch/x86/include/asm/spinlock.h
++++ b/arch/x86/include/asm/spinlock.h
+@@ -188,6 +188,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
+ static inline void arch_read_lock(arch_rwlock_t *rw)
+ {
+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ "jns 1f\n"
+ "call __read_lock_failed\n\t"
+ "1:\n"
+@@ -197,6 +205,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
+ static inline void arch_write_lock(arch_rwlock_t *rw)
+ {
+ asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ "jz 1f\n"
+ "call __write_lock_failed\n\t"
+ "1:\n"
+@@ -226,13 +242,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
+
+ static inline void arch_read_unlock(arch_rwlock_t *rw)
+ {
+- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
++ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ :"+m" (rw->lock) : : "memory");
+ }
+
+ static inline void arch_write_unlock(arch_rwlock_t *rw)
+ {
+- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
++ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
+ }
+
+diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
+index 1575177..cb23f52 100644
+--- a/arch/x86/include/asm/stackprotector.h
++++ b/arch/x86/include/asm/stackprotector.h
+@@ -48,7 +48,7 @@
+ * head_32 for boot CPU and setup_per_cpu_areas() for others.
+ */
+ #define GDT_STACK_CANARY_INIT \
+- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
++ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
+
+ /*
+ * Initialize the stackprotector canary value.
+@@ -113,7 +113,7 @@ static inline void setup_stack_canary_segment(int cpu)
+
+ static inline void load_stack_canary_segment(void)
+ {
+-#ifdef CONFIG_X86_32
++#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
+ asm volatile ("mov %0, %%gs" : : "r" (0));
+ #endif
+ }
+diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
+index 70bbe39..4ae2bd4 100644
+--- a/arch/x86/include/asm/stacktrace.h
++++ b/arch/x86/include/asm/stacktrace.h
+@@ -11,28 +11,20 @@
+
+ extern int kstack_depth_to_print;
+
+-struct thread_info;
++struct task_struct;
+ struct stacktrace_ops;
+
+-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
+- unsigned long *stack,
+- unsigned long bp,
+- const struct stacktrace_ops *ops,
+- void *data,
+- unsigned long *end,
+- int *graph);
++typedef unsigned long walk_stack_t(struct task_struct *task,
++ void *stack_start,
++ unsigned long *stack,
++ unsigned long bp,
++ const struct stacktrace_ops *ops,
++ void *data,
++ unsigned long *end,
++ int *graph);
+
+-extern unsigned long
+-print_context_stack(struct thread_info *tinfo,
+- unsigned long *stack, unsigned long bp,
+- const struct stacktrace_ops *ops, void *data,
+- unsigned long *end, int *graph);
+-
+-extern unsigned long
+-print_context_stack_bp(struct thread_info *tinfo,
+- unsigned long *stack, unsigned long bp,
+- const struct stacktrace_ops *ops, void *data,
+- unsigned long *end, int *graph);
++extern walk_stack_t print_context_stack;
++extern walk_stack_t print_context_stack_bp;
+
+ /* Generic stack tracer with callbacks */
+
+@@ -40,7 +32,7 @@ struct stacktrace_ops {
+ void (*address)(void *data, unsigned long address, int reliable);
+ /* On negative return stop dumping */
+ int (*stack)(void *data, char *name);
+- walk_stack_t walk_stack;
++ walk_stack_t *walk_stack;
+ };
+
+ void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
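Note the typedef change in this hunk: walk_stack_t becomes a function type rather than a pointer type, so print_context_stack can be declared simply as "extern walk_stack_t print_context_stack;" and the ops-struct member grows an explicit "*", keeping the declarations and the struct field consistent. The C idiom in miniature:

    #include <stdio.h>

    typedef int handler_t(int);          /* a function type, not a pointer type */

    static int double_it(int x) { return 2 * x; }

    struct ops {
        handler_t *fn;                   /* pointer formed by adding one '*' */
    };

    int main(void)
    {
        handler_t *h = double_it;        /* decays like any function pointer */
        struct ops o = { .fn = h };

        printf("%d %d\n", h(10), o.fn(21));
        return 0;
    }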
+diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
+index cb23852..2dde194 100644
+--- a/arch/x86/include/asm/sys_ia32.h
++++ b/arch/x86/include/asm/sys_ia32.h
+@@ -40,7 +40,7 @@ asmlinkage long sys32_rt_sigprocmask(int, compat_sigset_t __user *,
+ compat_sigset_t __user *, unsigned int);
+ asmlinkage long sys32_alarm(unsigned int);
+
+-asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
++asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
+ asmlinkage long sys32_sysfs(int, u32, u32);
+
+ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
+diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
+index 2d2f01c..f985723 100644
+--- a/arch/x86/include/asm/system.h
++++ b/arch/x86/include/asm/system.h
+@@ -129,7 +129,7 @@ do { \
+ "call __switch_to\n\t" \
+ "movq "__percpu_arg([current_task])",%%rsi\n\t" \
+ __switch_canary \
+- "movq %P[thread_info](%%rsi),%%r8\n\t" \
++ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
+ "movq %%rax,%%rdi\n\t" \
+ "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
+ "jnz ret_from_fork\n\t" \
+@@ -140,7 +140,7 @@ do { \
+ [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
+ [ti_flags] "i" (offsetof(struct thread_info, flags)), \
+ [_tif_fork] "i" (_TIF_FORK), \
+- [thread_info] "i" (offsetof(struct task_struct, stack)), \
++ [thread_info] "m" (current_tinfo), \
+ [current_task] "m" (current_task) \
+ __switch_canary_iparam \
+ : "memory", "cc" __EXTRA_CLOBBER)
+@@ -200,7 +200,7 @@ static inline unsigned long get_limit(unsigned long segment)
+ {
+ unsigned long __limit;
+ asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
+- return __limit + 1;
++ return __limit;
+ }
+
+ static inline void native_clts(void)
+@@ -397,13 +397,13 @@ void enable_hlt(void);
+
+ void cpu_idle_wait(void);
+
+-extern unsigned long arch_align_stack(unsigned long sp);
++#define arch_align_stack(x) ((x) & ~0xfUL)
+ extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
+
+ void default_idle(void);
+ bool set_pm_idle_to_default(void);
+
+-void stop_this_cpu(void *dummy);
++void stop_this_cpu(void *dummy) __noreturn;
+
+ /*
+ * Force strict CPU ordering.
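arch_align_stack() normally perturbs the initial user stack pointer by a small random offset; since PaX performs its own stack randomization, the patch reduces it to plain 16-byte downward alignment. The replacement macro in isolation:

    #include <assert.h>
    #include <stdio.h>

    #define arch_align_stack(x) ((x) & ~0xfUL)

    int main(void)
    {
        unsigned long sp = 0xbffff3a7UL;    /* an arbitrary stack top */
        unsigned long aligned = arch_align_stack(sp);

        assert((aligned & 0xf) == 0 && aligned <= sp);
        printf("%#lx -> %#lx\n", sp, aligned);
        return 0;
    }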
+diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
+index a1fe5c1..ee326d8 100644
+--- a/arch/x86/include/asm/thread_info.h
++++ b/arch/x86/include/asm/thread_info.h
+@@ -10,6 +10,7 @@
+ #include <linux/compiler.h>
+ #include <asm/page.h>
+ #include <asm/types.h>
++#include <asm/percpu.h>
+
+ /*
+ * low level task data that entry.S needs immediate access to
+@@ -24,7 +25,6 @@ struct exec_domain;
+ #include <linux/atomic.h>
+
+ struct thread_info {
+- struct task_struct *task; /* main task structure */
+ struct exec_domain *exec_domain; /* execution domain */
+ __u32 flags; /* low level flags */
+ __u32 status; /* thread synchronous flags */
+@@ -34,18 +34,12 @@ struct thread_info {
+ mm_segment_t addr_limit;
+ struct restart_block restart_block;
+ void __user *sysenter_return;
+-#ifdef CONFIG_X86_32
+- unsigned long previous_esp; /* ESP of the previous stack in
+- case of nested (IRQ) stacks
+- */
+- __u8 supervisor_stack[0];
+-#endif
++ unsigned long lowest_stack;
+ int uaccess_err;
+ };
+
+-#define INIT_THREAD_INFO(tsk) \
++#define INIT_THREAD_INFO \
+ { \
+- .task = &tsk, \
+ .exec_domain = &default_exec_domain, \
+ .flags = 0, \
+ .cpu = 0, \
+@@ -56,7 +50,7 @@ struct thread_info {
+ }, \
+ }
+
+-#define init_thread_info (init_thread_union.thread_info)
++#define init_thread_info (init_thread_union.stack)
+ #define init_stack (init_thread_union.stack)
+
+ #else /* !__ASSEMBLY__ */
+@@ -170,45 +164,40 @@ struct thread_info {
+ ret; \
+ })
+
+-#ifdef CONFIG_X86_32
+-
+-#define STACK_WARN (THREAD_SIZE/8)
+-/*
+- * macros/functions for gaining access to the thread information structure
+- *
+- * preempt_count needs to be 1 initially, until the scheduler is functional.
+- */
+-#ifndef __ASSEMBLY__
+-
+-
+-/* how to get the current stack pointer from C */
+-register unsigned long current_stack_pointer asm("esp") __used;
+-
+-/* how to get the thread information struct from C */
+-static inline struct thread_info *current_thread_info(void)
+-{
+- return (struct thread_info *)
+- (current_stack_pointer & ~(THREAD_SIZE - 1));
+-}
+-
+-#else /* !__ASSEMBLY__ */
+-
++#ifdef __ASSEMBLY__
+ /* how to get the thread information struct from ASM */
+ #define GET_THREAD_INFO(reg) \
+- movl $-THREAD_SIZE, reg; \
+- andl %esp, reg
++ mov PER_CPU_VAR(current_tinfo), reg
+
+ /* use this one if reg already contains %esp */
+-#define GET_THREAD_INFO_WITH_ESP(reg) \
+- andl $-THREAD_SIZE, reg
++#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
++#else
++/* how to get the thread information struct from C */
++DECLARE_PER_CPU(struct thread_info *, current_tinfo);
++
++static __always_inline struct thread_info *current_thread_info(void)
++{
++ return percpu_read_stable(current_tinfo);
++}
++#endif
++
++#ifdef CONFIG_X86_32
++
++#define STACK_WARN (THREAD_SIZE/8)
++/*
++ * macros/functions for gaining access to the thread information structure
++ *
++ * preempt_count needs to be 1 initially, until the scheduler is functional.
++ */
++#ifndef __ASSEMBLY__
++
++/* how to get the current stack pointer from C */
++register unsigned long current_stack_pointer asm("esp") __used;
+
+ #endif
+
+ #else /* X86_32 */
+
+-#include <asm/percpu.h>
+-#define KERNEL_STACK_OFFSET (5*8)
+-
+ /*
+ * macros/functions for gaining access to the thread information structure
+ * preempt_count needs to be 1 initially, until the scheduler is functional.
+@@ -216,21 +205,8 @@ static inline struct thread_info *current_thread_info(void)
+ #ifndef __ASSEMBLY__
+ DECLARE_PER_CPU(unsigned long, kernel_stack);
+
+-static inline struct thread_info *current_thread_info(void)
+-{
+- struct thread_info *ti;
+- ti = (void *)(percpu_read_stable(kernel_stack) +
+- KERNEL_STACK_OFFSET - THREAD_SIZE);
+- return ti;
+-}
+-
+-#else /* !__ASSEMBLY__ */
+-
+-/* how to get the thread information struct from ASM */
+-#define GET_THREAD_INFO(reg) \
+- movq PER_CPU_VAR(kernel_stack),reg ; \
+- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
+-
++/* how to get the current stack pointer from C */
++register unsigned long current_stack_pointer asm("rsp") __used;
+ #endif
+
+ #endif /* !X86_32 */
+@@ -266,5 +242,16 @@ extern void arch_task_cache_init(void);
+ extern void free_thread_info(struct thread_info *ti);
+ extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
+ #define arch_task_cache_init arch_task_cache_init
++
++#define __HAVE_THREAD_FUNCTIONS
++#define task_thread_info(task) (&(task)->tinfo)
++#define task_stack_page(task) ((task)->stack)
++#define setup_thread_stack(p, org) do {} while (0)
++#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
++
++#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
++extern struct task_struct *alloc_task_struct_node(int node);
++extern void free_task_struct(struct task_struct *);
++
+ #endif
+ #endif /* _ASM_X86_THREAD_INFO_H */
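The thread_info.h changes retire the classic trick of locating thread_info by masking the stack pointer down to the THREAD_SIZE-aligned stack base; thread_info now lives in task_struct and is reached through the per-CPU current_tinfo. The deleted computation, reproduced on an aligned heap block (assuming an 8 KiB THREAD_SIZE):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define THREAD_SIZE 8192UL

    int main(void)
    {
        /* Stand-in for a kernel stack: THREAD_SIZE bytes, THREAD_SIZE aligned. */
        char *stack = aligned_alloc(THREAD_SIZE, THREAD_SIZE);
        char *sp;
        char *ti;

        assert(stack);
        sp = stack + 5000;  /* some "current %esp" inside the stack */

        /* The deleted current_thread_info(): mask sp down to the base. */
        ti = (char *)((uintptr_t)sp & ~(THREAD_SIZE - 1));

        assert(ti == stack);
        printf("sp=%p -> thread_info=%p\n", (void *)sp, (void *)ti);
        free(stack);
        return 0;
    }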
+diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
+index 36361bf..324f262 100644
+--- a/arch/x86/include/asm/uaccess.h
++++ b/arch/x86/include/asm/uaccess.h
+@@ -7,12 +7,15 @@
+ #include <linux/compiler.h>
+ #include <linux/thread_info.h>
+ #include <linux/string.h>
++#include <linux/sched.h>
+ #include <asm/asm.h>
+ #include <asm/page.h>
+
+ #define VERIFY_READ 0
+ #define VERIFY_WRITE 1
+
++extern void check_object_size(const void *ptr, unsigned long n, bool to);
++
+ /*
+ * The fs value determines whether argument validity checking should be
+ * performed or not. If get_fs() == USER_DS, checking is performed, with
+@@ -28,7 +31,12 @@
+
+ #define get_ds() (KERNEL_DS)
+ #define get_fs() (current_thread_info()->addr_limit)
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
++void __set_fs(mm_segment_t x);
++void set_fs(mm_segment_t x);
++#else
+ #define set_fs(x) (current_thread_info()->addr_limit = (x))
++#endif
+
+ #define segment_eq(a, b) ((a).seg == (b).seg)
+
+@@ -76,7 +84,33 @@
+ * checks that the pointer is in the user space range - after calling
+ * this function, memory access functions may still return -EFAULT.
+ */
+-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
++#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
++#define access_ok(type, addr, size) \
++({ \
++ long __size = size; \
++ unsigned long __addr = (unsigned long)addr; \
++ unsigned long __addr_ao = __addr & PAGE_MASK; \
++ unsigned long __end_ao = __addr + __size - 1; \
++ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
++ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
++ while(__addr_ao <= __end_ao) { \
++ char __c_ao; \
++ __addr_ao += PAGE_SIZE; \
++ if (__size > PAGE_SIZE) \
++ cond_resched(); \
++ if (__get_user(__c_ao, (char __user *)__addr)) \
++ break; \
++ if (type != VERIFY_WRITE) { \
++ __addr = __addr_ao; \
++ continue; \
++ } \
++ if (__put_user(__c_ao, (char __user *)__addr)) \
++ break; \
++ __addr = __addr_ao; \
++ } \
++ } \
++ __ret_ao; \
++})
+
+ /*
+ * The exception table consists of pairs of addresses: the first is the
+@@ -182,12 +216,20 @@ extern int __get_user_bad(void);
+ asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
+ : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
+
+-
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#define __copyuser_seg "gs;"
++#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
++#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
++#else
++#define __copyuser_seg
++#define __COPYUSER_SET_ES
++#define __COPYUSER_RESTORE_ES
++#endif
+
+ #ifdef CONFIG_X86_32
+ #define __put_user_asm_u64(x, addr, err, errret) \
+- asm volatile("1: movl %%eax,0(%2)\n" \
+- "2: movl %%edx,4(%2)\n" \
++ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
++ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
+ "3:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "4: movl %3,%0\n" \
+@@ -199,8 +241,8 @@ extern int __get_user_bad(void);
+ : "A" (x), "r" (addr), "i" (errret), "0" (err))
+
+ #define __put_user_asm_ex_u64(x, addr) \
+- asm volatile("1: movl %%eax,0(%1)\n" \
+- "2: movl %%edx,4(%1)\n" \
++ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
++ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
+ "3:\n" \
+ _ASM_EXTABLE(1b, 2b - 1b) \
+ _ASM_EXTABLE(2b, 3b - 2b) \
+@@ -252,7 +294,7 @@ extern void __put_user_8(void);
+ __typeof__(*(ptr)) __pu_val; \
+ __chk_user_ptr(ptr); \
+ might_fault(); \
+- __pu_val = x; \
++ __pu_val = (x); \
+ switch (sizeof(*(ptr))) { \
+ case 1: \
+ __put_user_x(1, __pu_val, ptr, __ret_pu); \
+@@ -373,7 +415,7 @@ do { \
+ } while (0)
+
+ #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
+- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
++ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: mov %3,%0\n" \
+@@ -381,7 +423,7 @@ do { \
+ " jmp 2b\n" \
+ ".previous\n" \
+ _ASM_EXTABLE(1b, 3b) \
+- : "=r" (err), ltype(x) \
++ : "=r" (err), ltype (x) \
+ : "m" (__m(addr)), "i" (errret), "0" (err))
+
+ #define __get_user_size_ex(x, ptr, size) \
+@@ -406,7 +448,7 @@ do { \
+ } while (0)
+
+ #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
+- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
++ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
+ "2:\n" \
+ _ASM_EXTABLE(1b, 2b - 1b) \
+ : ltype(x) : "m" (__m(addr)))
+@@ -423,13 +465,24 @@ do { \
+ int __gu_err; \
+ unsigned long __gu_val; \
+ __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
+- (x) = (__force __typeof__(*(ptr)))__gu_val; \
++ (x) = (__typeof__(*(ptr)))__gu_val; \
+ __gu_err; \
+ })
+
+ /* FIXME: this hack is definitely wrong -AK */
+ struct __large_struct { unsigned long buf[100]; };
+-#define __m(x) (*(struct __large_struct __user *)(x))
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#define ____m(x) \
++({ \
++ unsigned long ____x = (unsigned long)(x); \
++ if (____x < PAX_USER_SHADOW_BASE) \
++ ____x += PAX_USER_SHADOW_BASE; \
++ (void __user *)____x; \
++})
++#else
++#define ____m(x) (x)
++#endif
++#define __m(x) (*(struct __large_struct __user *)____m(x))
+
+ /*
+ * Tell gcc we read from memory instead of writing: this is because
+@@ -437,7 +490,7 @@ struct __large_struct { unsigned long buf[100]; };
+ * aliasing issues.
+ */
+ #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
+- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
++ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: mov %3,%0\n" \
+@@ -445,10 +498,10 @@ struct __large_struct { unsigned long buf[100]; };
+ ".previous\n" \
+ _ASM_EXTABLE(1b, 3b) \
+ : "=r"(err) \
+- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
++ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
+
+ #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
+- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
++ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
+ "2:\n" \
+ _ASM_EXTABLE(1b, 2b - 1b) \
+ : : ltype(x), "m" (__m(addr)))
+@@ -487,8 +540,12 @@ struct __large_struct { unsigned long buf[100]; };
+ * On error, the variable @x is set to zero.
+ */
+
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#define __get_user(x, ptr) get_user((x), (ptr))
++#else
+ #define __get_user(x, ptr) \
+ __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
++#endif
+
+ /**
+ * __put_user: - Write a simple value into user space, with less checking.
+@@ -510,8 +567,12 @@ struct __large_struct { unsigned long buf[100]; };
+ * Returns zero on success, or -EFAULT on error.
+ */
+
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#define __put_user(x, ptr) put_user((x), (ptr))
++#else
+ #define __put_user(x, ptr) \
+ __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
++#endif
+
+ #define __get_user_unaligned __get_user
+ #define __put_user_unaligned __put_user
+@@ -529,7 +590,7 @@ struct __large_struct { unsigned long buf[100]; };
+ #define get_user_ex(x, ptr) do { \
+ unsigned long __gue_val; \
+ __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
+- (x) = (__force __typeof__(*(ptr)))__gue_val; \
++ (x) = (__typeof__(*(ptr)))__gue_val; \
+ } while (0)
+
+ #ifdef CONFIG_X86_WP_WORKS_OK
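The rewritten access_ok() above pre-faults any range that crosses a page boundary by probing one byte per page (reading it back, and for VERIFY_WRITE also writing it back), so the __copy routines under UDEREF do not fault mid-copy. Its page-walk arithmetic can be checked standalone; note the fast path skips the loop entirely whenever the range stays within one page:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    /* Number of one-byte probes access_ok's loop would perform. */
    static unsigned long probes(unsigned long addr, unsigned long size)
    {
        unsigned long addr_ao = addr & PAGE_MASK;
        unsigned long end_ao = addr + size - 1;
        unsigned long n = 0;

        if (!((end_ao ^ addr_ao) & PAGE_MASK)) /* same page: loop skipped */
            return 0;
        while (addr_ao <= end_ao) {
            addr_ao += PAGE_SIZE;
            n++;
        }
        return n;
    }

    int main(void)
    {
        printf("%lu\n", probes(0x1000, 16));            /* 0: single page */
        printf("%lu\n", probes(0x1ff8, 16));            /* 2: straddles one boundary */
        printf("%lu\n", probes(0x1000, 3 * PAGE_SIZE)); /* 3: one probe per page */
        return 0;
    }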
+diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
+index 566e803..b9521e9 100644
+--- a/arch/x86/include/asm/uaccess_32.h
++++ b/arch/x86/include/asm/uaccess_32.h
+@@ -43,6 +43,9 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
+ static __always_inline unsigned long __must_check
+ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ if (__builtin_constant_p(n)) {
+ unsigned long ret;
+
+@@ -61,6 +64,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
+ return ret;
+ }
+ }
++ if (!__builtin_constant_p(n))
++ check_object_size(from, n, true);
+ return __copy_to_user_ll(to, from, n);
+ }
+
+@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
+ __copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
+ might_fault();
++
+ return __copy_to_user_inatomic(to, from, n);
+ }
+
+ static __always_inline unsigned long
+ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ /* Avoid zeroing the tail if the copy fails..
+ * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
+ * but as the zeroing behaviour is only significant when n is not
+@@ -137,6 +146,10 @@ static __always_inline unsigned long
+ __copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
+ might_fault();
++
++ if ((long)n < 0)
++ return n;
++
+ if (__builtin_constant_p(n)) {
+ unsigned long ret;
+
+@@ -152,6 +165,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
+ return ret;
+ }
+ }
++ if (!__builtin_constant_p(n))
++ check_object_size(to, n, false);
+ return __copy_from_user_ll(to, from, n);
+ }
+
+@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
+ const void __user *from, unsigned long n)
+ {
+ might_fault();
++
++ if ((long)n < 0)
++ return n;
++
+ if (__builtin_constant_p(n)) {
+ unsigned long ret;
+
+@@ -181,15 +200,19 @@ static __always_inline unsigned long
+ __copy_from_user_inatomic_nocache(void *to, const void __user *from,
+ unsigned long n)
+ {
+- return __copy_from_user_ll_nocache_nozero(to, from, n);
++ if ((long)n < 0)
++ return n;
++
++ return __copy_from_user_ll_nocache_nozero(to, from, n);
+ }
+
+-unsigned long __must_check copy_to_user(void __user *to,
+- const void *from, unsigned long n);
+-unsigned long __must_check _copy_from_user(void *to,
+- const void __user *from,
+- unsigned long n);
+-
++extern void copy_to_user_overflow(void)
++#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
++ __compiletime_error("copy_to_user() buffer size is not provably correct")
++#else
++ __compiletime_warning("copy_to_user() buffer size is not provably correct")
++#endif
++;
+
+ extern void copy_from_user_overflow(void)
+ #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
+@@ -199,17 +222,61 @@ extern void copy_from_user_overflow(void)
+ #endif
+ ;
+
+-static inline unsigned long __must_check copy_from_user(void *to,
+- const void __user *from,
+- unsigned long n)
++/**
++ * copy_to_user: - Copy a block of data into user space.
++ * @to: Destination address, in user space.
++ * @from: Source address, in kernel space.
++ * @n: Number of bytes to copy.
++ *
++ * Context: User context only. This function may sleep.
++ *
++ * Copy data from kernel space to user space.
++ *
++ * Returns number of bytes that could not be copied.
++ * On success, this will be zero.
++ */
++static inline unsigned long __must_check
++copy_to_user(void __user *to, const void *from, unsigned long n)
++{
++ int sz = __compiletime_object_size(from);
++
++ if (unlikely(sz != -1 && sz < n))
++ copy_to_user_overflow();
++ else if (access_ok(VERIFY_WRITE, to, n))
++ n = __copy_to_user(to, from, n);
++ return n;
++}
++
++/**
++ * copy_from_user: - Copy a block of data from user space.
++ * @to: Destination address, in kernel space.
++ * @from: Source address, in user space.
++ * @n: Number of bytes to copy.
++ *
++ * Context: User context only. This function may sleep.
++ *
++ * Copy data from user space to kernel space.
++ *
++ * Returns number of bytes that could not be copied.
++ * On success, this will be zero.
++ *
++ * If some data could not be copied, this function will pad the copied
++ * data to the requested size using zero bytes.
++ */
++static inline unsigned long __must_check
++copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
+ int sz = __compiletime_object_size(to);
+
+- if (likely(sz == -1 || sz >= n))
+- n = _copy_from_user(to, from, n);
+- else
++ if (unlikely(sz != -1 && sz < n))
+ copy_from_user_overflow();
+-
++ else if (access_ok(VERIFY_READ, from, n))
++ n = __copy_from_user(to, from, n);
++ else if ((long)n > 0) {
++ if (!__builtin_constant_p(n))
++ check_object_size(to, n, false);
++ memset(to, 0, n);
++ }
+ return n;
+ }
+
+diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
+index 1c66d30..23ab77d 100644
+--- a/arch/x86/include/asm/uaccess_64.h
++++ b/arch/x86/include/asm/uaccess_64.h
+@@ -10,6 +10,9 @@
+ #include <asm/alternative.h>
+ #include <asm/cpufeature.h>
+ #include <asm/page.h>
++#include <asm/pgtable.h>
++
++#define set_fs(x) (current_thread_info()->addr_limit = (x))
+
+ /*
+ * Copy To/From Userspace
+@@ -17,12 +20,12 @@
+
+ /* Handles exceptions in both to and from, but doesn't do access_ok */
+ __must_check unsigned long
+-copy_user_generic_string(void *to, const void *from, unsigned len);
++copy_user_generic_string(void *to, const void *from, unsigned long len);
+ __must_check unsigned long
+-copy_user_generic_unrolled(void *to, const void *from, unsigned len);
++copy_user_generic_unrolled(void *to, const void *from, unsigned long len);
+
+ static __always_inline __must_check unsigned long
+-copy_user_generic(void *to, const void *from, unsigned len)
++copy_user_generic(void *to, const void *from, unsigned long len)
+ {
+ unsigned ret;
+
+@@ -36,138 +39,222 @@ copy_user_generic(void *to, const void *from, unsigned len)
+ return ret;
+ }
+
++static __always_inline __must_check unsigned long
++__copy_to_user(void __user *to, const void *from, unsigned long len);
++static __always_inline __must_check unsigned long
++__copy_from_user(void *to, const void __user *from, unsigned long len);
+ __must_check unsigned long
+-_copy_to_user(void __user *to, const void *from, unsigned len);
+-__must_check unsigned long
+-_copy_from_user(void *to, const void __user *from, unsigned len);
+-__must_check unsigned long
+-copy_in_user(void __user *to, const void __user *from, unsigned len);
++copy_in_user(void __user *to, const void __user *from, unsigned long len);
+
+ static inline unsigned long __must_check copy_from_user(void *to,
+ const void __user *from,
+ unsigned long n)
+ {
+- int sz = __compiletime_object_size(to);
+-
+ might_fault();
+- if (likely(sz == -1 || sz >= n))
+- n = _copy_from_user(to, from, n);
+-#ifdef CONFIG_DEBUG_VM
+- else
+- WARN(1, "Buffer overflow detected!\n");
+-#endif
++
++ if (access_ok(VERIFY_READ, from, n))
++ n = __copy_from_user(to, from, n);
++ else if (n < INT_MAX) {
++ if (!__builtin_constant_p(n))
++ check_object_size(to, n, false);
++ memset(to, 0, n);
++ }
+ return n;
+ }
+
+ static __always_inline __must_check
+-int copy_to_user(void __user *dst, const void *src, unsigned size)
++int copy_to_user(void __user *dst, const void *src, unsigned long size)
+ {
+ might_fault();
+
+- return _copy_to_user(dst, src, size);
++ if (access_ok(VERIFY_WRITE, dst, size))
++ size = __copy_to_user(dst, src, size);
++ return size;
+ }
+
+ static __always_inline __must_check
+-int __copy_from_user(void *dst, const void __user *src, unsigned size)
++unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
+ {
+- int ret = 0;
++ int sz = __compiletime_object_size(dst);
++ unsigned ret = 0;
+
+ might_fault();
+- if (!__builtin_constant_p(size))
+- return copy_user_generic(dst, (__force void *)src, size);
++
++ if (size > INT_MAX)
++ return size;
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if (!__access_ok(VERIFY_READ, src, size))
++ return size;
++#endif
++
++ if (unlikely(sz != -1 && sz < size)) {
++#ifdef CONFIG_DEBUG_VM
++ WARN(1, "Buffer overflow detected!\n");
++#endif
++ return size;
++ }
++
++ if (!__builtin_constant_p(size)) {
++ check_object_size(dst, size, false);
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
++ src += PAX_USER_SHADOW_BASE;
++#endif
++
++ return copy_user_generic(dst, (__force_kernel const void *)src, size);
++ }
+ switch (size) {
+- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
++ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
+ ret, "b", "b", "=q", 1);
+ return ret;
+- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
++ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
+ ret, "w", "w", "=r", 2);
+ return ret;
+- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
++ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
+ ret, "l", "k", "=r", 4);
+ return ret;
+- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
++ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
+ ret, "q", "", "=r", 8);
+ return ret;
+ case 10:
+- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
++ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
+ ret, "q", "", "=r", 10);
+ if (unlikely(ret))
+ return ret;
+ __get_user_asm(*(u16 *)(8 + (char *)dst),
+- (u16 __user *)(8 + (char __user *)src),
++ (const u16 __user *)(8 + (const char __user *)src),
+ ret, "w", "w", "=r", 2);
+ return ret;
+ case 16:
+- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
++ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
+ ret, "q", "", "=r", 16);
+ if (unlikely(ret))
+ return ret;
+ __get_user_asm(*(u64 *)(8 + (char *)dst),
+- (u64 __user *)(8 + (char __user *)src),
++ (const u64 __user *)(8 + (const char __user *)src),
+ ret, "q", "", "=r", 8);
+ return ret;
+ default:
+- return copy_user_generic(dst, (__force void *)src, size);
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
++ src += PAX_USER_SHADOW_BASE;
++#endif
++
++ return copy_user_generic(dst, (__force_kernel const void *)src, size);
+ }
+ }
+
+ static __always_inline __must_check
+-int __copy_to_user(void __user *dst, const void *src, unsigned size)
++unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
+ {
+- int ret = 0;
++ int sz = __compiletime_object_size(src);
++ unsigned ret = 0;
+
+ might_fault();
+- if (!__builtin_constant_p(size))
+- return copy_user_generic((__force void *)dst, src, size);
++
++ if (size > INT_MAX)
++ return size;
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if (!__access_ok(VERIFY_WRITE, dst, size))
++ return size;
++#endif
++
++ if (unlikely(sz != -1 && sz < size)) {
++#ifdef CONFIG_DEBUG_VM
++ WARN(1, "Buffer overflow detected!\n");
++#endif
++ return size;
++ }
++
++ if (!__builtin_constant_p(size)) {
++ check_object_size(src, size, true);
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
++ dst += PAX_USER_SHADOW_BASE;
++#endif
++
++ return copy_user_generic((__force_kernel void *)dst, src, size);
++ }
+ switch (size) {
+- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
++ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
+ ret, "b", "b", "iq", 1);
+ return ret;
+- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
++ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
+ ret, "w", "w", "ir", 2);
+ return ret;
+- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
++ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
+ ret, "l", "k", "ir", 4);
+ return ret;
+- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
++ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
+ ret, "q", "", "er", 8);
+ return ret;
+ case 10:
+- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
++ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
+ ret, "q", "", "er", 10);
+ if (unlikely(ret))
+ return ret;
+ asm("":::"memory");
+- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
++ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
+ ret, "w", "w", "ir", 2);
+ return ret;
+ case 16:
+- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
++ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
+ ret, "q", "", "er", 16);
+ if (unlikely(ret))
+ return ret;
+ asm("":::"memory");
+- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
++ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
+ ret, "q", "", "er", 8);
+ return ret;
+ default:
+- return copy_user_generic((__force void *)dst, src, size);
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
++ dst += PAX_USER_SHADOW_BASE;
++#endif
++
++ return copy_user_generic((__force_kernel void *)dst, src, size);
+ }
+ }
+
+ static __always_inline __must_check
+-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
++unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
+ {
+- int ret = 0;
++ unsigned ret = 0;
+
+ might_fault();
+- if (!__builtin_constant_p(size))
+- return copy_user_generic((__force void *)dst,
+- (__force void *)src, size);
++
++ if (size > INT_MAX)
++ return size;
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if (!__access_ok(VERIFY_READ, src, size))
++ return size;
++ if (!__access_ok(VERIFY_WRITE, dst, size))
++ return size;
++#endif
++
++ if (!__builtin_constant_p(size)) {
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
++ src += PAX_USER_SHADOW_BASE;
++ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
++ dst += PAX_USER_SHADOW_BASE;
++#endif
++
++ return copy_user_generic((__force_kernel void *)dst,
++ (__force_kernel const void *)src, size);
++ }
+ switch (size) {
+ case 1: {
+ u8 tmp;
+- __get_user_asm(tmp, (u8 __user *)src,
++ __get_user_asm(tmp, (const u8 __user *)src,
+ ret, "b", "b", "=q", 1);
+ if (likely(!ret))
+ __put_user_asm(tmp, (u8 __user *)dst,
+@@ -176,7 +263,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
+ }
+ case 2: {
+ u16 tmp;
+- __get_user_asm(tmp, (u16 __user *)src,
++ __get_user_asm(tmp, (const u16 __user *)src,
+ ret, "w", "w", "=r", 2);
+ if (likely(!ret))
+ __put_user_asm(tmp, (u16 __user *)dst,
+@@ -186,7 +273,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
+
+ case 4: {
+ u32 tmp;
+- __get_user_asm(tmp, (u32 __user *)src,
++ __get_user_asm(tmp, (const u32 __user *)src,
+ ret, "l", "k", "=r", 4);
+ if (likely(!ret))
+ __put_user_asm(tmp, (u32 __user *)dst,
+@@ -195,7 +282,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
+ }
+ case 8: {
+ u64 tmp;
+- __get_user_asm(tmp, (u64 __user *)src,
++ __get_user_asm(tmp, (const u64 __user *)src,
+ ret, "q", "", "=r", 8);
+ if (likely(!ret))
+ __put_user_asm(tmp, (u64 __user *)dst,
+@@ -203,8 +290,16 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
+ return ret;
+ }
+ default:
+- return copy_user_generic((__force void *)dst,
+- (__force void *)src, size);
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
++ src += PAX_USER_SHADOW_BASE;
++ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
++ dst += PAX_USER_SHADOW_BASE;
++#endif
++
++ return copy_user_generic((__force_kernel void *)dst,
++ (__force_kernel const void *)src, size);
+ }
+ }
+
+@@ -219,35 +314,72 @@ __must_check unsigned long clear_user(void __user *mem, unsigned long len);
+ __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
+
+ static __must_check __always_inline int
+-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
++__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
+ {
+- return copy_user_generic(dst, (__force const void *)src, size);
++ if (size > INT_MAX)
++ return size;
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if (!__access_ok(VERIFY_READ, src, size))
++ return size;
++
++ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
++ src += PAX_USER_SHADOW_BASE;
++#endif
++
++ return copy_user_generic(dst, (__force_kernel const void *)src, size);
+ }
+
+-static __must_check __always_inline int
+-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
++static __must_check __always_inline unsigned long
++__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
+ {
+- return copy_user_generic((__force void *)dst, src, size);
++ if (size > INT_MAX)
++ return size;
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if (!__access_ok(VERIFY_WRITE, dst, size))
++ return size;
++
++ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
++ dst += PAX_USER_SHADOW_BASE;
++#endif
++
++ return copy_user_generic((__force_kernel void *)dst, src, size);
+ }
+
+-extern long __copy_user_nocache(void *dst, const void __user *src,
+- unsigned size, int zerorest);
++extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
++ unsigned long size, int zerorest);
+
+-static inline int
+-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
++static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
+ {
+ might_sleep();
++
++ if (size > INT_MAX)
++ return size;
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if (!__access_ok(VERIFY_READ, src, size))
++ return size;
++#endif
++
+ return __copy_user_nocache(dst, src, size, 1);
+ }
+
+-static inline int
+-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
+- unsigned size)
++static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
++ unsigned long size)
+ {
++ if (size > INT_MAX)
++ return size;
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if (!__access_ok(VERIFY_READ, src, size))
++ return size;
++#endif
++
+ return __copy_user_nocache(dst, src, size, 0);
+ }
+
+-unsigned long
+-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
++extern unsigned long
++copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest);
+
+ #endif /* _ASM_X86_UACCESS_64_H */
+diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
+index bb05228..d763d5b 100644
+--- a/arch/x86/include/asm/vdso.h
++++ b/arch/x86/include/asm/vdso.h
+@@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[];
+ #define VDSO32_SYMBOL(base, name) \
+ ({ \
+ extern const char VDSO32_##name[]; \
+- (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
++ (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
+ })
+ #endif
+
+diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
+index 1971e65..1e3559b 100644
+--- a/arch/x86/include/asm/x86_init.h
++++ b/arch/x86/include/asm/x86_init.h
+@@ -28,7 +28,7 @@ struct x86_init_mpparse {
+ void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
+ void (*find_smp_config)(void);
+ void (*get_smp_config)(unsigned int early);
+-};
++} __no_const;
+
+ /**
+ * struct x86_init_resources - platform specific resource related ops
+@@ -42,7 +42,7 @@ struct x86_init_resources {
+ void (*probe_roms)(void);
+ void (*reserve_resources)(void);
+ char *(*memory_setup)(void);
+-};
++} __no_const;
+
+ /**
+ * struct x86_init_irqs - platform specific interrupt setup
+@@ -55,7 +55,7 @@ struct x86_init_irqs {
+ void (*pre_vector_init)(void);
+ void (*intr_init)(void);
+ void (*trap_init)(void);
+-};
++} __no_const;
+
+ /**
+ * struct x86_init_oem - oem platform specific customizing functions
+@@ -65,7 +65,7 @@ struct x86_init_irqs {
+ struct x86_init_oem {
+ void (*arch_setup)(void);
+ void (*banner)(void);
+-};
++} __no_const;
+
+ /**
+ * struct x86_init_mapping - platform specific initial kernel pagetable setup
+@@ -76,7 +76,7 @@ struct x86_init_oem {
+ */
+ struct x86_init_mapping {
+ void (*pagetable_reserve)(u64 start, u64 end);
+-};
++} __no_const;
+
+ /**
+ * struct x86_init_paging - platform specific paging functions
+@@ -86,7 +86,7 @@ struct x86_init_mapping {
+ struct x86_init_paging {
+ void (*pagetable_setup_start)(pgd_t *base);
+ void (*pagetable_setup_done)(pgd_t *base);
+-};
++} __no_const;
+
+ /**
+ * struct x86_init_timers - platform specific timer setup
+@@ -101,7 +101,7 @@ struct x86_init_timers {
+ void (*tsc_pre_init)(void);
+ void (*timer_init)(void);
+ void (*wallclock_init)(void);
+-};
++} __no_const;
+
+ /**
+ * struct x86_init_iommu - platform specific iommu setup
+@@ -109,7 +109,7 @@ struct x86_init_timers {
+ */
+ struct x86_init_iommu {
+ int (*iommu_init)(void);
+-};
++} __no_const;
+
+ /**
+ * struct x86_init_pci - platform specific pci init functions
+@@ -123,7 +123,7 @@ struct x86_init_pci {
+ int (*init)(void);
+ void (*init_irq)(void);
+ void (*fixup_irqs)(void);
+-};
++} __no_const;
+
+ /**
+ * struct x86_init_ops - functions for platform specific setup
+@@ -139,7 +139,7 @@ struct x86_init_ops {
+ struct x86_init_timers timers;
+ struct x86_init_iommu iommu;
+ struct x86_init_pci pci;
+-};
++} __no_const;
+
+ /**
+ * struct x86_cpuinit_ops - platform specific cpu hotplug setups
+@@ -147,7 +147,7 @@ struct x86_init_ops {
+ */
+ struct x86_cpuinit_ops {
+ void (*setup_percpu_clockev)(void);
+-};
++} __no_const;
+
+ /**
+ * struct x86_platform_ops - platform specific runtime functions
+@@ -169,7 +169,7 @@ struct x86_platform_ops {
+ void (*nmi_init)(void);
+ unsigned char (*get_nmi_reason)(void);
+ int (*i8042_detect)(void);
+-};
++} __no_const;
+
+ struct pci_dev;
+
+@@ -177,7 +177,7 @@ struct x86_msi_ops {
+ int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
+ void (*teardown_msi_irq)(unsigned int irq);
+ void (*teardown_msi_irqs)(struct pci_dev *dev);
+-};
++} __no_const;
+
+ extern struct x86_init_ops x86_init;
+ extern struct x86_cpuinit_ops x86_cpuinit;
+diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
+index c6ce245..ffbdab7 100644
+--- a/arch/x86/include/asm/xsave.h
++++ b/arch/x86/include/asm/xsave.h
+@@ -65,6 +65,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
+ {
+ int err;
+
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
++ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
++#endif
++
+ /*
+ * Clear the xsave header first, so that reserved fields are
+ * initialized to zero.
+@@ -96,10 +101,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
+ static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
+ {
+ int err;
+- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
++ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
+ u32 lmask = mask;
+ u32 hmask = mask >> 32;
+
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
++ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
++#endif
++
+ __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
+ "2:\n"
+ ".section .fixup,\"ax\"\n"
+diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
+index 6a564ac..9b1340c 100644
+--- a/arch/x86/kernel/acpi/realmode/Makefile
++++ b/arch/x86/kernel/acpi/realmode/Makefile
+@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
+ $(call cc-option, -fno-stack-protector) \
+ $(call cc-option, -mpreferred-stack-boundary=2)
+ KBUILD_CFLAGS += $(call cc-option, -m32)
++ifdef CONSTIFY_PLUGIN
++KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
++endif
+ KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
+ GCOV_PROFILE := n
+
+diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
+index b4fd836..4358fe3 100644
+--- a/arch/x86/kernel/acpi/realmode/wakeup.S
++++ b/arch/x86/kernel/acpi/realmode/wakeup.S
+@@ -108,6 +108,9 @@ wakeup_code:
+ /* Do any other stuff... */
+
+ #ifndef CONFIG_64BIT
++	/* Recheck NX bit overrides (64bit path does this in the trampoline) */
++ call verify_cpu
++
+ /* This could also be done in C code... */
+ movl pmode_cr3, %eax
+ movl %eax, %cr3
+@@ -131,6 +134,7 @@ wakeup_code:
+ movl pmode_cr0, %eax
+ movl %eax, %cr0
+ jmp pmode_return
++# include "../../verify_cpu.S"
+ #else
+ pushw $0
+ pushw trampoline_segment
+diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
+index 103b6ab..2004d0a 100644
+--- a/arch/x86/kernel/acpi/sleep.c
++++ b/arch/x86/kernel/acpi/sleep.c
+@@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
+ header->trampoline_segment = trampoline_address() >> 4;
+ #ifdef CONFIG_SMP
+ stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
++
++ pax_open_kernel();
+ early_gdt_descr.address =
+ (unsigned long)get_cpu_gdt_table(smp_processor_id());
++ pax_close_kernel();
++
+ initial_gs = per_cpu_offset(smp_processor_id());
+ #endif
+ initial_code = (unsigned long)wakeup_long64;
+diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
+index 13ab720..95d5442 100644
+--- a/arch/x86/kernel/acpi/wakeup_32.S
++++ b/arch/x86/kernel/acpi/wakeup_32.S
+@@ -30,13 +30,11 @@ wakeup_pmode_return:
+ # and restore the stack ... but you need gdt for this to work
+ movl saved_context_esp, %esp
+
+- movl %cs:saved_magic, %eax
+- cmpl $0x12345678, %eax
++ cmpl $0x12345678, saved_magic
+ jne bogus_magic
+
+ # jump to place where we left off
+- movl saved_eip, %eax
+- jmp *%eax
++ jmp *(saved_eip)
+
+ bogus_magic:
+ jmp bogus_magic
+diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
+index 1f84794..e23f862 100644
+--- a/arch/x86/kernel/alternative.c
++++ b/arch/x86/kernel/alternative.c
+@@ -276,6 +276,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
+ */
+ for (a = start; a < end; a++) {
+ instr = (u8 *)&a->instr_offset + a->instr_offset;
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
++ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
++ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
++#endif
++
+ replacement = (u8 *)&a->repl_offset + a->repl_offset;
+ BUG_ON(a->replacementlen > a->instrlen);
+ BUG_ON(a->instrlen > sizeof(insnbuf));
+@@ -307,10 +314,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
+ for (poff = start; poff < end; poff++) {
+ u8 *ptr = (u8 *)poff + *poff;
+
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
++ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
++ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
++#endif
++
+ if (!*poff || ptr < text || ptr >= text_end)
+ continue;
+ /* turn DS segment override prefix into lock prefix */
+- if (*ptr == 0x3e)
++ if (*ktla_ktva(ptr) == 0x3e)
+ text_poke(ptr, ((unsigned char []){0xf0}), 1);
+ };
+ mutex_unlock(&text_mutex);
+@@ -328,10 +341,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
+ for (poff = start; poff < end; poff++) {
+ u8 *ptr = (u8 *)poff + *poff;
+
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
++ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
++ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
++#endif
++
+ if (!*poff || ptr < text || ptr >= text_end)
+ continue;
+ /* turn lock prefix into DS segment override prefix */
+- if (*ptr == 0xf0)
++ if (*ktla_ktva(ptr) == 0xf0)
+ text_poke(ptr, ((unsigned char []){0x3E}), 1);
+ };
+ mutex_unlock(&text_mutex);
+@@ -500,7 +519,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
+
+ BUG_ON(p->len > MAX_PATCH_LEN);
+ /* prep the buffer with the original instructions */
+- memcpy(insnbuf, p->instr, p->len);
++ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
+ used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
+ (unsigned long)p->instr, p->len);
+
+@@ -568,7 +587,7 @@ void __init alternative_instructions(void)
+ if (smp_alt_once)
+ free_init_pages("SMP alternatives",
+ (unsigned long)__smp_locks,
+- (unsigned long)__smp_locks_end);
++ PAGE_ALIGN((unsigned long)__smp_locks_end));
+
+ restart_nmi();
+ }
+@@ -585,13 +604,17 @@ void __init alternative_instructions(void)
+ * instructions. And on the local CPU you need to be protected again NMI or MCE
+ * handlers seeing an inconsistent instruction while you patch.
+ */
+-void *__init_or_module text_poke_early(void *addr, const void *opcode,
++void *__kprobes text_poke_early(void *addr, const void *opcode,
+ size_t len)
+ {
+ unsigned long flags;
+ local_irq_save(flags);
+- memcpy(addr, opcode, len);
++
++ pax_open_kernel();
++ memcpy(ktla_ktva(addr), opcode, len);
+ sync_core();
++ pax_close_kernel();
++
+ local_irq_restore(flags);
+ /* Could also do a CLFLUSH here to speed up CPU recovery; but
+ that causes hangs on some VIA CPUs. */
+@@ -613,36 +636,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
+ */
+ void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
+ {
+- unsigned long flags;
+- char *vaddr;
++ unsigned char *vaddr = ktla_ktva(addr);
+ struct page *pages[2];
+- int i;
++ size_t i;
+
+ if (!core_kernel_text((unsigned long)addr)) {
+- pages[0] = vmalloc_to_page(addr);
+- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
++ pages[0] = vmalloc_to_page(vaddr);
++ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
+ } else {
+- pages[0] = virt_to_page(addr);
++ pages[0] = virt_to_page(vaddr);
+ WARN_ON(!PageReserved(pages[0]));
+- pages[1] = virt_to_page(addr + PAGE_SIZE);
++ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
+ }
+ BUG_ON(!pages[0]);
+- local_irq_save(flags);
+- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
+- if (pages[1])
+- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
+- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
+- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
+- clear_fixmap(FIX_TEXT_POKE0);
+- if (pages[1])
+- clear_fixmap(FIX_TEXT_POKE1);
+- local_flush_tlb();
+- sync_core();
+- /* Could also do a CLFLUSH here to speed up CPU recovery; but
+- that causes hangs on some VIA CPUs. */
++ text_poke_early(addr, opcode, len);
+ for (i = 0; i < len; i++)
+- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
+- local_irq_restore(flags);
++ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
+ return addr;
+ }
+
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
+index f98d84c..e402a69 100644
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -174,7 +174,7 @@ int first_system_vector = 0xfe;
+ /*
+ * Debug level, exported for io_apic.c
+ */
+-unsigned int apic_verbosity;
++int apic_verbosity;
+
+ int pic_mode;
+
+@@ -1853,7 +1853,7 @@ void smp_error_interrupt(struct pt_regs *regs)
+ apic_write(APIC_ESR, 0);
+ v1 = apic_read(APIC_ESR);
+ ack_APIC_irq();
+- atomic_inc(&irq_err_count);
++ atomic_inc_unchecked(&irq_err_count);
+
+ apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
+ smp_processor_id(), v0 , v1);
+diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
+index 6d939d7..0697fcc 100644
+--- a/arch/x86/kernel/apic/io_apic.c
++++ b/arch/x86/kernel/apic/io_apic.c
+@@ -1096,7 +1096,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
+ }
+ EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
+
+-void lock_vector_lock(void)
++void lock_vector_lock(void) __acquires(vector_lock)
+ {
+ /* Used to the online set of cpus does not change
+ * during assign_irq_vector.
+@@ -1104,7 +1104,7 @@ void lock_vector_lock(void)
+ raw_spin_lock(&vector_lock);
+ }
+
+-void unlock_vector_lock(void)
++void unlock_vector_lock(void) __releases(vector_lock)
+ {
+ raw_spin_unlock(&vector_lock);
+ }
+@@ -2510,7 +2510,7 @@ static void ack_apic_edge(struct irq_data *data)
+ ack_APIC_irq();
+ }
+
+-atomic_t irq_mis_count;
++atomic_unchecked_t irq_mis_count;
+
+ static void ack_apic_level(struct irq_data *data)
+ {
+@@ -2576,7 +2576,7 @@ static void ack_apic_level(struct irq_data *data)
+ * at the cpu.
+ */
+ if (!(v & (1 << (i & 0x1f)))) {
+- atomic_inc(&irq_mis_count);
++ atomic_inc_unchecked(&irq_mis_count);
+
+ eoi_ioapic_irq(irq, cfg);
+ }
+diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
+index a46bd38..6b906d7 100644
+--- a/arch/x86/kernel/apm_32.c
++++ b/arch/x86/kernel/apm_32.c
+@@ -411,7 +411,7 @@ static DEFINE_MUTEX(apm_mutex);
+ * This is for buggy BIOS's that refer to (real mode) segment 0x40
+ * even though they are called in protected mode.
+ */
+-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
++static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
+ (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
+
+ static const char driver_version[] = "1.16ac"; /* no spaces */
+@@ -589,7 +589,10 @@ static long __apm_bios_call(void *_call)
+ BUG_ON(cpu != 0);
+ gdt = get_cpu_gdt_table(cpu);
+ save_desc_40 = gdt[0x40 / 8];
++
++ pax_open_kernel();
+ gdt[0x40 / 8] = bad_bios_desc;
++ pax_close_kernel();
+
+ apm_irq_save(flags);
+ APM_DO_SAVE_SEGS;
+@@ -598,7 +601,11 @@ static long __apm_bios_call(void *_call)
+ &call->esi);
+ APM_DO_RESTORE_SEGS;
+ apm_irq_restore(flags);
++
++ pax_open_kernel();
+ gdt[0x40 / 8] = save_desc_40;
++ pax_close_kernel();
++
+ put_cpu();
+
+ return call->eax & 0xff;
+@@ -665,7 +672,10 @@ static long __apm_bios_call_simple(void *_call)
+ BUG_ON(cpu != 0);
+ gdt = get_cpu_gdt_table(cpu);
+ save_desc_40 = gdt[0x40 / 8];
++
++ pax_open_kernel();
+ gdt[0x40 / 8] = bad_bios_desc;
++ pax_close_kernel();
+
+ apm_irq_save(flags);
+ APM_DO_SAVE_SEGS;
+@@ -673,7 +683,11 @@ static long __apm_bios_call_simple(void *_call)
+ &call->eax);
+ APM_DO_RESTORE_SEGS;
+ apm_irq_restore(flags);
++
++ pax_open_kernel();
+ gdt[0x40 / 8] = save_desc_40;
++ pax_close_kernel();
++
+ put_cpu();
+ return error;
+ }
+@@ -2347,12 +2361,15 @@ static int __init apm_init(void)
+ * code to that CPU.
+ */
+ gdt = get_cpu_gdt_table(0);
++
++ pax_open_kernel();
+ set_desc_base(&gdt[APM_CS >> 3],
+ (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
+ set_desc_base(&gdt[APM_CS_16 >> 3],
+ (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
+ set_desc_base(&gdt[APM_DS >> 3],
+ (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
++ pax_close_kernel();
+
+ proc_create("apm", 0, NULL, &apm_file_ops);
+
+diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
+index 4f13faf..87db5d2 100644
+--- a/arch/x86/kernel/asm-offsets.c
++++ b/arch/x86/kernel/asm-offsets.c
+@@ -33,6 +33,8 @@ void common(void) {
+ OFFSET(TI_status, thread_info, status);
+ OFFSET(TI_addr_limit, thread_info, addr_limit);
+ OFFSET(TI_preempt_count, thread_info, preempt_count);
++ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
++ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
+
+ BLANK();
+ OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
+@@ -53,8 +55,26 @@ void common(void) {
+ OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
+ OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
+ OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
++
++#ifdef CONFIG_PAX_KERNEXEC
++ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
+ #endif
+
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
++ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
++#ifdef CONFIG_X86_64
++ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
++#endif
++#endif
++
++#endif
++
++ BLANK();
++ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
++ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
++ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
++
+ #ifdef CONFIG_XEN
+ BLANK();
+ OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
+diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
+index e72a119..6e2955d 100644
+--- a/arch/x86/kernel/asm-offsets_64.c
++++ b/arch/x86/kernel/asm-offsets_64.c
+@@ -69,6 +69,7 @@ int main(void)
+ BLANK();
+ #undef ENTRY
+
++ DEFINE(TSS_size, sizeof(struct tss_struct));
+ OFFSET(TSS_ist, tss_struct, x86_tss.ist);
+ BLANK();
+
+diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
+index 25f24dc..4094a7f 100644
+--- a/arch/x86/kernel/cpu/Makefile
++++ b/arch/x86/kernel/cpu/Makefile
+@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
+ CFLAGS_REMOVE_perf_event.o = -pg
+ endif
+
+-# Make sure load_percpu_segment has no stackprotector
+-nostackp := $(call cc-option, -fno-stack-protector)
+-CFLAGS_common.o := $(nostackp)
+-
+ obj-y := intel_cacheinfo.o scattered.o topology.o
+ obj-y += proc.o capflags.o powerflags.o common.o
+ obj-y += vmware.o hypervisor.o sched.o mshyperv.o
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index 0bab2b1..d0a1bf8 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -664,7 +664,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
+ unsigned int size)
+ {
+ /* AMD errata T13 (order #21922) */
+- if ((c->x86 == 6)) {
++ if (c->x86 == 6) {
+ /* Duron Rev A0 */
+ if (c->x86_model == 3 && c->x86_mask == 0)
+ size = 64;
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index aa003b1..47ea638 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -84,60 +84,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
+
+ static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
+
+-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
+-#ifdef CONFIG_X86_64
+- /*
+- * We need valid kernel segments for data and code in long mode too
+- * IRET will check the segment types kkeil 2000/10/28
+- * Also sysret mandates a special GDT layout
+- *
+- * TLS descriptors are currently at a different place compared to i386.
+- * Hopefully nobody expects them at a fixed place (Wine?)
+- */
+- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
+- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
+- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
+- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
+- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
+- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
+-#else
+- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
+- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
+- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
+- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
+- /*
+- * Segments used for calling PnP BIOS have byte granularity.
+- * They code segments and data segments have fixed 64k limits,
+- * the transfer segment sizes are set at run time.
+- */
+- /* 32-bit code */
+- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
+- /* 16-bit code */
+- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
+- /* 16-bit data */
+- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
+- /* 16-bit data */
+- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
+- /* 16-bit data */
+- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
+- /*
+- * The APM segments have byte granularity and their bases
+- * are set at run time. All have 64k limits.
+- */
+- /* 32-bit code */
+- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
+- /* 16-bit code */
+- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
+- /* data */
+- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
+-
+- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
+- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
+- GDT_STACK_CANARY_INIT
+-#endif
+-} };
+-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
+-
+ static int __init x86_xsave_setup(char *s)
+ {
+ setup_clear_cpu_cap(X86_FEATURE_XSAVE);
+@@ -372,7 +318,7 @@ void switch_to_new_gdt(int cpu)
+ {
+ struct desc_ptr gdt_descr;
+
+- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
++ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
+ gdt_descr.size = GDT_SIZE - 1;
+ load_gdt(&gdt_descr);
+ /* Reload the per-cpu base */
+@@ -844,6 +790,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
+ /* Filter out anything that depends on CPUID levels we don't have */
+ filter_cpuid_features(c, true);
+
++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
++ setup_clear_cpu_cap(X86_FEATURE_SEP);
++#endif
++
+ /* If the model name is still unset, do table lookup. */
+ if (!c->x86_model_id[0]) {
+ const char *p;
+@@ -1024,6 +974,9 @@ static __init int setup_disablecpuid(char *arg)
+ }
+ __setup("clearcpuid=", setup_disablecpuid);
+
++DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
++EXPORT_PER_CPU_SYMBOL(current_tinfo);
++
+ #ifdef CONFIG_X86_64
+ struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
+
+@@ -1039,7 +992,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
+ EXPORT_PER_CPU_SYMBOL(current_task);
+
+ DEFINE_PER_CPU(unsigned long, kernel_stack) =
+- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
++ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
+ EXPORT_PER_CPU_SYMBOL(kernel_stack);
+
+ DEFINE_PER_CPU(char *, irq_stack_ptr) =
+@@ -1104,7 +1057,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
+ {
+ memset(regs, 0, sizeof(struct pt_regs));
+ regs->fs = __KERNEL_PERCPU;
+- regs->gs = __KERNEL_STACK_CANARY;
++ savesegment(gs, regs->gs);
+
+ return regs;
+ }
+@@ -1159,7 +1112,7 @@ void __cpuinit cpu_init(void)
+ int i;
+
+ cpu = stack_smp_processor_id();
+- t = &per_cpu(init_tss, cpu);
++ t = init_tss + cpu;
+ oist = &per_cpu(orig_ist, cpu);
+
+ #ifdef CONFIG_NUMA
+@@ -1185,7 +1138,7 @@ void __cpuinit cpu_init(void)
+ switch_to_new_gdt(cpu);
+ loadsegment(fs, 0);
+
+- load_idt((const struct desc_ptr *)&idt_descr);
++ load_idt(&idt_descr);
+
+ memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
+ syscall_init();
+@@ -1194,7 +1147,6 @@ void __cpuinit cpu_init(void)
+ wrmsrl(MSR_KERNEL_GS_BASE, 0);
+ barrier();
+
+- x86_configure_nx();
+ if (cpu != 0)
+ enable_x2apic();
+
+@@ -1248,7 +1200,7 @@ void __cpuinit cpu_init(void)
+ {
+ int cpu = smp_processor_id();
+ struct task_struct *curr = current;
+- struct tss_struct *t = &per_cpu(init_tss, cpu);
++ struct tss_struct *t = init_tss + cpu;
+ struct thread_struct *thread = &curr->thread;
+
+ if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
+diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
+index 5231312..a78a987 100644
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -174,7 +174,7 @@ static void __cpuinit trap_init_f00f_bug(void)
+ * Update the IDT descriptor and reload the IDT so that
+ * it uses the read-only mapped virtual address.
+ */
+- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
++ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
+ load_idt(&idt_descr);
+ }
+ #endif
+diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
+index 2af127d..8ff7ac0 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce.c
++++ b/arch/x86/kernel/cpu/mcheck/mce.c
+@@ -42,6 +42,7 @@
+ #include <asm/processor.h>
+ #include <asm/mce.h>
+ #include <asm/msr.h>
++#include <asm/local.h>
+
+ #include "mce-internal.h"
+
+@@ -202,7 +203,7 @@ static void print_mce(struct mce *m)
+ !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
+ m->cs, m->ip);
+
+- if (m->cs == __KERNEL_CS)
++ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
+ print_symbol("{%s}", m->ip);
+ pr_cont("\n");
+ }
+@@ -235,10 +236,10 @@ static void print_mce(struct mce *m)
+
+ #define PANIC_TIMEOUT 5 /* 5 seconds */
+
+-static atomic_t mce_paniced;
++static atomic_unchecked_t mce_paniced;
+
+ static int fake_panic;
+-static atomic_t mce_fake_paniced;
++static atomic_unchecked_t mce_fake_paniced;
+
+ /* Panic in progress. Enable interrupts and wait for final IPI */
+ static void wait_for_panic(void)
+@@ -262,7 +263,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
+ /*
+ * Make sure only one CPU runs in machine check panic
+ */
+- if (atomic_inc_return(&mce_paniced) > 1)
++ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
+ wait_for_panic();
+ barrier();
+
+@@ -270,7 +271,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
+ console_verbose();
+ } else {
+ /* Don't log too much for fake panic */
+- if (atomic_inc_return(&mce_fake_paniced) > 1)
++ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
+ return;
+ }
+ /* First print corrected ones that are still unlogged */
+@@ -610,7 +611,7 @@ static int mce_timed_out(u64 *t)
+ * might have been modified by someone else.
+ */
+ rmb();
+- if (atomic_read(&mce_paniced))
++ if (atomic_read_unchecked(&mce_paniced))
+ wait_for_panic();
+ if (!monarch_timeout)
+ goto out;
+@@ -1398,7 +1399,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
+ }
+
+ /* Call the installed machine check handler for this CPU setup. */
+-void (*machine_check_vector)(struct pt_regs *, long error_code) =
++void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
+ unexpected_machine_check;
+
+ /*
+@@ -1421,7 +1422,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
+ return;
+ }
+
++ pax_open_kernel();
+ machine_check_vector = do_machine_check;
++ pax_close_kernel();
+
+ __mcheck_cpu_init_generic();
+ __mcheck_cpu_init_vendor(c);
+@@ -1435,7 +1438,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
+ */
+
+ static DEFINE_SPINLOCK(mce_chrdev_state_lock);
+-static int mce_chrdev_open_count; /* #times opened */
++static local_t mce_chrdev_open_count; /* #times opened */
+ static int mce_chrdev_open_exclu; /* already open exclusive? */
+
+ static int mce_chrdev_open(struct inode *inode, struct file *file)
+@@ -1443,7 +1446,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
+ spin_lock(&mce_chrdev_state_lock);
+
+ if (mce_chrdev_open_exclu ||
+- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
++ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
+ spin_unlock(&mce_chrdev_state_lock);
+
+ return -EBUSY;
+@@ -1451,7 +1454,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
+
+ if (file->f_flags & O_EXCL)
+ mce_chrdev_open_exclu = 1;
+- mce_chrdev_open_count++;
++ local_inc(&mce_chrdev_open_count);
+
+ spin_unlock(&mce_chrdev_state_lock);
+
+@@ -1462,7 +1465,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
+ {
+ spin_lock(&mce_chrdev_state_lock);
+
+- mce_chrdev_open_count--;
++ local_dec(&mce_chrdev_open_count);
+ mce_chrdev_open_exclu = 0;
+
+ spin_unlock(&mce_chrdev_state_lock);
+@@ -2171,7 +2174,7 @@ struct dentry *mce_get_debugfs_dir(void)
+ static void mce_reset(void)
+ {
+ cpu_missing = 0;
+- atomic_set(&mce_fake_paniced, 0);
++ atomic_set_unchecked(&mce_fake_paniced, 0);
+ atomic_set(&mce_executing, 0);
+ atomic_set(&mce_callin, 0);
+ atomic_set(&global_nwo, 0);
+diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
+index 5c0e653..1e82c7c 100644
+--- a/arch/x86/kernel/cpu/mcheck/p5.c
++++ b/arch/x86/kernel/cpu/mcheck/p5.c
+@@ -50,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
+ if (!cpu_has(c, X86_FEATURE_MCE))
+ return;
+
++ pax_open_kernel();
+ machine_check_vector = pentium_machine_check;
++ pax_close_kernel();
+ /* Make sure the vector pointer is visible before we enable MCEs: */
+ wmb();
+
+diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
+index 54060f5..e6ba93d 100644
+--- a/arch/x86/kernel/cpu/mcheck/winchip.c
++++ b/arch/x86/kernel/cpu/mcheck/winchip.c
+@@ -24,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
+ {
+ u32 lo, hi;
+
++ pax_open_kernel();
+ machine_check_vector = winchip_machine_check;
++ pax_close_kernel();
+ /* Make sure the vector pointer is visible before we enable MCEs: */
+ wmb();
+
+diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
+index 6b96110..0da73eb 100644
+--- a/arch/x86/kernel/cpu/mtrr/main.c
++++ b/arch/x86/kernel/cpu/mtrr/main.c
+@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
+ u64 size_or_mask, size_and_mask;
+ static bool mtrr_aps_delayed_init;
+
+-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
++static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
+
+ const struct mtrr_ops *mtrr_if;
+
+diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
+index df5e41f..816c719 100644
+--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
++++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
+@@ -25,7 +25,7 @@ struct mtrr_ops {
+ int (*validate_add_page)(unsigned long base, unsigned long size,
+ unsigned int type);
+ int (*have_wrcomb)(void);
+-};
++} __do_const;
+
+ extern int generic_get_free_region(unsigned long base, unsigned long size,
+ int replace_reg);
+diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
+index 2bda212..78cc605 100644
+--- a/arch/x86/kernel/cpu/perf_event.c
++++ b/arch/x86/kernel/cpu/perf_event.c
+@@ -1529,7 +1529,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+ break;
+
+ perf_callchain_store(entry, frame.return_address);
+- fp = frame.next_frame;
++ fp = (const void __force_user *)frame.next_frame;
+ }
+ }
+
+diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
+index 13ad899..f642b9a 100644
+--- a/arch/x86/kernel/crash.c
++++ b/arch/x86/kernel/crash.c
+@@ -36,10 +36,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
+ {
+ #ifdef CONFIG_X86_32
+ struct pt_regs fixed_regs;
+-#endif
+
+-#ifdef CONFIG_X86_32
+- if (!user_mode_vm(regs)) {
++ if (!user_mode(regs)) {
+ crash_fixup_ss_esp(&fixed_regs, regs);
+ regs = &fixed_regs;
+ }
+diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
+index 37250fe..bf2ec74 100644
+--- a/arch/x86/kernel/doublefault_32.c
++++ b/arch/x86/kernel/doublefault_32.c
+@@ -11,7 +11,7 @@
+
+ #define DOUBLEFAULT_STACKSIZE (1024)
+ static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
+-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
++#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
+
+ #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
+
+@@ -21,7 +21,7 @@ static void doublefault_fn(void)
+ unsigned long gdt, tss;
+
+ store_gdt(&gdt_desc);
+- gdt = gdt_desc.address;
++ gdt = (unsigned long)gdt_desc.address;
+
+ printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
+
+@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
+ /* 0x2 bit is always set */
+ .flags = X86_EFLAGS_SF | 0x2,
+ .sp = STACK_START,
+- .es = __USER_DS,
++ .es = __KERNEL_DS,
+ .cs = __KERNEL_CS,
+ .ss = __KERNEL_DS,
+- .ds = __USER_DS,
++ .ds = __KERNEL_DS,
+ .fs = __KERNEL_PERCPU,
+
+ .__cr3 = __pa_nodebug(swapper_pg_dir),
+diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
+index 1aae78f..aab3a3d 100644
+--- a/arch/x86/kernel/dumpstack.c
++++ b/arch/x86/kernel/dumpstack.c
+@@ -2,6 +2,9 @@
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
+ */
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++#define __INCLUDED_BY_HIDESYM 1
++#endif
+ #include <linux/kallsyms.h>
+ #include <linux/kprobes.h>
+ #include <linux/uaccess.h>
+@@ -35,9 +38,8 @@ void printk_address(unsigned long address, int reliable)
+ static void
+ print_ftrace_graph_addr(unsigned long addr, void *data,
+ const struct stacktrace_ops *ops,
+- struct thread_info *tinfo, int *graph)
++ struct task_struct *task, int *graph)
+ {
+- struct task_struct *task = tinfo->task;
+ unsigned long ret_addr;
+ int index = task->curr_ret_stack;
+
+@@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
+ static inline void
+ print_ftrace_graph_addr(unsigned long addr, void *data,
+ const struct stacktrace_ops *ops,
+- struct thread_info *tinfo, int *graph)
++ struct task_struct *task, int *graph)
+ { }
+ #endif
+
+@@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
+ * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
+ */
+
+-static inline int valid_stack_ptr(struct thread_info *tinfo,
+- void *p, unsigned int size, void *end)
++static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
+ {
+- void *t = tinfo;
+ if (end) {
+ if (p < end && p >= (end-THREAD_SIZE))
+ return 1;
+@@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
+ }
+
+ unsigned long
+-print_context_stack(struct thread_info *tinfo,
++print_context_stack(struct task_struct *task, void *stack_start,
+ unsigned long *stack, unsigned long bp,
+ const struct stacktrace_ops *ops, void *data,
+ unsigned long *end, int *graph)
+ {
+ struct stack_frame *frame = (struct stack_frame *)bp;
+
+- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
++ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
+ unsigned long addr;
+
+ addr = *stack;
+@@ -102,7 +102,7 @@ print_context_stack(struct thread_info *tinfo,
+ } else {
+ ops->address(data, addr, 0);
+ }
+- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
++ print_ftrace_graph_addr(addr, data, ops, task, graph);
+ }
+ stack++;
+ }
+@@ -111,7 +111,7 @@ print_context_stack(struct thread_info *tinfo,
+ EXPORT_SYMBOL_GPL(print_context_stack);
+
+ unsigned long
+-print_context_stack_bp(struct thread_info *tinfo,
++print_context_stack_bp(struct task_struct *task, void *stack_start,
+ unsigned long *stack, unsigned long bp,
+ const struct stacktrace_ops *ops, void *data,
+ unsigned long *end, int *graph)
+@@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_info *tinfo,
+ struct stack_frame *frame = (struct stack_frame *)bp;
+ unsigned long *ret_addr = &frame->return_address;
+
+- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
++ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
+ unsigned long addr = *ret_addr;
+
+ if (!__kernel_text_address(addr))
+@@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_info *tinfo,
+ ops->address(data, addr, 1);
+ frame = frame->next_frame;
+ ret_addr = &frame->return_address;
+- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
++ print_ftrace_graph_addr(addr, data, ops, task, graph);
+ }
+
+ return (unsigned long)frame;
+@@ -186,7 +186,7 @@ void dump_stack(void)
+
+ bp = stack_frame(current, NULL);
+ printk("Pid: %d, comm: %.20s %s %s %.*s\n",
+- current->pid, current->comm, print_tainted(),
++ task_pid_nr(current), current->comm, print_tainted(),
+ init_utsname()->release,
+ (int)strcspn(init_utsname()->version, " "),
+ init_utsname()->version);
+@@ -222,6 +222,8 @@ unsigned __kprobes long oops_begin(void)
+ }
+ EXPORT_SYMBOL_GPL(oops_begin);
+
++extern void gr_handle_kernel_exploit(void);
++
+ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
+ {
+ if (regs && kexec_should_crash(current))
+@@ -243,7 +245,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
+ panic("Fatal exception in interrupt");
+ if (panic_on_oops)
+ panic("Fatal exception");
+- do_exit(signr);
++
++ gr_handle_kernel_exploit();
++
++ do_group_exit(signr);
+ }
+
+ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
+@@ -269,7 +274,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
+
+ show_registers(regs);
+ #ifdef CONFIG_X86_32
+- if (user_mode_vm(regs)) {
++ if (user_mode(regs)) {
+ sp = regs->sp;
+ ss = regs->ss & 0xffff;
+ } else {
+@@ -297,7 +302,7 @@ void die(const char *str, struct pt_regs *regs, long err)
+ unsigned long flags = oops_begin();
+ int sig = SIGSEGV;
+
+- if (!user_mode_vm(regs))
++ if (!user_mode(regs))
+ report_bug(regs->ip, regs);
+
+ if (__die(str, regs, err))
+diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
+index c99f9ed..2a15d80 100644
+--- a/arch/x86/kernel/dumpstack_32.c
++++ b/arch/x86/kernel/dumpstack_32.c
+@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
+ bp = stack_frame(task, regs);
+
+ for (;;) {
+- struct thread_info *context;
++ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
+
+- context = (struct thread_info *)
+- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
+- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
++ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
+
+- stack = (unsigned long *)context->previous_esp;
+- if (!stack)
++ if (stack_start == task_stack_page(task))
+ break;
++ stack = *(unsigned long **)stack_start;
+ if (ops->stack(data, "IRQ") < 0)
+ break;
+ touch_nmi_watchdog();
+@@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs)
+ * When in-kernel, we also print out the stack and code at the
+ * time of the fault..
+ */
+- if (!user_mode_vm(regs)) {
++ if (!user_mode(regs)) {
+ unsigned int code_prologue = code_bytes * 43 / 64;
+ unsigned int code_len = code_bytes;
+ unsigned char c;
+ u8 *ip;
++ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
+
+ printk(KERN_EMERG "Stack:\n");
+ show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
+
+ printk(KERN_EMERG "Code: ");
+
+- ip = (u8 *)regs->ip - code_prologue;
++ ip = (u8 *)regs->ip - code_prologue + cs_base;
+ if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
+ /* try starting at IP */
+- ip = (u8 *)regs->ip;
++ ip = (u8 *)regs->ip + cs_base;
+ code_len = code_len - code_prologue + 1;
+ }
+ for (i = 0; i < code_len; i++, ip++) {
+@@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs)
+ printk(KERN_CONT " Bad EIP value.");
+ break;
+ }
+- if (ip == (u8 *)regs->ip)
++ if (ip == (u8 *)regs->ip + cs_base)
+ printk(KERN_CONT "<%02x> ", c);
+ else
+ printk(KERN_CONT "%02x ", c);
+@@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
+ {
+ unsigned short ud2;
+
++ ip = ktla_ktva(ip);
+ if (ip < PAGE_OFFSET)
+ return 0;
+ if (probe_kernel_address((unsigned short *)ip, ud2))
+@@ -139,3 +139,15 @@ int is_valid_bugaddr(unsigned long ip)
+
+ return ud2 == 0x0b0f;
+ }
++
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++void pax_check_alloca(unsigned long size)
++{
++ unsigned long sp = (unsigned long)&sp, stack_left;
++
++ /* all kernel stacks are of the same size */
++ stack_left = sp & (THREAD_SIZE - 1);
++ BUG_ON(stack_left < 256 || size >= stack_left - 256);
++}
++EXPORT_SYMBOL(pax_check_alloca);
++#endif
+diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
+index 6d728d9..279514e 100644
+--- a/arch/x86/kernel/dumpstack_64.c
++++ b/arch/x86/kernel/dumpstack_64.c
+@@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
+ unsigned long *irq_stack_end =
+ (unsigned long *)per_cpu(irq_stack_ptr, cpu);
+ unsigned used = 0;
+- struct thread_info *tinfo;
+ int graph = 0;
+ unsigned long dummy;
++ void *stack_start;
+
+ if (!task)
+ task = current;
+@@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
+ * current stack address. If the stacks consist of nested
+ * exceptions
+ */
+- tinfo = task_thread_info(task);
+ for (;;) {
+ char *id;
+ unsigned long *estack_end;
++
+ estack_end = in_exception_stack(cpu, (unsigned long)stack,
+ &used, &id);
+
+@@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
+ if (ops->stack(data, id) < 0)
+ break;
+
+- bp = ops->walk_stack(tinfo, stack, bp, ops,
++ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
+ data, estack_end, &graph);
+ ops->stack(data, "<EOE>");
+ /*
+@@ -172,7 +172,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
+ if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
+ if (ops->stack(data, "IRQ") < 0)
+ break;
+- bp = ops->walk_stack(tinfo, stack, bp,
++ bp = ops->walk_stack(task, irq_stack, stack, bp,
+ ops, data, irq_stack_end, &graph);
+ /*
+ * We link to the next stack (which would be
+@@ -191,7 +191,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
+ /*
+ * This handles the process stack:
+ */
+- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
++ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
++ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
+ put_cpu();
+ }
+ EXPORT_SYMBOL(dump_trace);
+@@ -305,3 +306,50 @@ int is_valid_bugaddr(unsigned long ip)
+
+ return ud2 == 0x0b0f;
+ }
++
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++void pax_check_alloca(unsigned long size)
++{
++ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
++ unsigned cpu, used;
++ char *id;
++
++ /* check the process stack first */
++ stack_start = (unsigned long)task_stack_page(current);
++ stack_end = stack_start + THREAD_SIZE;
++ if (likely(stack_start <= sp && sp < stack_end)) {
++ unsigned long stack_left = sp & (THREAD_SIZE - 1);
++ BUG_ON(stack_left < 256 || size >= stack_left - 256);
++ return;
++ }
++
++ cpu = get_cpu();
++
++ /* check the irq stacks */
++ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
++ stack_start = stack_end - IRQ_STACK_SIZE;
++ if (stack_start <= sp && sp < stack_end) {
++ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
++ put_cpu();
++ BUG_ON(stack_left < 256 || size >= stack_left - 256);
++ return;
++ }
++
++ /* check the exception stacks */
++ used = 0;
++ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
++ stack_start = stack_end - EXCEPTION_STKSZ;
++ if (stack_end && stack_start <= sp && sp < stack_end) {
++ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
++ put_cpu();
++ BUG_ON(stack_left < 256 || size >= stack_left - 256);
++ return;
++ }
++
++ put_cpu();
++
++ /* unknown stack */
++ BUG();
++}
++EXPORT_SYMBOL(pax_check_alloca);
++#endif
+diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
+index cd28a35..c72ed9a 100644
+--- a/arch/x86/kernel/early_printk.c
++++ b/arch/x86/kernel/early_printk.c
+@@ -7,6 +7,7 @@
+ #include <linux/pci_regs.h>
+ #include <linux/pci_ids.h>
+ #include <linux/errno.h>
++#include <linux/sched.h>
+ #include <asm/io.h>
+ #include <asm/processor.h>
+ #include <asm/fcntl.h>
+diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
+index f3f6f53..0841b66 100644
+--- a/arch/x86/kernel/entry_32.S
++++ b/arch/x86/kernel/entry_32.S
+@@ -186,13 +186,146 @@
+ /*CFI_REL_OFFSET gs, PT_GS*/
+ .endm
+ .macro SET_KERNEL_GS reg
++
++#ifdef CONFIG_CC_STACKPROTECTOR
+ movl $(__KERNEL_STACK_CANARY), \reg
++#elif defined(CONFIG_PAX_MEMORY_UDEREF)
++ movl $(__USER_DS), \reg
++#else
++ xorl \reg, \reg
++#endif
++
+ movl \reg, %gs
+ .endm
+
+ #endif /* CONFIG_X86_32_LAZY_GS */
+
+-.macro SAVE_ALL
++.macro pax_enter_kernel
++#ifdef CONFIG_PAX_KERNEXEC
++ call pax_enter_kernel
++#endif
++.endm
++
++.macro pax_exit_kernel
++#ifdef CONFIG_PAX_KERNEXEC
++ call pax_exit_kernel
++#endif
++.endm
++
++#ifdef CONFIG_PAX_KERNEXEC
++ENTRY(pax_enter_kernel)
++#ifdef CONFIG_PARAVIRT
++ pushl %eax
++ pushl %ecx
++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
++ mov %eax, %esi
++#else
++ mov %cr0, %esi
++#endif
++ bts $16, %esi
++ jnc 1f
++ mov %cs, %esi
++ cmp $__KERNEL_CS, %esi
++ jz 3f
++ ljmp $__KERNEL_CS, $3f
++1: ljmp $__KERNEXEC_KERNEL_CS, $2f
++2:
++#ifdef CONFIG_PARAVIRT
++ mov %esi, %eax
++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
++#else
++ mov %esi, %cr0
++#endif
++3:
++#ifdef CONFIG_PARAVIRT
++ popl %ecx
++ popl %eax
++#endif
++ ret
++ENDPROC(pax_enter_kernel)
++
++ENTRY(pax_exit_kernel)
++#ifdef CONFIG_PARAVIRT
++ pushl %eax
++ pushl %ecx
++#endif
++ mov %cs, %esi
++ cmp $__KERNEXEC_KERNEL_CS, %esi
++ jnz 2f
++#ifdef CONFIG_PARAVIRT
++	call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
++ mov %eax, %esi
++#else
++ mov %cr0, %esi
++#endif
++ btr $16, %esi
++ ljmp $__KERNEL_CS, $1f
++1:
++#ifdef CONFIG_PARAVIRT
++ mov %esi, %eax
++	call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
++#else
++ mov %esi, %cr0
++#endif
++2:
++#ifdef CONFIG_PARAVIRT
++ popl %ecx
++ popl %eax
++#endif
++ ret
++ENDPROC(pax_exit_kernel)
++#endif
++
++.macro pax_erase_kstack
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++ call pax_erase_kstack
++#endif
++.endm
++
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++/*
++ * ebp: thread_info
++ * ecx, edx: can be clobbered
++ */
++ENTRY(pax_erase_kstack)
++ pushl %edi
++ pushl %eax
++
++ mov TI_lowest_stack(%ebp), %edi
++ mov $-0xBEEF, %eax
++ std
++
++1: mov %edi, %ecx
++ and $THREAD_SIZE_asm - 1, %ecx
++ shr $2, %ecx
++ repne scasl
++ jecxz 2f
++
++ cmp $2*16, %ecx
++ jc 2f
++
++ mov $2*16, %ecx
++ repe scasl
++ jecxz 2f
++ jne 1b
++
++2: cld
++ mov %esp, %ecx
++ sub %edi, %ecx
++ shr $2, %ecx
++ rep stosl
++
++ mov TI_task_thread_sp0(%ebp), %edi
++ sub $128, %edi
++ mov %edi, TI_lowest_stack(%ebp)
++
++ popl %eax
++ popl %edi
++ ret
++ENDPROC(pax_erase_kstack)
++#endif
++
++.macro __SAVE_ALL _DS
+ cld
+ PUSH_GS
+ pushl_cfi %fs
+@@ -215,7 +348,7 @@
+ CFI_REL_OFFSET ecx, 0
+ pushl_cfi %ebx
+ CFI_REL_OFFSET ebx, 0
+- movl $(__USER_DS), %edx
++ movl $\_DS, %edx
+ movl %edx, %ds
+ movl %edx, %es
+ movl $(__KERNEL_PERCPU), %edx
+@@ -223,6 +356,15 @@
+ SET_KERNEL_GS %edx
+ .endm
+
++.macro SAVE_ALL
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++ __SAVE_ALL __KERNEL_DS
++ pax_enter_kernel
++#else
++ __SAVE_ALL __USER_DS
++#endif
++.endm
++
+ .macro RESTORE_INT_REGS
+ popl_cfi %ebx
+ CFI_RESTORE ebx
+@@ -308,7 +450,7 @@ ENTRY(ret_from_fork)
+ popfl_cfi
+ jmp syscall_exit
+ CFI_ENDPROC
+-END(ret_from_fork)
++ENDPROC(ret_from_fork)
+
+ /*
+ * Interrupt exit functions should be protected against kprobes
+@@ -333,7 +475,15 @@ check_userspace:
+ movb PT_CS(%esp), %al
+ andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
+ cmpl $USER_RPL, %eax
++
++#ifdef CONFIG_PAX_KERNEXEC
++ jae resume_userspace
++
++ PAX_EXIT_KERNEL
++ jmp resume_kernel
++#else
+ jb resume_kernel # not returning to v8086 or userspace
++#endif
+
+ ENTRY(resume_userspace)
+ LOCKDEP_SYS_EXIT
+@@ -345,8 +495,8 @@ ENTRY(resume_userspace)
+ andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
+ # int/exception return?
+ jne work_pending
+- jmp restore_all
+-END(ret_from_exception)
++ jmp restore_all_pax
++ENDPROC(ret_from_exception)
+
+ #ifdef CONFIG_PREEMPT
+ ENTRY(resume_kernel)
+@@ -361,7 +511,7 @@ need_resched:
+ jz restore_all
+ call preempt_schedule_irq
+ jmp need_resched
+-END(resume_kernel)
++ENDPROC(resume_kernel)
+ #endif
+ CFI_ENDPROC
+ /*
+@@ -395,23 +545,34 @@ sysenter_past_esp:
+ /*CFI_REL_OFFSET cs, 0*/
+ /*
+ * Push current_thread_info()->sysenter_return to the stack.
+- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
+- * pushed above; +8 corresponds to copy_thread's esp0 setting.
+ */
+- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
++ pushl_cfi $0
+ CFI_REL_OFFSET eip, 0
+
+ pushl_cfi %eax
+ SAVE_ALL
++ GET_THREAD_INFO(%ebp)
++ movl TI_sysenter_return(%ebp),%ebp
++ movl %ebp,PT_EIP(%esp)
+ ENABLE_INTERRUPTS(CLBR_NONE)
+
+ /*
+ * Load the potential sixth argument from user stack.
+ * Careful about security.
+ */
++ movl PT_OLDESP(%esp),%ebp
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ mov PT_OLDSS(%esp),%ds
++1: movl %ds:(%ebp),%ebp
++ push %ss
++ pop %ds
++#else
+ cmpl $__PAGE_OFFSET-3,%ebp
+ jae syscall_fault
+ 1: movl (%ebp),%ebp
++#endif
++
+ movl %ebp,PT_EBP(%esp)
+ .section __ex_table,"a"
+ .align 4
+@@ -434,12 +595,24 @@ sysenter_do_call:
+ testl $_TIF_ALLWORK_MASK, %ecx
+ jne sysexit_audit
+ sysenter_exit:
++
++#ifdef CONFIG_PAX_RANDKSTACK
++ pushl_cfi %eax
++ movl %esp, %eax
++ call pax_randomize_kstack
++ popl_cfi %eax
++#endif
++
++ pax_erase_kstack
++
+ /* if something modifies registers it must also disable sysexit */
+ movl PT_EIP(%esp), %edx
+ movl PT_OLDESP(%esp), %ecx
+ xorl %ebp,%ebp
+ TRACE_IRQS_ON
+ 1: mov PT_FS(%esp), %fs
++2: mov PT_DS(%esp), %ds
++3: mov PT_ES(%esp), %es
+ PTGS_TO_GS
+ ENABLE_INTERRUPTS_SYSEXIT
+
+@@ -456,6 +629,9 @@ sysenter_audit:
+ movl %eax,%edx /* 2nd arg: syscall number */
+ movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
+ call audit_syscall_entry
++
++ pax_erase_kstack
++
+ pushl_cfi %ebx
+ movl PT_EAX(%esp),%eax /* reload syscall number */
+ jmp sysenter_do_call
+@@ -482,11 +658,17 @@ sysexit_audit:
+
+ CFI_ENDPROC
+ .pushsection .fixup,"ax"
+-2: movl $0,PT_FS(%esp)
++4: movl $0,PT_FS(%esp)
++ jmp 1b
++5: movl $0,PT_DS(%esp)
++ jmp 1b
++6: movl $0,PT_ES(%esp)
+ jmp 1b
+ .section __ex_table,"a"
+ .align 4
+- .long 1b,2b
++ .long 1b,4b
++ .long 2b,5b
++ .long 3b,6b
+ .popsection
+ PTGS_TO_GS_EX
+ ENDPROC(ia32_sysenter_target)
+@@ -519,6 +701,15 @@ syscall_exit:
+ testl $_TIF_ALLWORK_MASK, %ecx # current->work
+ jne syscall_exit_work
+
++restore_all_pax:
++
++#ifdef CONFIG_PAX_RANDKSTACK
++ movl %esp, %eax
++ call pax_randomize_kstack
++#endif
++
++ pax_erase_kstack
++
+ restore_all:
+ TRACE_IRQS_IRET
+ restore_all_notrace:
+@@ -578,14 +769,34 @@ ldt_ss:
+ * compensating for the offset by changing to the ESPFIX segment with
+ * a base address that matches for the difference.
+ */
+-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
++#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
+ mov %esp, %edx /* load kernel esp */
+ mov PT_OLDESP(%esp), %eax /* load userspace esp */
+ mov %dx, %ax /* eax: new kernel esp */
+ sub %eax, %edx /* offset (low word is 0) */
++#ifdef CONFIG_SMP
++ movl PER_CPU_VAR(cpu_number), %ebx
++ shll $PAGE_SHIFT_asm, %ebx
++ addl $cpu_gdt_table, %ebx
++#else
++ movl $cpu_gdt_table, %ebx
++#endif
+ shr $16, %edx
+- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
+- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ mov %cr0, %esi
++ btr $16, %esi
++ mov %esi, %cr0
++#endif
++
++ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
++ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ bts $16, %esi
++ mov %esi, %cr0
++#endif
++
+ pushl_cfi $__ESPFIX_SS
+ pushl_cfi %eax /* new kernel esp */
+ /* Disable interrupts, but do not irqtrace this section: we
+@@ -614,34 +825,28 @@ work_resched:
+ movl TI_flags(%ebp), %ecx
+ andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
+ # than syscall tracing?
+- jz restore_all
++ jz restore_all_pax
+ testb $_TIF_NEED_RESCHED, %cl
+ jnz work_resched
+
+ work_notifysig: # deal with pending signals and
+ # notify-resume requests
++ movl %esp, %eax
+ #ifdef CONFIG_VM86
+ testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
+- movl %esp, %eax
+- jne work_notifysig_v86 # returning to kernel-space or
++ jz 1f # returning to kernel-space or
+ # vm86-space
+- xorl %edx, %edx
+- call do_notify_resume
+- jmp resume_userspace_sig
+
+- ALIGN
+-work_notifysig_v86:
+ pushl_cfi %ecx # save ti_flags for do_notify_resume
+ call save_v86_state # %eax contains pt_regs pointer
+ popl_cfi %ecx
+ movl %eax, %esp
+-#else
+- movl %esp, %eax
++1:
+ #endif
+ xorl %edx, %edx
+ call do_notify_resume
+ jmp resume_userspace_sig
+-END(work_pending)
++ENDPROC(work_pending)
+
+ # perform syscall exit tracing
+ ALIGN
+@@ -649,11 +854,14 @@ syscall_trace_entry:
+ movl $-ENOSYS,PT_EAX(%esp)
+ movl %esp, %eax
+ call syscall_trace_enter
++
++ pax_erase_kstack
++
+ /* What it returned is what we'll actually use. */
+ cmpl $(nr_syscalls), %eax
+ jnae syscall_call
+ jmp syscall_exit
+-END(syscall_trace_entry)
++ENDPROC(syscall_trace_entry)
+
+ # perform syscall exit tracing
+ ALIGN
+@@ -666,20 +874,24 @@ syscall_exit_work:
+ movl %esp, %eax
+ call syscall_trace_leave
+ jmp resume_userspace
+-END(syscall_exit_work)
++ENDPROC(syscall_exit_work)
+ CFI_ENDPROC
+
+ RING0_INT_FRAME # can't unwind into user space anyway
+ syscall_fault:
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ push %ss
++ pop %ds
++#endif
+ GET_THREAD_INFO(%ebp)
+ movl $-EFAULT,PT_EAX(%esp)
+ jmp resume_userspace
+-END(syscall_fault)
++ENDPROC(syscall_fault)
+
+ syscall_badsys:
+ movl $-ENOSYS,PT_EAX(%esp)
+ jmp resume_userspace
+-END(syscall_badsys)
++ENDPROC(syscall_badsys)
+ CFI_ENDPROC
+ /*
+ * End of kprobes section
+@@ -753,6 +965,36 @@ ptregs_clone:
+ CFI_ENDPROC
+ ENDPROC(ptregs_clone)
+
++ ALIGN;
++ENTRY(kernel_execve)
++ CFI_STARTPROC
++ pushl_cfi %ebp
++ sub $PT_OLDSS+4,%esp
++ pushl_cfi %edi
++ pushl_cfi %ecx
++ pushl_cfi %eax
++ lea 3*4(%esp),%edi
++ mov $PT_OLDSS/4+1,%ecx
++ xorl %eax,%eax
++ rep stosl
++ popl_cfi %eax
++ popl_cfi %ecx
++ popl_cfi %edi
++ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
++ pushl_cfi %esp
++ call sys_execve
++ add $4,%esp
++ CFI_ADJUST_CFA_OFFSET -4
++ GET_THREAD_INFO(%ebp)
++ test %eax,%eax
++ jz syscall_exit
++ add $PT_OLDSS+4,%esp
++ CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
++ popl_cfi %ebp
++ ret
++ CFI_ENDPROC
++ENDPROC(kernel_execve)
++
+ .macro FIXUP_ESPFIX_STACK
+ /*
+ * Switch back for ESPFIX stack to the normal zerobased stack
+@@ -762,8 +1004,15 @@ ENDPROC(ptregs_clone)
+ * normal stack and adjusts ESP with the matching offset.
+ */
+ /* fixup the stack */
+- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
+- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
++#ifdef CONFIG_SMP
++ movl PER_CPU_VAR(cpu_number), %ebx
++ shll $PAGE_SHIFT_asm, %ebx
++ addl $cpu_gdt_table, %ebx
++#else
++ movl $cpu_gdt_table, %ebx
++#endif
++ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
++ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
+ shl $16, %eax
+ addl %esp, %eax /* the adjusted stack pointer */
+ pushl_cfi $__KERNEL_DS
+@@ -816,7 +1065,7 @@ vector=vector+1
+ .endr
+ 2: jmp common_interrupt
+ .endr
+-END(irq_entries_start)
++ENDPROC(irq_entries_start)
+
+ .previous
+ END(interrupt)
+@@ -864,7 +1113,7 @@ ENTRY(coprocessor_error)
+ pushl_cfi $do_coprocessor_error
+ jmp error_code
+ CFI_ENDPROC
+-END(coprocessor_error)
++ENDPROC(coprocessor_error)
+
+ ENTRY(simd_coprocessor_error)
+ RING0_INT_FRAME
+@@ -885,7 +1134,7 @@ ENTRY(simd_coprocessor_error)
+ #endif
+ jmp error_code
+ CFI_ENDPROC
+-END(simd_coprocessor_error)
++ENDPROC(simd_coprocessor_error)
+
+ ENTRY(device_not_available)
+ RING0_INT_FRAME
+@@ -893,7 +1142,7 @@ ENTRY(device_not_available)
+ pushl_cfi $do_device_not_available
+ jmp error_code
+ CFI_ENDPROC
+-END(device_not_available)
++ENDPROC(device_not_available)
+
+ #ifdef CONFIG_PARAVIRT
+ ENTRY(native_iret)
+@@ -902,12 +1151,12 @@ ENTRY(native_iret)
+ .align 4
+ .long native_iret, iret_exc
+ .previous
+-END(native_iret)
++ENDPROC(native_iret)
+
+ ENTRY(native_irq_enable_sysexit)
+ sti
+ sysexit
+-END(native_irq_enable_sysexit)
++ENDPROC(native_irq_enable_sysexit)
+ #endif
+
+ ENTRY(overflow)
+@@ -916,7 +1165,7 @@ ENTRY(overflow)
+ pushl_cfi $do_overflow
+ jmp error_code
+ CFI_ENDPROC
+-END(overflow)
++ENDPROC(overflow)
+
+ ENTRY(bounds)
+ RING0_INT_FRAME
+@@ -924,7 +1173,7 @@ ENTRY(bounds)
+ pushl_cfi $do_bounds
+ jmp error_code
+ CFI_ENDPROC
+-END(bounds)
++ENDPROC(bounds)
+
+ ENTRY(invalid_op)
+ RING0_INT_FRAME
+@@ -932,7 +1181,7 @@ ENTRY(invalid_op)
+ pushl_cfi $do_invalid_op
+ jmp error_code
+ CFI_ENDPROC
+-END(invalid_op)
++ENDPROC(invalid_op)
+
+ ENTRY(coprocessor_segment_overrun)
+ RING0_INT_FRAME
+@@ -940,35 +1189,35 @@ ENTRY(coprocessor_segment_overrun)
+ pushl_cfi $do_coprocessor_segment_overrun
+ jmp error_code
+ CFI_ENDPROC
+-END(coprocessor_segment_overrun)
++ENDPROC(coprocessor_segment_overrun)
+
+ ENTRY(invalid_TSS)
+ RING0_EC_FRAME
+ pushl_cfi $do_invalid_TSS
+ jmp error_code
+ CFI_ENDPROC
+-END(invalid_TSS)
++ENDPROC(invalid_TSS)
+
+ ENTRY(segment_not_present)
+ RING0_EC_FRAME
+ pushl_cfi $do_segment_not_present
+ jmp error_code
+ CFI_ENDPROC
+-END(segment_not_present)
++ENDPROC(segment_not_present)
+
+ ENTRY(stack_segment)
+ RING0_EC_FRAME
+ pushl_cfi $do_stack_segment
+ jmp error_code
+ CFI_ENDPROC
+-END(stack_segment)
++ENDPROC(stack_segment)
+
+ ENTRY(alignment_check)
+ RING0_EC_FRAME
+ pushl_cfi $do_alignment_check
+ jmp error_code
+ CFI_ENDPROC
+-END(alignment_check)
++ENDPROC(alignment_check)
+
+ ENTRY(divide_error)
+ RING0_INT_FRAME
+@@ -976,7 +1225,7 @@ ENTRY(divide_error)
+ pushl_cfi $do_divide_error
+ jmp error_code
+ CFI_ENDPROC
+-END(divide_error)
++ENDPROC(divide_error)
+
+ #ifdef CONFIG_X86_MCE
+ ENTRY(machine_check)
+@@ -985,7 +1234,7 @@ ENTRY(machine_check)
+ pushl_cfi machine_check_vector
+ jmp error_code
+ CFI_ENDPROC
+-END(machine_check)
++ENDPROC(machine_check)
+ #endif
+
+ ENTRY(spurious_interrupt_bug)
+@@ -994,7 +1243,7 @@ ENTRY(spurious_interrupt_bug)
+ pushl_cfi $do_spurious_interrupt_bug
+ jmp error_code
+ CFI_ENDPROC
+-END(spurious_interrupt_bug)
++ENDPROC(spurious_interrupt_bug)
+ /*
+ * End of kprobes section
+ */
+@@ -1109,7 +1358,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
+
+ ENTRY(mcount)
+ ret
+-END(mcount)
++ENDPROC(mcount)
+
+ ENTRY(ftrace_caller)
+ cmpl $0, function_trace_stop
+@@ -1138,7 +1387,7 @@ ftrace_graph_call:
+ .globl ftrace_stub
+ ftrace_stub:
+ ret
+-END(ftrace_caller)
++ENDPROC(ftrace_caller)
+
+ #else /* ! CONFIG_DYNAMIC_FTRACE */
+
+@@ -1174,7 +1423,7 @@ trace:
+ popl %ecx
+ popl %eax
+ jmp ftrace_stub
+-END(mcount)
++ENDPROC(mcount)
+ #endif /* CONFIG_DYNAMIC_FTRACE */
+ #endif /* CONFIG_FUNCTION_TRACER */
+
+@@ -1195,7 +1444,7 @@ ENTRY(ftrace_graph_caller)
+ popl %ecx
+ popl %eax
+ ret
+-END(ftrace_graph_caller)
++ENDPROC(ftrace_graph_caller)
+
+ .globl return_to_handler
+ return_to_handler:
+@@ -1209,7 +1458,6 @@ return_to_handler:
+ jmp *%ecx
+ #endif
+
+-.section .rodata,"a"
+ #include "syscall_table_32.S"
+
+ syscall_table_size=(.-sys_call_table)
+@@ -1255,15 +1503,18 @@ error_code:
+ movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
+ REG_TO_PTGS %ecx
+ SET_KERNEL_GS %ecx
+- movl $(__USER_DS), %ecx
++ movl $(__KERNEL_DS), %ecx
+ movl %ecx, %ds
+ movl %ecx, %es
++
++ pax_enter_kernel
++
+ TRACE_IRQS_OFF
+ movl %esp,%eax # pt_regs pointer
+ call *%edi
+ jmp ret_from_exception
+ CFI_ENDPROC
+-END(page_fault)
++ENDPROC(page_fault)
+
+ /*
+ * Debug traps and NMI can happen at the one SYSENTER instruction
+@@ -1305,7 +1556,7 @@ debug_stack_correct:
+ call do_debug
+ jmp ret_from_exception
+ CFI_ENDPROC
+-END(debug)
++ENDPROC(debug)
+
+ /*
+ * NMI is doubly nasty. It can happen _while_ we're handling
+@@ -1342,6 +1593,9 @@ nmi_stack_correct:
+ xorl %edx,%edx # zero error code
+ movl %esp,%eax # pt_regs pointer
+ call do_nmi
++
++ pax_exit_kernel
++
+ jmp restore_all_notrace
+ CFI_ENDPROC
+
+@@ -1378,12 +1632,15 @@ nmi_espfix_stack:
+ FIXUP_ESPFIX_STACK # %eax == %esp
+ xorl %edx,%edx # zero error code
+ call do_nmi
++
++ pax_exit_kernel
++
+ RESTORE_REGS
+ lss 12+4(%esp), %esp # back to espfix stack
+ CFI_ADJUST_CFA_OFFSET -24
+ jmp irq_return
+ CFI_ENDPROC
+-END(nmi)
++ENDPROC(nmi)
+
+ ENTRY(int3)
+ RING0_INT_FRAME
+@@ -1395,14 +1652,14 @@ ENTRY(int3)
+ call do_int3
+ jmp ret_from_exception
+ CFI_ENDPROC
+-END(int3)
++ENDPROC(int3)
+
+ ENTRY(general_protection)
+ RING0_EC_FRAME
+ pushl_cfi $do_general_protection
+ jmp error_code
+ CFI_ENDPROC
+-END(general_protection)
++ENDPROC(general_protection)
+
+ #ifdef CONFIG_KVM_GUEST
+ ENTRY(async_page_fault)
+@@ -1410,7 +1667,7 @@ ENTRY(async_page_fault)
+ pushl_cfi $do_async_page_fault
+ jmp error_code
+ CFI_ENDPROC
+-END(async_page_fault)
++ENDPROC(async_page_fault)
+ #endif
+
+ /*
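
Note on the sysenter hunk above: without UDEREF, the sixth syscall argument is
fetched from the user stack only after an address-limit check (cmpl
$__PAGE_OFFSET-3,%ebp; jae syscall_fault); with UDEREF, %ds is instead reloaded
with a user-limited segment so a kernel address faults on the dereference
itself. A rough C model of the non-UDEREF range check follows; PAGE_OFFSET is
an assumption here, set to the usual 3/1-split value:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_OFFSET 0xC0000000UL                /* assumed 3/1 split */

/* models: cmpl $__PAGE_OFFSET-3,%ebp ; jae syscall_fault */
static bool user_word_ok(uint32_t ebp)
{
        /* true iff all 4 bytes at ebp lie below the kernel mapping */
        return ebp < PAGE_OFFSET - 3;
}

int main(void)
{
        printf("%d\n", user_word_ok(0xBFFFFFF0)); /* 1: user stack */
        printf("%d\n", user_word_ok(0xBFFFFFFE)); /* 0: read crosses the split */
        printf("%d\n", user_word_ok(0xC1000000)); /* 0: kernel address */
        return 0;
}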
+diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
+index faf8d5e..f58c441 100644
+--- a/arch/x86/kernel/entry_64.S
++++ b/arch/x86/kernel/entry_64.S
+@@ -55,6 +55,8 @@
+ #include <asm/paravirt.h>
+ #include <asm/ftrace.h>
+ #include <asm/percpu.h>
++#include <asm/pgtable.h>
++#include <asm/alternative-asm.h>
+
+ /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
+ #include <linux/elf-em.h>
+@@ -68,8 +70,9 @@
+ #ifdef CONFIG_FUNCTION_TRACER
+ #ifdef CONFIG_DYNAMIC_FTRACE
+ ENTRY(mcount)
++ pax_force_retaddr
+ retq
+-END(mcount)
++ENDPROC(mcount)
+
+ ENTRY(ftrace_caller)
+ cmpl $0, function_trace_stop
+@@ -92,8 +95,9 @@ GLOBAL(ftrace_graph_call)
+ #endif
+
+ GLOBAL(ftrace_stub)
++ pax_force_retaddr
+ retq
+-END(ftrace_caller)
++ENDPROC(ftrace_caller)
+
+ #else /* ! CONFIG_DYNAMIC_FTRACE */
+ ENTRY(mcount)
+@@ -112,6 +116,7 @@ ENTRY(mcount)
+ #endif
+
+ GLOBAL(ftrace_stub)
++ pax_force_retaddr
+ retq
+
+ trace:
+@@ -121,12 +126,13 @@ trace:
+ movq 8(%rbp), %rsi
+ subq $MCOUNT_INSN_SIZE, %rdi
+
++ pax_force_fptr ftrace_trace_function
+ call *ftrace_trace_function
+
+ MCOUNT_RESTORE_FRAME
+
+ jmp ftrace_stub
+-END(mcount)
++ENDPROC(mcount)
+ #endif /* CONFIG_DYNAMIC_FTRACE */
+ #endif /* CONFIG_FUNCTION_TRACER */
+
+@@ -146,8 +152,9 @@ ENTRY(ftrace_graph_caller)
+
+ MCOUNT_RESTORE_FRAME
+
++ pax_force_retaddr
+ retq
+-END(ftrace_graph_caller)
++ENDPROC(ftrace_graph_caller)
+
+ GLOBAL(return_to_handler)
+ subq $24, %rsp
+@@ -163,6 +170,7 @@ GLOBAL(return_to_handler)
+ movq 8(%rsp), %rdx
+ movq (%rsp), %rax
+ addq $24, %rsp
++ pax_force_fptr %rdi
+ jmp *%rdi
+ #endif
+
+@@ -178,6 +186,282 @@ ENTRY(native_usergs_sysret64)
+ ENDPROC(native_usergs_sysret64)
+ #endif /* CONFIG_PARAVIRT */
+
++ .macro ljmpq sel, off
++#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
++ .byte 0x48; ljmp *1234f(%rip)
++ .pushsection .rodata
++ .align 16
++ 1234: .quad \off; .word \sel
++ .popsection
++#else
++ pushq $\sel
++ pushq $\off
++ lretq
++#endif
++ .endm
++
++ .macro pax_enter_kernel
++ pax_set_fptr_mask
++#ifdef CONFIG_PAX_KERNEXEC
++ call pax_enter_kernel
++#endif
++ .endm
++
++ .macro pax_exit_kernel
++#ifdef CONFIG_PAX_KERNEXEC
++ call pax_exit_kernel
++#endif
++ .endm
++
++#ifdef CONFIG_PAX_KERNEXEC
++ENTRY(pax_enter_kernel)
++ pushq %rdi
++
++#ifdef CONFIG_PARAVIRT
++ PV_SAVE_REGS(CLBR_RDI)
++#endif
++
++ GET_CR0_INTO_RDI
++ bts $16,%rdi
++ jnc 3f
++ mov %cs,%edi
++ cmp $__KERNEL_CS,%edi
++ jnz 2f
++1:
++
++#ifdef CONFIG_PARAVIRT
++ PV_RESTORE_REGS(CLBR_RDI)
++#endif
++
++ popq %rdi
++ pax_force_retaddr
++ retq
++
++2: ljmpq __KERNEL_CS,1f
++3: ljmpq __KERNEXEC_KERNEL_CS,4f
++4: SET_RDI_INTO_CR0
++ jmp 1b
++ENDPROC(pax_enter_kernel)
++
++ENTRY(pax_exit_kernel)
++ pushq %rdi
++
++#ifdef CONFIG_PARAVIRT
++ PV_SAVE_REGS(CLBR_RDI)
++#endif
++
++ mov %cs,%rdi
++ cmp $__KERNEXEC_KERNEL_CS,%edi
++ jz 2f
++1:
++
++#ifdef CONFIG_PARAVIRT
++ PV_RESTORE_REGS(CLBR_RDI);
++#endif
++
++ popq %rdi
++ pax_force_retaddr
++ retq
++
++2: GET_CR0_INTO_RDI
++ btr $16,%rdi
++ ljmpq __KERNEL_CS,3f
++3: SET_RDI_INTO_CR0
++ jmp 1b
++#ifdef CONFIG_PARAVIRT
++ PV_RESTORE_REGS(CLBR_RDI);
++#endif
++
++ popq %rdi
++ pax_force_retaddr
++ retq
++ENDPROC(pax_exit_kernel)
++#endif
++
++ .macro pax_enter_kernel_user
++ pax_set_fptr_mask
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ call pax_enter_kernel_user
++#endif
++ .endm
++
++ .macro pax_exit_kernel_user
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ call pax_exit_kernel_user
++#endif
++#ifdef CONFIG_PAX_RANDKSTACK
++ pushq %rax
++ call pax_randomize_kstack
++ popq %rax
++#endif
++ .endm
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ENTRY(pax_enter_kernel_user)
++ pushq %rdi
++ pushq %rbx
++
++#ifdef CONFIG_PARAVIRT
++ PV_SAVE_REGS(CLBR_RDI)
++#endif
++
++ GET_CR3_INTO_RDI
++ mov %rdi,%rbx
++ add $__START_KERNEL_map,%rbx
++ sub phys_base(%rip),%rbx
++
++#ifdef CONFIG_PARAVIRT
++ pushq %rdi
++ cmpl $0, pv_info+PARAVIRT_enabled
++ jz 1f
++ i = 0
++ .rept USER_PGD_PTRS
++ mov i*8(%rbx),%rsi
++ mov $0,%sil
++ lea i*8(%rbx),%rdi
++ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
++ i = i + 1
++ .endr
++ jmp 2f
++1:
++#endif
++
++ i = 0
++ .rept USER_PGD_PTRS
++ movb $0,i*8(%rbx)
++ i = i + 1
++ .endr
++
++#ifdef CONFIG_PARAVIRT
++2: popq %rdi
++#endif
++ SET_RDI_INTO_CR3
++
++#ifdef CONFIG_PAX_KERNEXEC
++ GET_CR0_INTO_RDI
++ bts $16,%rdi
++ SET_RDI_INTO_CR0
++#endif
++
++#ifdef CONFIG_PARAVIRT
++ PV_RESTORE_REGS(CLBR_RDI)
++#endif
++
++ popq %rbx
++ popq %rdi
++ pax_force_retaddr
++ retq
++ENDPROC(pax_enter_kernel_user)
++
++ENTRY(pax_exit_kernel_user)
++ push %rdi
++
++#ifdef CONFIG_PARAVIRT
++ pushq %rbx
++ PV_SAVE_REGS(CLBR_RDI)
++#endif
++
++#ifdef CONFIG_PAX_KERNEXEC
++ GET_CR0_INTO_RDI
++ btr $16,%rdi
++ SET_RDI_INTO_CR0
++#endif
++
++ GET_CR3_INTO_RDI
++ add $__START_KERNEL_map,%rdi
++ sub phys_base(%rip),%rdi
++
++#ifdef CONFIG_PARAVIRT
++ cmpl $0, pv_info+PARAVIRT_enabled
++ jz 1f
++ mov %rdi,%rbx
++ i = 0
++ .rept USER_PGD_PTRS
++ mov i*8(%rbx),%rsi
++ mov $0x67,%sil
++ lea i*8(%rbx),%rdi
++ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
++ i = i + 1
++ .endr
++ jmp 2f
++1:
++#endif
++
++ i = 0
++ .rept USER_PGD_PTRS
++ movb $0x67,i*8(%rdi)
++ i = i + 1
++ .endr
++
++#ifdef CONFIG_PARAVIRT
++2: PV_RESTORE_REGS(CLBR_RDI)
++ popq %rbx
++#endif
++
++ popq %rdi
++ pax_force_retaddr
++ retq
++ENDPROC(pax_exit_kernel_user)
++#endif
++
++.macro pax_erase_kstack
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++ call pax_erase_kstack
++#endif
++.endm
++
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++/*
++ * r11: thread_info
++ * rcx, rdx: can be clobbered
++ */
++ENTRY(pax_erase_kstack)
++ pushq %rdi
++ pushq %rax
++ pushq %r11
++
++ GET_THREAD_INFO(%r11)
++ mov TI_lowest_stack(%r11), %rdi
++ mov $-0xBEEF, %rax
++ std
++
++1: mov %edi, %ecx
++ and $THREAD_SIZE_asm - 1, %ecx
++ shr $3, %ecx
++ repne scasq
++ jecxz 2f
++
++ cmp $2*8, %ecx
++ jc 2f
++
++ mov $2*8, %ecx
++ repe scasq
++ jecxz 2f
++ jne 1b
++
++2: cld
++ mov %esp, %ecx
++ sub %edi, %ecx
++
++ cmp $THREAD_SIZE_asm, %rcx
++ jb 3f
++ ud2
++3:
++
++ shr $3, %ecx
++ rep stosq
++
++ mov TI_task_thread_sp0(%r11), %rdi
++ sub $256, %rdi
++ mov %rdi, TI_lowest_stack(%r11)
++
++ popq %r11
++ popq %rax
++ popq %rdi
++ pax_force_retaddr
++ ret
++ENDPROC(pax_erase_kstack)
++#endif
+
+ .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
+ #ifdef CONFIG_TRACE_IRQFLAGS
+@@ -231,8 +515,8 @@ ENDPROC(native_usergs_sysret64)
+ .endm
+
+ .macro UNFAKE_STACK_FRAME
+- addq $8*6, %rsp
+- CFI_ADJUST_CFA_OFFSET -(6*8)
++ addq $8*6 + ARG_SKIP, %rsp
++ CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
+ .endm
+
+ /*
+@@ -319,7 +603,7 @@ ENDPROC(native_usergs_sysret64)
+ movq %rsp, %rsi
+
+ leaq -RBP(%rsp),%rdi /* arg1 for handler */
+- testl $3, CS(%rdi)
++ testb $3, CS(%rdi)
+ je 1f
+ SWAPGS
+ /*
+@@ -355,9 +639,10 @@ ENTRY(save_rest)
+ movq_cfi r15, R15+16
+ movq %r11, 8(%rsp) /* return address */
+ FIXUP_TOP_OF_STACK %r11, 16
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+-END(save_rest)
++ENDPROC(save_rest)
+
+ /* save complete stack frame */
+ .pushsection .kprobes.text, "ax"
+@@ -386,9 +671,10 @@ ENTRY(save_paranoid)
+ js 1f /* negative -> in kernel */
+ SWAPGS
+ xorl %ebx,%ebx
+-1: ret
++1: pax_force_retaddr_bts
++ ret
+ CFI_ENDPROC
+-END(save_paranoid)
++ENDPROC(save_paranoid)
+ .popsection
+
+ /*
+@@ -410,7 +696,7 @@ ENTRY(ret_from_fork)
+
+ RESTORE_REST
+
+- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
++ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
+ je int_ret_from_sys_call
+
+ testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
+@@ -420,7 +706,7 @@ ENTRY(ret_from_fork)
+ jmp ret_from_sys_call # go to the SYSRET fastpath
+
+ CFI_ENDPROC
+-END(ret_from_fork)
++ENDPROC(ret_from_fork)
+
+ /*
+ * System call entry. Up to 6 arguments in registers are supported.
+@@ -456,7 +742,7 @@ END(ret_from_fork)
+ ENTRY(system_call)
+ CFI_STARTPROC simple
+ CFI_SIGNAL_FRAME
+- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
++ CFI_DEF_CFA rsp,0
+ CFI_REGISTER rip,rcx
+ /*CFI_REGISTER rflags,r11*/
+ SWAPGS_UNSAFE_STACK
+@@ -469,12 +755,13 @@ ENTRY(system_call_after_swapgs)
+
+ movq %rsp,PER_CPU_VAR(old_rsp)
+ movq PER_CPU_VAR(kernel_stack),%rsp
++ SAVE_ARGS 8*6,0
++ pax_enter_kernel_user
+ /*
+ * No need to follow this irqs off/on section - it's straight
+ * and short:
+ */
+ ENABLE_INTERRUPTS(CLBR_NONE)
+- SAVE_ARGS 8,0
+ movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
+ movq %rcx,RIP-ARGOFFSET(%rsp)
+ CFI_REL_OFFSET rip,RIP-ARGOFFSET
+@@ -484,7 +771,7 @@ ENTRY(system_call_after_swapgs)
+ system_call_fastpath:
+ cmpq $__NR_syscall_max,%rax
+ ja badsys
+- movq %r10,%rcx
++ movq R10-ARGOFFSET(%rsp),%rcx
+ call *sys_call_table(,%rax,8) # XXX: rip relative
+ movq %rax,RAX-ARGOFFSET(%rsp)
+ /*
+@@ -503,6 +790,8 @@ sysret_check:
+ andl %edi,%edx
+ jnz sysret_careful
+ CFI_REMEMBER_STATE
++ pax_exit_kernel_user
++ pax_erase_kstack
+ /*
+ * sysretq will re-enable interrupts:
+ */
+@@ -554,14 +843,18 @@ badsys:
+ * jump back to the normal fast path.
+ */
+ auditsys:
+- movq %r10,%r9 /* 6th arg: 4th syscall arg */
++ movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
+ movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
+ movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
+ movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
+ movq %rax,%rsi /* 2nd arg: syscall number */
+ movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
+ call audit_syscall_entry
++
++ pax_erase_kstack
++
+ LOAD_ARGS 0 /* reload call-clobbered registers */
++ pax_set_fptr_mask
+ jmp system_call_fastpath
+
+ /*
+@@ -591,16 +884,20 @@ tracesys:
+ FIXUP_TOP_OF_STACK %rdi
+ movq %rsp,%rdi
+ call syscall_trace_enter
++
++ pax_erase_kstack
++
+ /*
+ * Reload arg registers from stack in case ptrace changed them.
+ * We don't reload %rax because syscall_trace_enter() returned
+ * the value it wants us to use in the table lookup.
+ */
+ LOAD_ARGS ARGOFFSET, 1
++ pax_set_fptr_mask
+ RESTORE_REST
+ cmpq $__NR_syscall_max,%rax
+ ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
+- movq %r10,%rcx /* fixup for C */
++ movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
+ call *sys_call_table(,%rax,8)
+ movq %rax,RAX-ARGOFFSET(%rsp)
+ /* Use IRET because user could have changed frame */
+@@ -612,7 +909,7 @@ tracesys:
+ GLOBAL(int_ret_from_sys_call)
+ DISABLE_INTERRUPTS(CLBR_NONE)
+ TRACE_IRQS_OFF
+- testl $3,CS-ARGOFFSET(%rsp)
++ testb $3,CS-ARGOFFSET(%rsp)
+ je retint_restore_args
+ movl $_TIF_ALLWORK_MASK,%edi
+ /* edi: mask to check */
+@@ -669,7 +966,7 @@ int_restore_rest:
+ TRACE_IRQS_OFF
+ jmp int_with_check
+ CFI_ENDPROC
+-END(system_call)
++ENDPROC(system_call)
+
+ /*
+ * Certain special system calls that need to save a complete full stack frame.
+@@ -685,7 +982,7 @@ ENTRY(\label)
+ call \func
+ jmp ptregscall_common
+ CFI_ENDPROC
+-END(\label)
++ENDPROC(\label)
+ .endm
+
+ PTREGSCALL stub_clone, sys_clone, %r8
+@@ -703,9 +1000,10 @@ ENTRY(ptregscall_common)
+ movq_cfi_restore R12+8, r12
+ movq_cfi_restore RBP+8, rbp
+ movq_cfi_restore RBX+8, rbx
++ pax_force_retaddr
+ ret $REST_SKIP /* pop extended registers */
+ CFI_ENDPROC
+-END(ptregscall_common)
++ENDPROC(ptregscall_common)
+
+ ENTRY(stub_execve)
+ CFI_STARTPROC
+@@ -720,7 +1018,7 @@ ENTRY(stub_execve)
+ RESTORE_REST
+ jmp int_ret_from_sys_call
+ CFI_ENDPROC
+-END(stub_execve)
++ENDPROC(stub_execve)
+
+ /*
+ * sigreturn is special because it needs to restore all registers on return.
+@@ -738,7 +1036,7 @@ ENTRY(stub_rt_sigreturn)
+ RESTORE_REST
+ jmp int_ret_from_sys_call
+ CFI_ENDPROC
+-END(stub_rt_sigreturn)
++ENDPROC(stub_rt_sigreturn)
+
+ /*
+ * Build the entry stubs and pointer table with some assembler magic.
+@@ -773,7 +1071,7 @@ vector=vector+1
+ 2: jmp common_interrupt
+ .endr
+ CFI_ENDPROC
+-END(irq_entries_start)
++ENDPROC(irq_entries_start)
+
+ .previous
+ END(interrupt)
+@@ -793,6 +1091,16 @@ END(interrupt)
+ subq $ORIG_RAX-RBP, %rsp
+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
+ SAVE_ARGS_IRQ
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ testb $3, CS(%rdi)
++ jnz 1f
++ pax_enter_kernel
++ jmp 2f
++1: pax_enter_kernel_user
++2:
++#else
++ pax_enter_kernel
++#endif
+ call \func
+ .endm
+
+@@ -824,7 +1132,7 @@ ret_from_intr:
+
+ exit_intr:
+ GET_THREAD_INFO(%rcx)
+- testl $3,CS-ARGOFFSET(%rsp)
++ testb $3,CS-ARGOFFSET(%rsp)
+ je retint_kernel
+
+ /* Interrupt came from user space */
+@@ -846,12 +1154,16 @@ retint_swapgs: /* return to user-space */
+ * The iretq could re-enable interrupts:
+ */
+ DISABLE_INTERRUPTS(CLBR_ANY)
++ pax_exit_kernel_user
++ pax_erase_kstack
+ TRACE_IRQS_IRETQ
+ SWAPGS
+ jmp restore_args
+
+ retint_restore_args: /* return to kernel space */
+ DISABLE_INTERRUPTS(CLBR_ANY)
++ pax_exit_kernel
++ pax_force_retaddr RIP-ARGOFFSET
+ /*
+ * The iretq could re-enable interrupts:
+ */
+@@ -940,7 +1252,7 @@ ENTRY(retint_kernel)
+ #endif
+
+ CFI_ENDPROC
+-END(common_interrupt)
++ENDPROC(common_interrupt)
+ /*
+ * End of kprobes section
+ */
+@@ -956,7 +1268,7 @@ ENTRY(\sym)
+ interrupt \do_sym
+ jmp ret_from_intr
+ CFI_ENDPROC
+-END(\sym)
++ENDPROC(\sym)
+ .endm
+
+ #ifdef CONFIG_SMP
+@@ -1021,12 +1333,22 @@ ENTRY(\sym)
+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
+ call error_entry
+ DEFAULT_FRAME 0
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ testb $3, CS(%rsp)
++ jnz 1f
++ pax_enter_kernel
++ jmp 2f
++1: pax_enter_kernel_user
++2:
++#else
++ pax_enter_kernel
++#endif
+ movq %rsp,%rdi /* pt_regs pointer */
+ xorl %esi,%esi /* no error code */
+ call \do_sym
+ jmp error_exit /* %ebx: no swapgs flag */
+ CFI_ENDPROC
+-END(\sym)
++ENDPROC(\sym)
+ .endm
+
+ .macro paranoidzeroentry sym do_sym
+@@ -1038,15 +1360,25 @@ ENTRY(\sym)
+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
+ call save_paranoid
+ TRACE_IRQS_OFF
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ testb $3, CS(%rsp)
++ jnz 1f
++ pax_enter_kernel
++ jmp 2f
++1: pax_enter_kernel_user
++2:
++#else
++ pax_enter_kernel
++#endif
+ movq %rsp,%rdi /* pt_regs pointer */
+ xorl %esi,%esi /* no error code */
+ call \do_sym
+ jmp paranoid_exit /* %ebx: no swapgs flag */
+ CFI_ENDPROC
+-END(\sym)
++ENDPROC(\sym)
+ .endm
+
+-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
++#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
+ .macro paranoidzeroentry_ist sym do_sym ist
+ ENTRY(\sym)
+ INTR_FRAME
+@@ -1056,14 +1388,30 @@ ENTRY(\sym)
+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
+ call save_paranoid
+ TRACE_IRQS_OFF
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ testb $3, CS(%rsp)
++ jnz 1f
++ pax_enter_kernel
++ jmp 2f
++1: pax_enter_kernel_user
++2:
++#else
++ pax_enter_kernel
++#endif
+ movq %rsp,%rdi /* pt_regs pointer */
+ xorl %esi,%esi /* no error code */
++#ifdef CONFIG_SMP
++ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
++ lea init_tss(%r12), %r12
++#else
++ lea init_tss(%rip), %r12
++#endif
+ subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
+ call \do_sym
+ addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
+ jmp paranoid_exit /* %ebx: no swapgs flag */
+ CFI_ENDPROC
+-END(\sym)
++ENDPROC(\sym)
+ .endm
+
+ .macro errorentry sym do_sym
+@@ -1074,13 +1422,23 @@ ENTRY(\sym)
+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
+ call error_entry
+ DEFAULT_FRAME 0
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ testb $3, CS(%rsp)
++ jnz 1f
++ pax_enter_kernel
++ jmp 2f
++1: pax_enter_kernel_user
++2:
++#else
++ pax_enter_kernel
++#endif
+ movq %rsp,%rdi /* pt_regs pointer */
+ movq ORIG_RAX(%rsp),%rsi /* get error code */
+ movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
+ call \do_sym
+ jmp error_exit /* %ebx: no swapgs flag */
+ CFI_ENDPROC
+-END(\sym)
++ENDPROC(\sym)
+ .endm
+
+ /* error code is on the stack already */
+@@ -1093,13 +1451,23 @@ ENTRY(\sym)
+ call save_paranoid
+ DEFAULT_FRAME 0
+ TRACE_IRQS_OFF
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ testb $3, CS(%rsp)
++ jnz 1f
++ pax_enter_kernel
++ jmp 2f
++1: pax_enter_kernel_user
++2:
++#else
++ pax_enter_kernel
++#endif
+ movq %rsp,%rdi /* pt_regs pointer */
+ movq ORIG_RAX(%rsp),%rsi /* get error code */
+ movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
+ call \do_sym
+ jmp paranoid_exit /* %ebx: no swapgs flag */
+ CFI_ENDPROC
+-END(\sym)
++ENDPROC(\sym)
+ .endm
+
+ zeroentry divide_error do_divide_error
+@@ -1129,9 +1497,10 @@ gs_change:
+ 2: mfence /* workaround */
+ SWAPGS
+ popfq_cfi
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+-END(native_load_gs_index)
++ENDPROC(native_load_gs_index)
+
+ .section __ex_table,"a"
+ .align 8
+@@ -1153,13 +1522,14 @@ ENTRY(kernel_thread_helper)
+ * Here we are in the child and the registers are set as they were
+ * at kernel_thread() invocation in the parent.
+ */
++ pax_force_fptr %rsi
+ call *%rsi
+ # exit
+ mov %eax, %edi
+ call do_exit
+ ud2 # padding for call trace
+ CFI_ENDPROC
+-END(kernel_thread_helper)
++ENDPROC(kernel_thread_helper)
+
+ /*
+ * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
+@@ -1186,11 +1556,11 @@ ENTRY(kernel_execve)
+ RESTORE_REST
+ testq %rax,%rax
+ je int_ret_from_sys_call
+- RESTORE_ARGS
+ UNFAKE_STACK_FRAME
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+-END(kernel_execve)
++ENDPROC(kernel_execve)
+
+ /* Call softirq on interrupt stack. Interrupts are off. */
+ ENTRY(call_softirq)
+@@ -1208,9 +1578,10 @@ ENTRY(call_softirq)
+ CFI_DEF_CFA_REGISTER rsp
+ CFI_ADJUST_CFA_OFFSET -8
+ decl PER_CPU_VAR(irq_count)
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+-END(call_softirq)
++ENDPROC(call_softirq)
+
+ #ifdef CONFIG_XEN
+ zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
+@@ -1248,7 +1619,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
+ decl PER_CPU_VAR(irq_count)
+ jmp error_exit
+ CFI_ENDPROC
+-END(xen_do_hypervisor_callback)
++ENDPROC(xen_do_hypervisor_callback)
+
+ /*
+ * Hypervisor uses this for application faults while it executes.
+@@ -1307,7 +1678,7 @@ ENTRY(xen_failsafe_callback)
+ SAVE_ALL
+ jmp error_exit
+ CFI_ENDPROC
+-END(xen_failsafe_callback)
++ENDPROC(xen_failsafe_callback)
+
+ apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
+ xen_hvm_callback_vector xen_evtchn_do_upcall
+@@ -1356,16 +1727,31 @@ ENTRY(paranoid_exit)
+ TRACE_IRQS_OFF
+ testl %ebx,%ebx /* swapgs needed? */
+ jnz paranoid_restore
+- testl $3,CS(%rsp)
++ testb $3,CS(%rsp)
+ jnz paranoid_userspace
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ pax_exit_kernel
++ TRACE_IRQS_IRETQ 0
++ SWAPGS_UNSAFE_STACK
++ RESTORE_ALL 8
++ pax_force_retaddr_bts
++ jmp irq_return
++#endif
+ paranoid_swapgs:
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ pax_exit_kernel_user
++#else
++ pax_exit_kernel
++#endif
+ TRACE_IRQS_IRETQ 0
+ SWAPGS_UNSAFE_STACK
+ RESTORE_ALL 8
+ jmp irq_return
+ paranoid_restore:
++ pax_exit_kernel
+ TRACE_IRQS_IRETQ 0
+ RESTORE_ALL 8
++ pax_force_retaddr_bts
+ jmp irq_return
+ paranoid_userspace:
+ GET_THREAD_INFO(%rcx)
+@@ -1394,7 +1780,7 @@ paranoid_schedule:
+ TRACE_IRQS_OFF
+ jmp paranoid_userspace
+ CFI_ENDPROC
+-END(paranoid_exit)
++ENDPROC(paranoid_exit)
+
+ /*
+ * Exception entry point. This expects an error code/orig_rax on the stack.
+@@ -1421,12 +1807,13 @@ ENTRY(error_entry)
+ movq_cfi r14, R14+8
+ movq_cfi r15, R15+8
+ xorl %ebx,%ebx
+- testl $3,CS+8(%rsp)
++ testb $3,CS+8(%rsp)
+ je error_kernelspace
+ error_swapgs:
+ SWAPGS
+ error_sti:
+ TRACE_IRQS_OFF
++ pax_force_retaddr_bts
+ ret
+
+ /*
+@@ -1453,7 +1840,7 @@ bstep_iret:
+ movq %rcx,RIP+8(%rsp)
+ jmp error_swapgs
+ CFI_ENDPROC
+-END(error_entry)
++ENDPROC(error_entry)
+
+
+ /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
+@@ -1473,7 +1860,7 @@ ENTRY(error_exit)
+ jnz retint_careful
+ jmp retint_swapgs
+ CFI_ENDPROC
+-END(error_exit)
++ENDPROC(error_exit)
+
+
+ /* runs on exception stack */
+@@ -1485,6 +1872,16 @@ ENTRY(nmi)
+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
+ call save_paranoid
+ DEFAULT_FRAME 0
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ testb $3, CS(%rsp)
++ jnz 1f
++ pax_enter_kernel
++ jmp 2f
++1: pax_enter_kernel_user
++2:
++#else
++ pax_enter_kernel
++#endif
+ /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
+ movq %rsp,%rdi
+ movq $-1,%rsi
+@@ -1495,12 +1892,28 @@ ENTRY(nmi)
+ DISABLE_INTERRUPTS(CLBR_NONE)
+ testl %ebx,%ebx /* swapgs needed? */
+ jnz nmi_restore
+- testl $3,CS(%rsp)
++ testb $3,CS(%rsp)
+ jnz nmi_userspace
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ pax_exit_kernel
++ SWAPGS_UNSAFE_STACK
++ RESTORE_ALL 8
++ pax_force_retaddr_bts
++ jmp irq_return
++#endif
+ nmi_swapgs:
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ pax_exit_kernel_user
++#else
++ pax_exit_kernel
++#endif
+ SWAPGS_UNSAFE_STACK
++ RESTORE_ALL 8
++ jmp irq_return
+ nmi_restore:
++ pax_exit_kernel
+ RESTORE_ALL 8
++ pax_force_retaddr_bts
+ jmp irq_return
+ nmi_userspace:
+ GET_THREAD_INFO(%rcx)
+@@ -1529,14 +1942,14 @@ nmi_schedule:
+ jmp paranoid_exit
+ CFI_ENDPROC
+ #endif
+-END(nmi)
++ENDPROC(nmi)
+
+ ENTRY(ignore_sysret)
+ CFI_STARTPROC
+ mov $-ENOSYS,%eax
+ sysret
+ CFI_ENDPROC
+-END(ignore_sysret)
++ENDPROC(ignore_sysret)
+
+ /*
+ * End of kprobes section
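
The pax_erase_kstack routine added above scans the kernel stack for the poison
value written on earlier entries, then refills the region used since the last
syscall with $-0xBEEF (0xffffffffffff4111) via rep stosq. A simplified,
single-threaded C model of just the refill step; the buffer size and watermark
values are illustrative assumptions:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define POISON UINT64_C(0xffffffffffff4111)     /* $-0xBEEF, sign-extended */

static void erase_kstack(uint64_t *lowest, uint64_t *sp)
{
        for (uint64_t *p = lowest; p < sp; p++) /* rep stosq equivalent */
                *p = POISON;
}

int main(void)
{
        uint64_t stack[16] = { 0 };
        /* pretend slots 2..9 were dirtied by the last kernel entry */
        erase_kstack(&stack[2], &stack[10]);
        printf("%" PRIx64 " %" PRIx64 " %" PRIx64 "\n",
               stack[1], stack[5], stack[10]);  /* 0 ffffffffffff4111 0 */
        return 0;
}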
+diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
+index c9a281f..ce2f317 100644
+--- a/arch/x86/kernel/ftrace.c
++++ b/arch/x86/kernel/ftrace.c
+@@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the IP to write to */
+ static const void *mod_code_newcode; /* holds the text to write to the IP */
+
+ static unsigned nmi_wait_count;
+-static atomic_t nmi_update_count = ATOMIC_INIT(0);
++static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
+
+ int ftrace_arch_read_dyn_info(char *buf, int size)
+ {
+@@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
+
+ r = snprintf(buf, size, "%u %u",
+ nmi_wait_count,
+- atomic_read(&nmi_update_count));
++ atomic_read_unchecked(&nmi_update_count));
+ return r;
+ }
+
+@@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
+
+ if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
+ smp_rmb();
++ pax_open_kernel();
+ ftrace_mod_code();
+- atomic_inc(&nmi_update_count);
++ pax_close_kernel();
++ atomic_inc_unchecked(&nmi_update_count);
+ }
+ /* Must have previous changes seen before executions */
+ smp_mb();
+@@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
+ {
+ unsigned char replaced[MCOUNT_INSN_SIZE];
+
++ ip = ktla_ktva(ip);
++
+ /*
+ * Note: Due to modules and __init, code can
+ * disappear and change, we need to protect against faulting
+@@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
+ unsigned char old[MCOUNT_INSN_SIZE], *new;
+ int ret;
+
+- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
++ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
+ new = ftrace_call_replace(ip, (unsigned long)func);
+ ret = ftrace_modify_code(ip, old, new);
+
+@@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long ip,
+ {
+ unsigned char code[MCOUNT_INSN_SIZE];
+
++ ip = ktla_ktva(ip);
++
+ if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
+ return -EFAULT;
+
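
The pax_open_kernel()/pax_close_kernel() bracket added around ftrace_mod_code()
exists because KERNEXEC keeps kernel text read-only, so a code patcher must
briefly lift write protection. In the kernel this is done by toggling CR0.WP
(or switching code segments); the mmap/mprotect calls below are only a
userspace stand-in for that idea, not the actual mechanism:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long pagesz = sysconf(_SC_PAGESIZE);
        unsigned char *text = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (text == MAP_FAILED)
                return 1;
        memset(text, 0x90, 16);                  /* pretend: NOP sled */
        mprotect(text, pagesz, PROT_READ);       /* "kernel text" is now RO */

        mprotect(text, pagesz, PROT_READ | PROT_WRITE); /* pax_open_kernel() */
        text[0] = 0xE8;                          /* patch in a CALL opcode */
        mprotect(text, pagesz, PROT_READ);       /* pax_close_kernel() */

        printf("%02x %02x\n", text[0], text[1]); /* e8 90 */
        munmap(text, pagesz);
        return 0;
}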
+diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
+index 3bb0850..55a56f4 100644
+--- a/arch/x86/kernel/head32.c
++++ b/arch/x86/kernel/head32.c
+@@ -19,6 +19,7 @@
+ #include <asm/io_apic.h>
+ #include <asm/bios_ebda.h>
+ #include <asm/tlbflush.h>
++#include <asm/boot.h>
+
+ static void __init i386_default_early_setup(void)
+ {
+@@ -33,7 +34,7 @@ void __init i386_start_kernel(void)
+ {
+ memblock_init();
+
+- memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
++ memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
+
+ #ifdef CONFIG_BLK_DEV_INITRD
+ /* Reserve INITRD */
+diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
+index ce0be7c..c41476e 100644
+--- a/arch/x86/kernel/head_32.S
++++ b/arch/x86/kernel/head_32.S
+@@ -25,6 +25,12 @@
+ /* Physical address */
+ #define pa(X) ((X) - __PAGE_OFFSET)
+
++#ifdef CONFIG_PAX_KERNEXEC
++#define ta(X) (X)
++#else
++#define ta(X) ((X) - __PAGE_OFFSET)
++#endif
++
+ /*
+ * References to members of the new_cpu_data structure.
+ */
+@@ -54,11 +60,7 @@
+ * and small than max_low_pfn, otherwise will waste some page table entries
+ */
+
+-#if PTRS_PER_PMD > 1
+-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
+-#else
+-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
+-#endif
++#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
+
+ /* Number of possible pages in the lowmem region */
+ LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
+@@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
+ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
+
+ /*
++ * Real beginning of normal "text" segment
++ */
++ENTRY(stext)
++ENTRY(_stext)
++
++/*
+ * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
+ * %esi points to the real-mode code as a 32-bit pointer.
+ * CS and DS must be 4 GB flat segments, but we don't depend on
+@@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
+ * can.
+ */
+ __HEAD
++
++#ifdef CONFIG_PAX_KERNEXEC
++ jmp startup_32
++/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
++.fill PAGE_SIZE-5,1,0xcc
++#endif
++
+ ENTRY(startup_32)
+ movl pa(stack_start),%ecx
+
+@@ -105,6 +120,57 @@ ENTRY(startup_32)
+ 2:
+ leal -__PAGE_OFFSET(%ecx),%esp
+
++#ifdef CONFIG_SMP
++ movl $pa(cpu_gdt_table),%edi
++ movl $__per_cpu_load,%eax
++ movw %ax,__KERNEL_PERCPU + 2(%edi)
++ rorl $16,%eax
++ movb %al,__KERNEL_PERCPU + 4(%edi)
++ movb %ah,__KERNEL_PERCPU + 7(%edi)
++ movl $__per_cpu_end - 1,%eax
++ subl $__per_cpu_start,%eax
++ movw %ax,__KERNEL_PERCPU + 0(%edi)
++#endif
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ movl $NR_CPUS,%ecx
++ movl $pa(cpu_gdt_table),%edi
++1:
++ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
++ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
++ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
++ addl $PAGE_SIZE_asm,%edi
++ loop 1b
++#endif
++
++#ifdef CONFIG_PAX_KERNEXEC
++ movl $pa(boot_gdt),%edi
++ movl $__LOAD_PHYSICAL_ADDR,%eax
++ movw %ax,__BOOT_CS + 2(%edi)
++ rorl $16,%eax
++ movb %al,__BOOT_CS + 4(%edi)
++ movb %ah,__BOOT_CS + 7(%edi)
++ rorl $16,%eax
++
++ ljmp $(__BOOT_CS),$1f
++1:
++
++ movl $NR_CPUS,%ecx
++ movl $pa(cpu_gdt_table),%edi
++ addl $__PAGE_OFFSET,%eax
++1:
++ movw %ax,__KERNEL_CS + 2(%edi)
++ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
++ rorl $16,%eax
++ movb %al,__KERNEL_CS + 4(%edi)
++ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
++ movb %ah,__KERNEL_CS + 7(%edi)
++ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
++ rorl $16,%eax
++ addl $PAGE_SIZE_asm,%edi
++ loop 1b
++#endif
++
+ /*
+ * Clear BSS first so that there are no surprises...
+ */
+@@ -195,8 +261,11 @@ ENTRY(startup_32)
+ movl %eax, pa(max_pfn_mapped)
+
+ /* Do early initialization of the fixmap area */
+- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
+- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
++#ifdef CONFIG_COMPAT_VDSO
++ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
++#else
++ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
++#endif
+ #else /* Not PAE */
+
+ page_pde_offset = (__PAGE_OFFSET >> 20);
+@@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
+ movl %eax, pa(max_pfn_mapped)
+
+ /* Do early initialization of the fixmap area */
+- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
+- movl %eax,pa(initial_page_table+0xffc)
++#ifdef CONFIG_COMPAT_VDSO
++ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
++#else
++ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
++#endif
+ #endif
+
+ #ifdef CONFIG_PARAVIRT
+@@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
+ cmpl $num_subarch_entries, %eax
+ jae bad_subarch
+
+- movl pa(subarch_entries)(,%eax,4), %eax
+- subl $__PAGE_OFFSET, %eax
+- jmp *%eax
++ jmp *pa(subarch_entries)(,%eax,4)
+
+ bad_subarch:
+ WEAK(lguest_entry)
+@@ -255,10 +325,10 @@ WEAK(xen_entry)
+ __INITDATA
+
+ subarch_entries:
+- .long default_entry /* normal x86/PC */
+- .long lguest_entry /* lguest hypervisor */
+- .long xen_entry /* Xen hypervisor */
+- .long default_entry /* Moorestown MID */
++ .long ta(default_entry) /* normal x86/PC */
++ .long ta(lguest_entry) /* lguest hypervisor */
++ .long ta(xen_entry) /* Xen hypervisor */
++ .long ta(default_entry) /* Moorestown MID */
+ num_subarch_entries = (. - subarch_entries) / 4
+ .previous
+ #else
+@@ -312,6 +382,7 @@ default_entry:
+ orl %edx,%eax
+ movl %eax,%cr4
+
++#ifdef CONFIG_X86_PAE
+ testb $X86_CR4_PAE, %al # check if PAE is enabled
+ jz 6f
+
+@@ -340,6 +411,9 @@ default_entry:
+ /* Make changes effective */
+ wrmsr
+
++ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
++#endif
++
+ 6:
+
+ /*
+@@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
+ 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
+ movl %eax,%ss # after changing gdt.
+
+- movl $(__USER_DS),%eax # DS/ES contains default USER segment
++# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
+ movl %eax,%ds
+ movl %eax,%es
+
+@@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
+ */
+ cmpb $0,ready
+ jne 1f
+- movl $gdt_page,%eax
++ movl $cpu_gdt_table,%eax
+ movl $stack_canary,%ecx
++#ifdef CONFIG_SMP
++ addl $__per_cpu_load,%ecx
++#endif
+ movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
+ shrl $16, %ecx
+ movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
+ movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
+ 1:
+-#endif
+ movl $(__KERNEL_STACK_CANARY),%eax
++#elif defined(CONFIG_PAX_MEMORY_UDEREF)
++ movl $(__USER_DS),%eax
++#else
++ xorl %eax,%eax
++#endif
+ movl %eax,%gs
+
+ xorl %eax,%eax # Clear LDT
+@@ -558,22 +639,22 @@ early_page_fault:
+ jmp early_fault
+
+ early_fault:
+- cld
+ #ifdef CONFIG_PRINTK
++ cmpl $1,%ss:early_recursion_flag
++ je hlt_loop
++ incl %ss:early_recursion_flag
++ cld
+ pusha
+ movl $(__KERNEL_DS),%eax
+ movl %eax,%ds
+ movl %eax,%es
+- cmpl $2,early_recursion_flag
+- je hlt_loop
+- incl early_recursion_flag
+ movl %cr2,%eax
+ pushl %eax
+ pushl %edx /* trapno */
+ pushl $fault_msg
+ call printk
++; call dump_stack
+ #endif
+- call dump_stack
+ hlt_loop:
+ hlt
+ jmp hlt_loop
+@@ -581,8 +662,11 @@ hlt_loop:
+ /* This is the default interrupt "handler" :-) */
+ ALIGN
+ ignore_int:
+- cld
+ #ifdef CONFIG_PRINTK
++ cmpl $2,%ss:early_recursion_flag
++ je hlt_loop
++ incl %ss:early_recursion_flag
++ cld
+ pushl %eax
+ pushl %ecx
+ pushl %edx
+@@ -591,9 +675,6 @@ ignore_int:
+ movl $(__KERNEL_DS),%eax
+ movl %eax,%ds
+ movl %eax,%es
+- cmpl $2,early_recursion_flag
+- je hlt_loop
+- incl early_recursion_flag
+ pushl 16(%esp)
+ pushl 24(%esp)
+ pushl 32(%esp)
+@@ -622,29 +703,43 @@ ENTRY(initial_code)
+ /*
+ * BSS section
+ */
+-__PAGE_ALIGNED_BSS
+- .align PAGE_SIZE
+ #ifdef CONFIG_X86_PAE
++.section .initial_pg_pmd,"a",@progbits
+ initial_pg_pmd:
+ .fill 1024*KPMDS,4,0
+ #else
++.section .initial_page_table,"a",@progbits
+ ENTRY(initial_page_table)
+ .fill 1024,4,0
+ #endif
++.section .initial_pg_fixmap,"a",@progbits
+ initial_pg_fixmap:
+ .fill 1024,4,0
++.section .empty_zero_page,"a",@progbits
+ ENTRY(empty_zero_page)
+ .fill 4096,1,0
++.section .swapper_pg_dir,"a",@progbits
+ ENTRY(swapper_pg_dir)
++#ifdef CONFIG_X86_PAE
++ .fill 4,8,0
++#else
+ .fill 1024,4,0
++#endif
++
++/*
++ * The IDT has to be page-aligned to simplify the Pentium
++ * F0 0F bug workaround.. We have a special link segment
++ * for this.
++ */
++.section .idt,"a",@progbits
++ENTRY(idt_table)
++ .fill 256,8,0
+
+ /*
+ * This starts the data section.
+ */
+ #ifdef CONFIG_X86_PAE
+-__PAGE_ALIGNED_DATA
+- /* Page-aligned for the benefit of paravirt? */
+- .align PAGE_SIZE
++.section .initial_page_table,"a",@progbits
+ ENTRY(initial_page_table)
+ .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
+ # if KPMDS == 3
+@@ -663,18 +758,27 @@ ENTRY(initial_page_table)
+ # error "Kernel PMDs should be 1, 2 or 3"
+ # endif
+ .align PAGE_SIZE /* needs to be page-sized too */
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ENTRY(cpu_pgd)
++ .rept NR_CPUS
++ .fill 4,8,0
++ .endr
++#endif
++
+ #endif
+
+ .data
+ .balign 4
+ ENTRY(stack_start)
+- .long init_thread_union+THREAD_SIZE
++ .long init_thread_union+THREAD_SIZE-8
+
++ready: .byte 0
++
++.section .rodata,"a",@progbits
+ early_recursion_flag:
+ .long 0
+
+-ready: .byte 0
+-
+ int_msg:
+ .asciz "Unknown interrupt or fault at: %p %p %p\n"
+
+@@ -707,7 +811,7 @@ fault_msg:
+ .word 0 # 32 bit align gdt_desc.address
+ boot_gdt_descr:
+ .word __BOOT_DS+7
+- .long boot_gdt - __PAGE_OFFSET
++ .long pa(boot_gdt)
+
+ .word 0 # 32-bit align idt_desc.address
+ idt_descr:
+@@ -718,7 +822,7 @@ idt_descr:
+ .word 0 # 32 bit align gdt_desc.address
+ ENTRY(early_gdt_descr)
+ .word GDT_ENTRIES*8-1
+- .long gdt_page /* Overwritten for secondary CPUs */
++ .long cpu_gdt_table /* Overwritten for secondary CPUs */
+
+ /*
+ * The boot_gdt must mirror the equivalent in setup.S and is
+@@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
+ .align L1_CACHE_BYTES
+ ENTRY(boot_gdt)
+ .fill GDT_ENTRY_BOOT_CS,8,0
+- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
+- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
++ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
++ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
++
++ .align PAGE_SIZE_asm
++ENTRY(cpu_gdt_table)
++ .rept NR_CPUS
++ .quad 0x0000000000000000 /* NULL descriptor */
++ .quad 0x0000000000000000 /* 0x0b reserved */
++ .quad 0x0000000000000000 /* 0x13 reserved */
++ .quad 0x0000000000000000 /* 0x1b reserved */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
++#else
++ .quad 0x0000000000000000 /* 0x20 unused */
++#endif
++
++ .quad 0x0000000000000000 /* 0x28 unused */
++ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
++ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
++ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
++ .quad 0x0000000000000000 /* 0x4b reserved */
++ .quad 0x0000000000000000 /* 0x53 reserved */
++ .quad 0x0000000000000000 /* 0x5b reserved */
++
++ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
++ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
++ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
++ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
++
++ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
++ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
++
++ /*
++ * Segments used for calling PnP BIOS have byte granularity.
++ * The code segments and data segments have fixed 64k limits,
++ * the transfer segment sizes are set at run time.
++ */
++ .quad 0x00409b000000ffff /* 0x90 32-bit code */
++ .quad 0x00009b000000ffff /* 0x98 16-bit code */
++ .quad 0x000093000000ffff /* 0xa0 16-bit data */
++ .quad 0x0000930000000000 /* 0xa8 16-bit data */
++ .quad 0x0000930000000000 /* 0xb0 16-bit data */
++
++ /*
++ * The APM segments have byte granularity and their bases
++ * are set at run time. All have 64k limits.
++ */
++ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
++ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
++ .quad 0x004093000000ffff /* 0xc8 APM DS data */
++
++ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
++ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
++ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
++ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
++ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
++ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
++
++ /* Be sure this is zeroed to avoid false validations in Xen */
++ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
++ .endr
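
The movw/rorl/movb sequences in the startup code above scatter a 32-bit
segment base across GDT descriptor bytes 2-4 and 7. This sketch packs base,
limit and attribute fields the same way and checks the result against the
"kernel 4GB code" literal the patch itself uses; the field names are the
standard descriptor layout, not symbols from the patch:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t gdt_encode(uint32_t base, uint32_t limit,
                           uint8_t access, uint8_t flags)
{
        uint8_t d[8];
        d[0] = limit & 0xff;  d[1] = (limit >> 8) & 0xff; /* limit 15:0  */
        d[2] = base & 0xff;   d[3] = (base >> 8) & 0xff;  /* base  15:0  */
        d[4] = (base >> 16) & 0xff;                       /* base  23:16 */
        d[5] = access;                                    /* type/DPL/P  */
        d[6] = (uint8_t)((flags << 4) | ((limit >> 16) & 0xf)); /* G/D + limit 19:16 */
        d[7] = (base >> 24) & 0xff;                       /* base  31:24 */
        uint64_t q = 0;
        for (int i = 7; i >= 0; i--)
                q = (q << 8) | d[i];
        return q;
}

int main(void)
{
        /* kernel 4GB code at 0: limit 0xfffff pages, access 0x9b, G+D set */
        uint64_t desc = gdt_encode(0, 0xfffff, 0x9b, 0xc);
        assert(desc == UINT64_C(0x00cf9b000000ffff));
        printf("%016llx\n", (unsigned long long)desc);
        return 0;
}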
+diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
+index e11e394..9aebc5d 100644
+--- a/arch/x86/kernel/head_64.S
++++ b/arch/x86/kernel/head_64.S
+@@ -19,6 +19,8 @@
+ #include <asm/cache.h>
+ #include <asm/processor-flags.h>
+ #include <asm/percpu.h>
++#include <asm/cpufeature.h>
++#include <asm/alternative-asm.h>
+
+ #ifdef CONFIG_PARAVIRT
+ #include <asm/asm-offsets.h>
+@@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
+ L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
+ L4_START_KERNEL = pgd_index(__START_KERNEL_map)
+ L3_START_KERNEL = pud_index(__START_KERNEL_map)
++L4_VMALLOC_START = pgd_index(VMALLOC_START)
++L3_VMALLOC_START = pud_index(VMALLOC_START)
++L4_VMALLOC_END = pgd_index(VMALLOC_END)
++L3_VMALLOC_END = pud_index(VMALLOC_END)
++L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
++L3_VMEMMAP_START = pud_index(VMEMMAP_START)
+
+ .text
+ __HEAD
+@@ -85,35 +93,23 @@ startup_64:
+ */
+ addq %rbp, init_level4_pgt + 0(%rip)
+ addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
++ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
++ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
++ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
+ addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
+
+ addq %rbp, level3_ident_pgt + 0(%rip)
++#ifndef CONFIG_XEN
++ addq %rbp, level3_ident_pgt + 8(%rip)
++#endif
+
+- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
+- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
++ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
++
++ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
++ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
+
+ addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
+-
+- /* Add an Identity mapping if I am above 1G */
+- leaq _text(%rip), %rdi
+- andq $PMD_PAGE_MASK, %rdi
+-
+- movq %rdi, %rax
+- shrq $PUD_SHIFT, %rax
+- andq $(PTRS_PER_PUD - 1), %rax
+- jz ident_complete
+-
+- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
+- leaq level3_ident_pgt(%rip), %rbx
+- movq %rdx, 0(%rbx, %rax, 8)
+-
+- movq %rdi, %rax
+- shrq $PMD_SHIFT, %rax
+- andq $(PTRS_PER_PMD - 1), %rax
+- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
+- leaq level2_spare_pgt(%rip), %rbx
+- movq %rdx, 0(%rbx, %rax, 8)
+-ident_complete:
++ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
+
+ /*
+ * Fixup the kernel text+data virtual addresses. Note that
+@@ -160,8 +156,8 @@ ENTRY(secondary_startup_64)
+ * after the boot processor executes this code.
+ */
+
+- /* Enable PAE mode and PGE */
+- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
++ /* Enable PAE mode and PSE/PGE */
++ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
+ movq %rax, %cr4
+
+ /* Setup early boot stage 4 level pagetables. */
+@@ -183,9 +179,17 @@ ENTRY(secondary_startup_64)
+ movl $MSR_EFER, %ecx
+ rdmsr
+ btsl $_EFER_SCE, %eax /* Enable System Call */
+- btl $20,%edi /* No Execute supported? */
++ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
+ jnc 1f
+ btsl $_EFER_NX, %eax
++ leaq init_level4_pgt(%rip), %rdi
++#ifndef CONFIG_EFI
++ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
++#endif
++ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
++ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
++ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
++ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
+ 1: wrmsr /* Make changes effective */
+
+ /* Setup cr0 */
+@@ -247,6 +251,7 @@ ENTRY(secondary_startup_64)
+ * jump. In addition we need to ensure %cs is set so we make this
+ * a far return.
+ */
++ pax_set_fptr_mask
+ movq initial_code(%rip),%rax
+ pushq $0 # fake return address to stop unwinder
+ pushq $__KERNEL_CS # set correct cs
+@@ -269,7 +274,7 @@ ENTRY(secondary_startup_64)
+ bad_address:
+ jmp bad_address
+
+- .section ".init.text","ax"
++ __INIT
+ #ifdef CONFIG_EARLY_PRINTK
+ .globl early_idt_handlers
+ early_idt_handlers:
+@@ -314,18 +319,23 @@ ENTRY(early_idt_handler)
+ #endif /* EARLY_PRINTK */
+ 1: hlt
+ jmp 1b
++ .previous
+
+ #ifdef CONFIG_EARLY_PRINTK
++ __INITDATA
+ early_recursion_flag:
+ .long 0
++ .previous
+
++ .section .rodata,"a",@progbits
+ early_idt_msg:
+ .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
+ early_idt_ripmsg:
+ .asciz "RIP %s\n"
++ .previous
+ #endif /* CONFIG_EARLY_PRINTK */
+- .previous
+
++ .section .rodata,"a",@progbits
+ #define NEXT_PAGE(name) \
+ .balign PAGE_SIZE; \
+ ENTRY(name)
+@@ -338,7 +348,6 @@ ENTRY(name)
+ i = i + 1 ; \
+ .endr
+
+- .data
+ /*
+ * This default setting generates an ident mapping at address 0x100000
+ * and a mapping for the kernel that precisely maps virtual address
+@@ -349,13 +358,41 @@ NEXT_PAGE(init_level4_pgt)
+ .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
+ .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
+ .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
++ .org init_level4_pgt + L4_VMALLOC_START*8, 0
++ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
++ .org init_level4_pgt + L4_VMALLOC_END*8, 0
++ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
++ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
++ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
+ .org init_level4_pgt + L4_START_KERNEL*8, 0
+ /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
+ .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
+
++#ifdef CONFIG_PAX_PER_CPU_PGD
++NEXT_PAGE(cpu_pgd)
++ .rept NR_CPUS
++ .fill 512,8,0
++ .endr
++#endif
++
+ NEXT_PAGE(level3_ident_pgt)
+ .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
++#ifdef CONFIG_XEN
+ .fill 511,8,0
++#else
++ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
++ .fill 510,8,0
++#endif
++
++NEXT_PAGE(level3_vmalloc_start_pgt)
++ .fill 512,8,0
++
++NEXT_PAGE(level3_vmalloc_end_pgt)
++ .fill 512,8,0
++
++NEXT_PAGE(level3_vmemmap_pgt)
++ .fill L3_VMEMMAP_START,8,0
++ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
+
+ NEXT_PAGE(level3_kernel_pgt)
+ .fill L3_START_KERNEL,8,0
+@@ -363,20 +400,23 @@ NEXT_PAGE(level3_kernel_pgt)
+ .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
+ .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
+
++NEXT_PAGE(level2_vmemmap_pgt)
++ .fill 512,8,0
++
+ NEXT_PAGE(level2_fixmap_pgt)
+- .fill 506,8,0
+- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
+- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
+- .fill 5,8,0
++ .fill 507,8,0
++ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
++ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
++ .fill 4,8,0
+
+-NEXT_PAGE(level1_fixmap_pgt)
++NEXT_PAGE(level1_vsyscall_pgt)
+ .fill 512,8,0
+
+-NEXT_PAGE(level2_ident_pgt)
+- /* Since I easily can, map the first 1G.
++ /* Since I easily can, map the first 2G.
+ * Don't set NX because code runs from these pages.
+ */
+- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
++NEXT_PAGE(level2_ident_pgt)
++ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
+
+ NEXT_PAGE(level2_kernel_pgt)
+ /*
+@@ -389,33 +429,55 @@ NEXT_PAGE(level2_kernel_pgt)
+ * If you want to increase this then increase MODULES_VADDR
+ * too.)
+ */
+- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
+- KERNEL_IMAGE_SIZE/PMD_SIZE)
+-
+-NEXT_PAGE(level2_spare_pgt)
+- .fill 512, 8, 0
++ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
+
+ #undef PMDS
+ #undef NEXT_PAGE
+
+- .data
++ .align PAGE_SIZE
++ENTRY(cpu_gdt_table)
++ .rept NR_CPUS
++ .quad 0x0000000000000000 /* NULL descriptor */
++ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
++ .quad 0x00af9b000000ffff /* __KERNEL_CS */
++ .quad 0x00cf93000000ffff /* __KERNEL_DS */
++ .quad 0x00cffb000000ffff /* __USER32_CS */
++ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
++ .quad 0x00affb000000ffff /* __USER_CS */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
++#else
++ .quad 0x0 /* unused */
++#endif
++
++ .quad 0,0 /* TSS */
++ .quad 0,0 /* LDT */
++ .quad 0,0,0 /* three TLS descriptors */
++ .quad 0x0000f40000000000 /* node/CPU stored in limit */
++ /* asm/segment.h:GDT_ENTRIES must match this */
++
++ /* zero the remaining page */
++ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
++ .endr
++
+ .align 16
+ .globl early_gdt_descr
+ early_gdt_descr:
+ .word GDT_ENTRIES*8-1
+ early_gdt_descr_base:
+- .quad INIT_PER_CPU_VAR(gdt_page)
++ .quad cpu_gdt_table
+
+ ENTRY(phys_base)
+ /* This must match the first entry in level2_kernel_pgt */
+ .quad 0x0000000000000000
+
+ #include "../../x86/xen/xen-head.S"
+-
+- .section .bss, "aw", @nobits
++
++ .section .rodata,"a",@progbits
+ .align L1_CACHE_BYTES
+ ENTRY(idt_table)
+- .skip IDT_ENTRIES * 16
++ .fill 512,8,0
+
+ __PAGE_ALIGNED_BSS
+ .align PAGE_SIZE
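
The new L4_VMALLOC_START, L4_VMALLOC_END and L4_VMEMMAP_START symbols above
are just top-level page-table slot indices for fixed virtual addresses. A
sketch of the index arithmetic on 4-level x86-64 paging; the VMALLOC_START
value below is the layout used by kernels of this era and is an assumption
here, not something stated in the patch:

#include <stdint.h>
#include <stdio.h>

#define PGDIR_SHIFT 39          /* each PGD slot maps 512 GB */
#define PUD_SHIFT   30          /* each PUD slot maps   1 GB */
#define PTRS_PER    512

static unsigned pgd_index(uint64_t addr)
{
        return (addr >> PGDIR_SHIFT) & (PTRS_PER - 1);
}

static unsigned pud_index(uint64_t addr)
{
        return (addr >> PUD_SHIFT) & (PTRS_PER - 1);
}

int main(void)
{
        uint64_t vmalloc_start = 0xffffc90000000000ULL; /* assumed layout */
        printf("L4_VMALLOC_START = %u\n", pgd_index(vmalloc_start)); /* 402 */
        printf("L3_VMALLOC_START = %u\n", pud_index(vmalloc_start)); /* 0   */
        return 0;
}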
+diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
+index 9c3bd4a..e1d9b35 100644
+--- a/arch/x86/kernel/i386_ksyms_32.c
++++ b/arch/x86/kernel/i386_ksyms_32.c
+@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
+ EXPORT_SYMBOL(cmpxchg8b_emu);
+ #endif
+
++EXPORT_SYMBOL_GPL(cpu_gdt_table);
++
+ /* Networking helper routines. */
+ EXPORT_SYMBOL(csum_partial_copy_generic);
++EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
++EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
+
+ EXPORT_SYMBOL(__get_user_1);
+ EXPORT_SYMBOL(__get_user_2);
+@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
+
+ EXPORT_SYMBOL(csum_partial);
+ EXPORT_SYMBOL(empty_zero_page);
++
++#ifdef CONFIG_PAX_KERNEXEC
++EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
++#endif
+diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
+index 6104852..6114160 100644
+--- a/arch/x86/kernel/i8259.c
++++ b/arch/x86/kernel/i8259.c
+@@ -210,7 +210,7 @@ spurious_8259A_irq:
+ "spurious 8259A interrupt: IRQ%d.\n", irq);
+ spurious_irq_mask |= irqmask;
+ }
+- atomic_inc(&irq_err_count);
++ atomic_inc_unchecked(&irq_err_count);
+ /*
+ * Theoretically we do not have to handle this IRQ,
+ * but in Linux this does not cause problems and is
+diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
+index 43e9ccf..44ccf6f 100644
+--- a/arch/x86/kernel/init_task.c
++++ b/arch/x86/kernel/init_task.c
+@@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
+ * way process stacks are handled. This is done by having a special
+ * "init_task" linker map entry..
+ */
+-union thread_union init_thread_union __init_task_data =
+- { INIT_THREAD_INFO(init_task) };
++union thread_union init_thread_union __init_task_data;
+
+ /*
+ * Initial task structure.
+@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
+ * section. Since TSS's are completely CPU-local, we want them
+ * on exact cacheline boundaries, to eliminate cacheline ping-pong.
+ */
+-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
+-
++struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
++EXPORT_SYMBOL(init_tss);
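
The init_tss change above turns a per-CPU variable into a plain NR_CPUS-sized
array so that assembly can reach any CPU's TSS as base + cpu * size, which is
exactly what the "imul $TSS_size, ...; lea init_tss(%r12)" sequence in the
entry_64.S hunks computes. A minimal sketch of that equivalence; the structure
contents and NR_CPUS value are placeholders:

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4

struct tss { uint64_t sp0; uint8_t pad[120]; }; /* placeholder layout */

static struct tss init_tss[NR_CPUS];

int main(void)
{
        int cpu = 2;
        struct tss *t = init_tss + cpu;          /* tss = init_tss + get_cpu() */
        uintptr_t by_hand = (uintptr_t)init_tss
                            + (uintptr_t)cpu * sizeof(struct tss);
        printf("%d\n", (uintptr_t)t == by_hand); /* 1 */
        return 0;
}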
+diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
+index 8c96897..be66bfa 100644
+--- a/arch/x86/kernel/ioport.c
++++ b/arch/x86/kernel/ioport.c
+@@ -6,6 +6,7 @@
+ #include <linux/sched.h>
+ #include <linux/kernel.h>
+ #include <linux/capability.h>
++#include <linux/security.h>
+ #include <linux/errno.h>
+ #include <linux/types.h>
+ #include <linux/ioport.h>
+@@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
+
+ if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
+ return -EINVAL;
++#ifdef CONFIG_GRKERNSEC_IO
++ if (turn_on && grsec_disable_privio) {
++ gr_handle_ioperm();
++ return -EPERM;
++ }
++#endif
+ if (turn_on && !capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
+@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
+ * because the ->io_bitmap_max value must match the bitmap
+ * contents:
+ */
+- tss = &per_cpu(init_tss, get_cpu());
++ tss = init_tss + get_cpu();
+
+ if (turn_on)
+ bitmap_clear(t->io_bitmap_ptr, from, num);
+@@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
+ return -EINVAL;
+ /* Trying to gain more privileges? */
+ if (level > old) {
++#ifdef CONFIG_GRKERNSEC_IO
++ if (grsec_disable_privio) {
++ gr_handle_iopl();
++ return -EPERM;
++ }
++#endif
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+ }
+diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
+index 429e0c9..17b3ece 100644
+--- a/arch/x86/kernel/irq.c
++++ b/arch/x86/kernel/irq.c
+@@ -18,7 +18,7 @@
+ #include <asm/mce.h>
+ #include <asm/hw_irq.h>
+
+-atomic_t irq_err_count;
++atomic_unchecked_t irq_err_count;
+
+ /* Function pointer for generic interrupt vector handling */
+ void (*x86_platform_ipi_callback)(void) = NULL;
+@@ -117,9 +117,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
+ seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
+ seq_printf(p, " Machine check polls\n");
+ #endif
+- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
++ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
+ #if defined(CONFIG_X86_IO_APIC)
+- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
++ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
+ #endif
+ return 0;
+ }
+@@ -159,10 +159,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
+
+ u64 arch_irq_stat(void)
+ {
+- u64 sum = atomic_read(&irq_err_count);
++ u64 sum = atomic_read_unchecked(&irq_err_count);
+
+ #ifdef CONFIG_X86_IO_APIC
+- sum += atomic_read(&irq_mis_count);
++ sum += atomic_read_unchecked(&irq_mis_count);
+ #endif
+ return sum;
+ }
+diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
+index 7209070..cbcd71a 100644
+--- a/arch/x86/kernel/irq_32.c
++++ b/arch/x86/kernel/irq_32.c
+@@ -36,7 +36,7 @@ static int check_stack_overflow(void)
+ __asm__ __volatile__("andl %%esp,%0" :
+ "=r" (sp) : "0" (THREAD_SIZE - 1));
+
+- return sp < (sizeof(struct thread_info) + STACK_WARN);
++ return sp < STACK_WARN;
+ }
+
+ static void print_stack_overflow(void)
+@@ -54,8 +54,8 @@ static inline void print_stack_overflow(void) { }
+ * per-CPU IRQ handling contexts (thread information and stack)
+ */
+ union irq_ctx {
+- struct thread_info tinfo;
+- u32 stack[THREAD_SIZE/sizeof(u32)];
++ unsigned long previous_esp;
++ u32 stack[THREAD_SIZE/sizeof(u32)];
+ } __attribute__((aligned(THREAD_SIZE)));
+
+ static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
+@@ -75,10 +75,9 @@ static void call_on_stack(void *func, void *stack)
+ static inline int
+ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
+ {
+- union irq_ctx *curctx, *irqctx;
++ union irq_ctx *irqctx;
+ u32 *isp, arg1, arg2;
+
+- curctx = (union irq_ctx *) current_thread_info();
+ irqctx = __this_cpu_read(hardirq_ctx);
+
+ /*
+@@ -87,21 +86,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
+ * handler) we can't do that and just have to keep using the
+ * current stack (which is the irq stack already after all)
+ */
+- if (unlikely(curctx == irqctx))
++ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
+ return 0;
+
+ /* build the stack frame on the IRQ stack */
+- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
+- irqctx->tinfo.task = curctx->tinfo.task;
+- irqctx->tinfo.previous_esp = current_stack_pointer;
++ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
++ irqctx->previous_esp = current_stack_pointer;
+
+- /*
+- * Copy the softirq bits in preempt_count so that the
+- * softirq checks work in the hardirq context.
+- */
+- irqctx->tinfo.preempt_count =
+- (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
+- (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ __set_fs(MAKE_MM_SEG(0));
++#endif
+
+ if (unlikely(overflow))
+ call_on_stack(print_stack_overflow, isp);
+@@ -113,6 +107,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
+ : "0" (irq), "1" (desc), "2" (isp),
+ "D" (desc->handle_irq)
+ : "memory", "cc", "ecx");
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ __set_fs(current_thread_info()->addr_limit);
++#endif
++
+ return 1;
+ }
+
+@@ -121,29 +120,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
+ */
+ void __cpuinit irq_ctx_init(int cpu)
+ {
+- union irq_ctx *irqctx;
+-
+ if (per_cpu(hardirq_ctx, cpu))
+ return;
+
+- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
+- THREAD_FLAGS,
+- THREAD_ORDER));
+- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
+- irqctx->tinfo.cpu = cpu;
+- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
+- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
+-
+- per_cpu(hardirq_ctx, cpu) = irqctx;
+-
+- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
+- THREAD_FLAGS,
+- THREAD_ORDER));
+- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
+- irqctx->tinfo.cpu = cpu;
+- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
+-
+- per_cpu(softirq_ctx, cpu) = irqctx;
++ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
++ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
+
+ printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
+ cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
+@@ -152,7 +133,6 @@ void __cpuinit irq_ctx_init(int cpu)
+ asmlinkage void do_softirq(void)
+ {
+ unsigned long flags;
+- struct thread_info *curctx;
+ union irq_ctx *irqctx;
+ u32 *isp;
+
+@@ -162,15 +142,22 @@ asmlinkage void do_softirq(void)
+ local_irq_save(flags);
+
+ if (local_softirq_pending()) {
+- curctx = current_thread_info();
+ irqctx = __this_cpu_read(softirq_ctx);
+- irqctx->tinfo.task = curctx->task;
+- irqctx->tinfo.previous_esp = current_stack_pointer;
++ irqctx->previous_esp = current_stack_pointer;
+
+ /* build the stack frame on the softirq stack */
+- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
++ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ __set_fs(MAKE_MM_SEG(0));
++#endif
+
+ call_on_stack(__do_softirq, isp);
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ __set_fs(current_thread_info()->addr_limit);
++#endif
++
+ /*
+ * Shouldn't happen, we returned above if in_interrupt():
+ */
+diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
+index 69bca46..0bac999 100644
+--- a/arch/x86/kernel/irq_64.c
++++ b/arch/x86/kernel/irq_64.c
+@@ -38,7 +38,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
+ #ifdef CONFIG_DEBUG_STACKOVERFLOW
+ u64 curbase = (u64)task_stack_page(current);
+
+- if (user_mode_vm(regs))
++ if (user_mode(regs))
+ return;
+
+ WARN_ONCE(regs->sp >= curbase &&
+diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
+index faba577..93b9e71 100644
+--- a/arch/x86/kernel/kgdb.c
++++ b/arch/x86/kernel/kgdb.c
+@@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
+ #ifdef CONFIG_X86_32
+ switch (regno) {
+ case GDB_SS:
+- if (!user_mode_vm(regs))
++ if (!user_mode(regs))
+ *(unsigned long *)mem = __KERNEL_DS;
+ break;
+ case GDB_SP:
+- if (!user_mode_vm(regs))
++ if (!user_mode(regs))
+ *(unsigned long *)mem = kernel_stack_pointer(regs);
+ break;
+ case GDB_GS:
+@@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
+ case 'k':
+ /* clear the trace bit */
+ linux_regs->flags &= ~X86_EFLAGS_TF;
+- atomic_set(&kgdb_cpu_doing_single_step, -1);
++ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
+
+ /* set the trace bit if we're stepping */
+ if (remcomInBuffer[0] == 's') {
+ linux_regs->flags |= X86_EFLAGS_TF;
+- atomic_set(&kgdb_cpu_doing_single_step,
++ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
+ raw_smp_processor_id());
+ }
+
+@@ -543,7 +543,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
+
+ switch (cmd) {
+ case DIE_DEBUG:
+- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
++ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
+ if (user_mode(regs))
+ return single_step_cont(regs, args);
+ break;
+diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
+index 7da647d..5d3c4c1 100644
+--- a/arch/x86/kernel/kprobes.c
++++ b/arch/x86/kernel/kprobes.c
+@@ -118,8 +118,11 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
+ } __attribute__((packed)) *insn;
+
+ insn = (struct __arch_relative_insn *)from;
++
++ pax_open_kernel();
+ insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
+ insn->op = op;
++ pax_close_kernel();
+ }
+
+ /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
+@@ -156,7 +159,7 @@ static int __kprobes can_boost(kprobe_opcode_t *opcodes)
+ kprobe_opcode_t opcode;
+ kprobe_opcode_t *orig_opcodes = opcodes;
+
+- if (search_exception_tables((unsigned long)opcodes))
++ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
+ return 0; /* Page fault may occur on this address. */
+
+ retry:
+@@ -317,7 +320,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
+ }
+ }
+ insn_get_length(&insn);
++ pax_open_kernel();
+ memcpy(dest, insn.kaddr, insn.length);
++ pax_close_kernel();
+
+ #ifdef CONFIG_X86_64
+ if (insn_rip_relative(&insn)) {
+@@ -341,7 +346,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
+ (u8 *) dest;
+ BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
+ disp = (u8 *) dest + insn_offset_displacement(&insn);
++ pax_open_kernel();
+ *(s32 *) disp = (s32) newdisp;
++ pax_close_kernel();
+ }
+ #endif
+ return insn.length;
+@@ -355,12 +362,12 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)
+ */
+ __copy_instruction(p->ainsn.insn, p->addr, 0);
+
+- if (can_boost(p->addr))
++ if (can_boost(ktla_ktva(p->addr)))
+ p->ainsn.boostable = 0;
+ else
+ p->ainsn.boostable = -1;
+
+- p->opcode = *p->addr;
++ p->opcode = *(ktla_ktva(p->addr));
+ }
+
+ int __kprobes arch_prepare_kprobe(struct kprobe *p)
+@@ -477,7 +484,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
+ * nor set current_kprobe, because it doesn't use single
+ * stepping.
+ */
+- regs->ip = (unsigned long)p->ainsn.insn;
++ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
+ preempt_enable_no_resched();
+ return;
+ }
+@@ -496,7 +503,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
+ if (p->opcode == BREAKPOINT_INSTRUCTION)
+ regs->ip = (unsigned long)p->addr;
+ else
+- regs->ip = (unsigned long)p->ainsn.insn;
++ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
+ }
+
+ /*
+@@ -575,7 +582,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
+ setup_singlestep(p, regs, kcb, 0);
+ return 1;
+ }
+- } else if (*addr != BREAKPOINT_INSTRUCTION) {
++ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
+ /*
+ * The breakpoint instruction was removed right
+ * after we hit it. Another cpu has removed
+@@ -683,6 +690,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
+ " movq %rax, 152(%rsp)\n"
+ RESTORE_REGS_STRING
+ " popfq\n"
++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
++ " btsq $63,(%rsp)\n"
++#endif
+ #else
+ " pushf\n"
+ SAVE_REGS_STRING
+@@ -820,7 +830,7 @@ static void __kprobes resume_execution(struct kprobe *p,
+ struct pt_regs *regs, struct kprobe_ctlblk *kcb)
+ {
+ unsigned long *tos = stack_addr(regs);
+- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
++ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
+ unsigned long orig_ip = (unsigned long)p->addr;
+ kprobe_opcode_t *insn = p->ainsn.insn;
+
+@@ -1002,7 +1012,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
+ struct die_args *args = data;
+ int ret = NOTIFY_DONE;
+
+- if (args->regs && user_mode_vm(args->regs))
++ if (args->regs && user_mode(args->regs))
+ return ret;
+
+ switch (val) {
+@@ -1384,7 +1394,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
+ * Verify if the address gap is in 2GB range, because this uses
+ * a relative jump.
+ */
+- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
++ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
+ if (abs(rel) > 0x7fffffff)
+ return -ERANGE;
+
+@@ -1405,11 +1415,11 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
+ synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
+
+ /* Set probe function call */
+- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
++ synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
+
+ /* Set returning jmp instruction at the tail of out-of-line buffer */
+ synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
+- (u8 *)op->kp.addr + op->optinsn.size);
++ (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
+
+ flush_icache_range((unsigned long) buf,
+ (unsigned long) buf + TMPL_END_IDX +
+@@ -1431,7 +1441,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
+ ((long)op->kp.addr + RELATIVEJUMP_SIZE));
+
+ /* Backup instructions which will be replaced by jump address */
+- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
++ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
+ RELATIVE_ADDR_SIZE);
+
+ insn_buf[0] = RELATIVEJUMP_OPCODE;
+diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
+index a9c2116..a52d4fc 100644
+--- a/arch/x86/kernel/kvm.c
++++ b/arch/x86/kernel/kvm.c
+@@ -437,6 +437,7 @@ static void __init paravirt_ops_setup(void)
+ pv_mmu_ops.set_pud = kvm_set_pud;
+ #if PAGETABLE_LEVELS == 4
+ pv_mmu_ops.set_pgd = kvm_set_pgd;
++ pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
+ #endif
+ #endif
+ pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
+diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
+index ea69726..604d066 100644
+--- a/arch/x86/kernel/ldt.c
++++ b/arch/x86/kernel/ldt.c
+@@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
+ if (reload) {
+ #ifdef CONFIG_SMP
+ preempt_disable();
+- load_LDT(pc);
++ load_LDT_nolock(pc);
+ if (!cpumask_equal(mm_cpumask(current->mm),
+ cpumask_of(smp_processor_id())))
+ smp_call_function(flush_ldt, current->mm, 1);
+ preempt_enable();
+ #else
+- load_LDT(pc);
++ load_LDT_nolock(pc);
+ #endif
+ }
+ if (oldsize) {
+@@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
+ return err;
+
+ for (i = 0; i < old->size; i++)
+- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
++ write_ldt_entry(new->ldt, i, old->ldt + i);
+ return 0;
+ }
+
+@@ -116,6 +116,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+ retval = copy_ldt(&mm->context, &old_mm->context);
+ mutex_unlock(&old_mm->context.lock);
+ }
++
++ if (tsk == current) {
++ mm->context.vdso = 0;
++
++#ifdef CONFIG_X86_32
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++ mm->context.user_cs_base = 0UL;
++ mm->context.user_cs_limit = ~0UL;
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
++ cpus_clear(mm->context.cpu_user_cs_mask);
++#endif
++
++#endif
++#endif
++
++ }
++
+ return retval;
+ }
+
+@@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
+ }
+ }
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
++ error = -EINVAL;
++ goto out_unlock;
++ }
++#endif
++
+ fill_ldt(&ldt, &ldt_info);
+ if (oldmode)
+ ldt.avl = 0;
+diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
+index a3fa43b..8966f4c 100644
+--- a/arch/x86/kernel/machine_kexec_32.c
++++ b/arch/x86/kernel/machine_kexec_32.c
+@@ -27,7 +27,7 @@
+ #include <asm/cacheflush.h>
+ #include <asm/debugreg.h>
+
+-static void set_idt(void *newidt, __u16 limit)
++static void set_idt(struct desc_struct *newidt, __u16 limit)
+ {
+ struct desc_ptr curidt;
+
+@@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16 limit)
+ }
+
+
+-static void set_gdt(void *newgdt, __u16 limit)
++static void set_gdt(struct desc_struct *newgdt, __u16 limit)
+ {
+ struct desc_ptr curgdt;
+
+@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
+ }
+
+ control_page = page_address(image->control_code_page);
+- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
++ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
+
+ relocate_kernel_ptr = control_page;
+ page_list[PA_CONTROL_PAGE] = __pa(control_page);
+diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
+index 3ca42d0..7cff8cc 100644
+--- a/arch/x86/kernel/microcode_intel.c
++++ b/arch/x86/kernel/microcode_intel.c
+@@ -436,13 +436,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
+
+ static int get_ucode_user(void *to, const void *from, size_t n)
+ {
+- return copy_from_user(to, from, n);
++ return copy_from_user(to, (const void __force_user *)from, n);
+ }
+
+ static enum ucode_state
+ request_microcode_user(int cpu, const void __user *buf, size_t size)
+ {
+- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
++ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
+ }
+
+ static void microcode_fini_cpu(int cpu)
+diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
+index 925179f..85bec6c 100644
+--- a/arch/x86/kernel/module.c
++++ b/arch/x86/kernel/module.c
+@@ -36,15 +36,60 @@
+ #define DEBUGP(fmt...)
+ #endif
+
+-void *module_alloc(unsigned long size)
++static inline void *__module_alloc(unsigned long size, pgprot_t prot)
+ {
+ if (PAGE_ALIGN(size) > MODULES_LEN)
+ return NULL;
+ return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
+- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
++ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
+ -1, __builtin_return_address(0));
+ }
+
++void *module_alloc(unsigned long size)
++{
++
++#ifdef CONFIG_PAX_KERNEXEC
++ return __module_alloc(size, PAGE_KERNEL);
++#else
++ return __module_alloc(size, PAGE_KERNEL_EXEC);
++#endif
++
++}
++
++#ifdef CONFIG_PAX_KERNEXEC
++#ifdef CONFIG_X86_32
++void *module_alloc_exec(unsigned long size)
++{
++ struct vm_struct *area;
++
++ if (size == 0)
++ return NULL;
++
++ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
++ return area ? area->addr : NULL;
++}
++EXPORT_SYMBOL(module_alloc_exec);
++
++void module_free_exec(struct module *mod, void *module_region)
++{
++ vunmap(module_region);
++}
++EXPORT_SYMBOL(module_free_exec);
++#else
++void module_free_exec(struct module *mod, void *module_region)
++{
++ module_free(mod, module_region);
++}
++EXPORT_SYMBOL(module_free_exec);
++
++void *module_alloc_exec(unsigned long size)
++{
++ return __module_alloc(size, PAGE_KERNEL_RX);
++}
++EXPORT_SYMBOL(module_alloc_exec);
++#endif
++#endif
++
+ #ifdef CONFIG_X86_32
+ int apply_relocate(Elf32_Shdr *sechdrs,
+ const char *strtab,
+@@ -55,14 +100,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
+ unsigned int i;
+ Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
+ Elf32_Sym *sym;
+- uint32_t *location;
++ uint32_t *plocation, location;
+
+ DEBUGP("Applying relocate section %u to %u\n", relsec,
+ sechdrs[relsec].sh_info);
+ for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+ /* This is where to make the change */
+- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+- + rel[i].r_offset;
++ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
++ location = (uint32_t)plocation;
++ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
++ plocation = ktla_ktva((void *)plocation);
+ /* This is the symbol it is referring to. Note that all
+ undefined symbols have been resolved. */
+ sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+@@ -71,11 +118,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
+ switch (ELF32_R_TYPE(rel[i].r_info)) {
+ case R_386_32:
+ /* We add the value into the location given */
+- *location += sym->st_value;
++ pax_open_kernel();
++ *plocation += sym->st_value;
++ pax_close_kernel();
+ break;
+ case R_386_PC32:
+ /* Add the value, subtract its position */
+- *location += sym->st_value - (uint32_t)location;
++ pax_open_kernel();
++ *plocation += sym->st_value - location;
++ pax_close_kernel();
+ break;
+ default:
+ printk(KERN_ERR "module %s: Unknown relocation: %u\n",
+@@ -120,21 +171,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
+ case R_X86_64_NONE:
+ break;
+ case R_X86_64_64:
++ pax_open_kernel();
+ *(u64 *)loc = val;
++ pax_close_kernel();
+ break;
+ case R_X86_64_32:
++ pax_open_kernel();
+ *(u32 *)loc = val;
++ pax_close_kernel();
+ if (val != *(u32 *)loc)
+ goto overflow;
+ break;
+ case R_X86_64_32S:
++ pax_open_kernel();
+ *(s32 *)loc = val;
++ pax_close_kernel();
+ if ((s64)val != *(s32 *)loc)
+ goto overflow;
+ break;
+ case R_X86_64_PC32:
+ val -= (u64)loc;
++ pax_open_kernel();
+ *(u32 *)loc = val;
++ pax_close_kernel();
++
+ #if 0
+ if ((s64)val != *(s32 *)loc)
+ goto overflow;
+diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
+index e88f37b..1353db6 100644
+--- a/arch/x86/kernel/nmi.c
++++ b/arch/x86/kernel/nmi.c
+@@ -408,6 +408,17 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
+ dotraplinkage notrace __kprobes void
+ do_nmi(struct pt_regs *regs, long error_code)
+ {
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ if (!user_mode(regs)) {
++ unsigned long cs = regs->cs & 0xFFFF;
++ unsigned long ip = ktva_ktla(regs->ip);
++
++ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
++ regs->ip = ip;
++ }
++#endif
++
+ nmi_enter();
+
+ inc_irq_stat(__nmi_count);
+diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
+index 676b8c7..870ba04 100644
+--- a/arch/x86/kernel/paravirt-spinlocks.c
++++ b/arch/x86/kernel/paravirt-spinlocks.c
+@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
+ arch_spin_lock(lock);
+ }
+
+-struct pv_lock_ops pv_lock_ops = {
++struct pv_lock_ops pv_lock_ops __read_only = {
+ #ifdef CONFIG_SMP
+ .spin_is_locked = __ticket_spin_is_locked,
+ .spin_is_contended = __ticket_spin_is_contended,
+diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
+index d90272e..6bb013b 100644
+--- a/arch/x86/kernel/paravirt.c
++++ b/arch/x86/kernel/paravirt.c
+@@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
+ {
+ return x;
+ }
++#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
++PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
++#endif
+
+ void __init default_banner(void)
+ {
+@@ -145,15 +148,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
+ if (opfunc == NULL)
+ /* If there's no function, patch it with a ud2a (BUG) */
+ ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
+- else if (opfunc == _paravirt_nop)
++ else if (opfunc == (void *)_paravirt_nop)
+ /* If the operation is a nop, then nop the callsite */
+ ret = paravirt_patch_nop();
+
+ /* identity functions just return their single argument */
+- else if (opfunc == _paravirt_ident_32)
++ else if (opfunc == (void *)_paravirt_ident_32)
+ ret = paravirt_patch_ident_32(insnbuf, len);
+- else if (opfunc == _paravirt_ident_64)
++ else if (opfunc == (void *)_paravirt_ident_64)
+ ret = paravirt_patch_ident_64(insnbuf, len);
++#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
++ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
++ ret = paravirt_patch_ident_64(insnbuf, len);
++#endif
+
+ else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
+ type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
+@@ -178,7 +185,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
+ if (insn_len > len || start == NULL)
+ insn_len = len;
+ else
+- memcpy(insnbuf, start, insn_len);
++ memcpy(insnbuf, ktla_ktva(start), insn_len);
+
+ return insn_len;
+ }
+@@ -302,7 +309,7 @@ void arch_flush_lazy_mmu_mode(void)
+ preempt_enable();
+ }
+
+-struct pv_info pv_info = {
++struct pv_info pv_info __read_only = {
+ .name = "bare hardware",
+ .paravirt_enabled = 0,
+ .kernel_rpl = 0,
+@@ -313,16 +320,16 @@ struct pv_info pv_info = {
+ #endif
+ };
+
+-struct pv_init_ops pv_init_ops = {
++struct pv_init_ops pv_init_ops __read_only = {
+ .patch = native_patch,
+ };
+
+-struct pv_time_ops pv_time_ops = {
++struct pv_time_ops pv_time_ops __read_only = {
+ .sched_clock = native_sched_clock,
+ .steal_clock = native_steal_clock,
+ };
+
+-struct pv_irq_ops pv_irq_ops = {
++struct pv_irq_ops pv_irq_ops __read_only = {
+ .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
+ .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
+ .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
+@@ -334,7 +341,7 @@ struct pv_irq_ops pv_irq_ops = {
+ #endif
+ };
+
+-struct pv_cpu_ops pv_cpu_ops = {
++struct pv_cpu_ops pv_cpu_ops __read_only = {
+ .cpuid = native_cpuid,
+ .get_debugreg = native_get_debugreg,
+ .set_debugreg = native_set_debugreg,
+@@ -395,21 +402,26 @@ struct pv_cpu_ops pv_cpu_ops = {
+ .end_context_switch = paravirt_nop,
+ };
+
+-struct pv_apic_ops pv_apic_ops = {
++struct pv_apic_ops pv_apic_ops __read_only = {
+ #ifdef CONFIG_X86_LOCAL_APIC
+ .startup_ipi_hook = paravirt_nop,
+ #endif
+ };
+
+-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
++#ifdef CONFIG_X86_32
++#ifdef CONFIG_X86_PAE
++/* 64-bit pagetable entries */
++#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
++#else
+ /* 32-bit pagetable entries */
+ #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
++#endif
+ #else
+ /* 64-bit pagetable entries */
+ #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
+ #endif
+
+-struct pv_mmu_ops pv_mmu_ops = {
++struct pv_mmu_ops pv_mmu_ops __read_only = {
+
+ .read_cr2 = native_read_cr2,
+ .write_cr2 = native_write_cr2,
+@@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
+ .make_pud = PTE_IDENT,
+
+ .set_pgd = native_set_pgd,
++ .set_pgd_batched = native_set_pgd_batched,
+ #endif
+ #endif /* PAGETABLE_LEVELS >= 3 */
+
+@@ -478,6 +491,12 @@ struct pv_mmu_ops pv_mmu_ops = {
+ },
+
+ .set_fixmap = native_set_fixmap,
++
++#ifdef CONFIG_PAX_KERNEXEC
++ .pax_open_kernel = native_pax_open_kernel,
++ .pax_close_kernel = native_pax_close_kernel,
++#endif
++
+ };
+
+ EXPORT_SYMBOL_GPL(pv_time_ops);
+diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
+index 35ccf75..7a15747 100644
+--- a/arch/x86/kernel/pci-iommu_table.c
++++ b/arch/x86/kernel/pci-iommu_table.c
+@@ -2,7 +2,7 @@
+ #include <asm/iommu_table.h>
+ #include <linux/string.h>
+ #include <linux/kallsyms.h>
+-
++#include <linux/sched.h>
+
+ #define DEBUG 1
+
+diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
+index ee5d4fb..426649b 100644
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -48,16 +48,33 @@ void free_thread_xstate(struct task_struct *tsk)
+
+ void free_thread_info(struct thread_info *ti)
+ {
+- free_thread_xstate(ti->task);
+ free_pages((unsigned long)ti, THREAD_ORDER);
+ }
+
++static struct kmem_cache *task_struct_cachep;
++
+ void arch_task_cache_init(void)
+ {
+- task_xstate_cachep =
+- kmem_cache_create("task_xstate", xstate_size,
++ /* create a slab on which task_structs can be allocated */
++ task_struct_cachep =
++ kmem_cache_create("task_struct", sizeof(struct task_struct),
++ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
++
++ task_xstate_cachep =
++ kmem_cache_create("task_xstate", xstate_size,
+ __alignof__(union thread_xstate),
+- SLAB_PANIC | SLAB_NOTRACK, NULL);
++ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
++}
++
++struct task_struct *alloc_task_struct_node(int node)
++{
++ return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
++}
++
++void free_task_struct(struct task_struct *task)
++{
++ free_thread_xstate(task);
++ kmem_cache_free(task_struct_cachep, task);
+ }
+
+ /*
+@@ -70,7 +87,7 @@ void exit_thread(void)
+ unsigned long *bp = t->io_bitmap_ptr;
+
+ if (bp) {
+- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
++ struct tss_struct *tss = init_tss + get_cpu();
+
+ t->io_bitmap_ptr = NULL;
+ clear_thread_flag(TIF_IO_BITMAP);
+@@ -106,7 +123,7 @@ void show_regs_common(void)
+
+ printk(KERN_CONT "\n");
+ printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
+- current->pid, current->comm, print_tainted(),
++ task_pid_nr(current), current->comm, print_tainted(),
+ init_utsname()->release,
+ (int)strcspn(init_utsname()->version, " "),
+ init_utsname()->version);
+@@ -120,6 +137,9 @@ void flush_thread(void)
+ {
+ struct task_struct *tsk = current;
+
++#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
++ loadsegment(gs, 0);
++#endif
+ flush_ptrace_hw_breakpoint(tsk);
+ memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
+ /*
+@@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
+ regs.di = (unsigned long) arg;
+
+ #ifdef CONFIG_X86_32
+- regs.ds = __USER_DS;
+- regs.es = __USER_DS;
++ regs.ds = __KERNEL_DS;
++ regs.es = __KERNEL_DS;
+ regs.fs = __KERNEL_PERCPU;
+- regs.gs = __KERNEL_STACK_CANARY;
++ savesegment(gs, regs.gs);
+ #else
+ regs.ss = __KERNEL_DS;
+ #endif
+@@ -411,7 +431,7 @@ bool set_pm_idle_to_default(void)
+
+ return ret;
+ }
+-void stop_this_cpu(void *dummy)
++__noreturn void stop_this_cpu(void *dummy)
+ {
+ local_irq_disable();
+ /*
+@@ -653,16 +673,37 @@ static int __init idle_setup(char *str)
+ }
+ early_param("idle", idle_setup);
+
+-unsigned long arch_align_stack(unsigned long sp)
++#ifdef CONFIG_PAX_RANDKSTACK
++void pax_randomize_kstack(struct pt_regs *regs)
+ {
+- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+- sp -= get_random_int() % 8192;
+- return sp & ~0xf;
+-}
++ struct thread_struct *thread = &current->thread;
++ unsigned long time;
+
+-unsigned long arch_randomize_brk(struct mm_struct *mm)
+-{
+- unsigned long range_end = mm->brk + 0x02000000;
+- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
+-}
++ if (!randomize_va_space)
++ return;
++
++ if (v8086_mode(regs))
++ return;
+
++ rdtscl(time);
++
++ /* P4 seems to return a 0 LSB, ignore it */
++#ifdef CONFIG_MPENTIUM4
++ time &= 0x3EUL;
++ time <<= 2;
++#elif defined(CONFIG_X86_64)
++ time &= 0xFUL;
++ time <<= 4;
++#else
++ time &= 0x1FUL;
++ time <<= 3;
++#endif
++
++ thread->sp0 ^= time;
++ load_sp0(init_tss + smp_processor_id(), thread);
++
++#ifdef CONFIG_X86_64
++ percpu_write(kernel_stack, thread->sp0);
++#endif
++}
++#endif
+diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
+index 795b79f..063767a 100644
+--- a/arch/x86/kernel/process_32.c
++++ b/arch/x86/kernel/process_32.c
+@@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
+ unsigned long thread_saved_pc(struct task_struct *tsk)
+ {
+ return ((unsigned long *)tsk->thread.sp)[3];
++//XXX return tsk->thread.eip;
+ }
+
+ #ifndef CONFIG_SMP
+@@ -130,15 +131,14 @@ void __show_regs(struct pt_regs *regs, int all)
+ unsigned long sp;
+ unsigned short ss, gs;
+
+- if (user_mode_vm(regs)) {
++ if (user_mode(regs)) {
+ sp = regs->sp;
+ ss = regs->ss & 0xffff;
+- gs = get_user_gs(regs);
+ } else {
+ sp = kernel_stack_pointer(regs);
+ savesegment(ss, ss);
+- savesegment(gs, gs);
+ }
++ gs = get_user_gs(regs);
+
+ show_regs_common();
+
+@@ -200,13 +200,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
+ struct task_struct *tsk;
+ int err;
+
+- childregs = task_pt_regs(p);
++ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
+ *childregs = *regs;
+ childregs->ax = 0;
+ childregs->sp = sp;
+
+ p->thread.sp = (unsigned long) childregs;
+ p->thread.sp0 = (unsigned long) (childregs+1);
++ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
+
+ p->thread.ip = (unsigned long) ret_from_fork;
+
+@@ -296,7 +297,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+ struct thread_struct *prev = &prev_p->thread,
+ *next = &next_p->thread;
+ int cpu = smp_processor_id();
+- struct tss_struct *tss = &per_cpu(init_tss, cpu);
++ struct tss_struct *tss = init_tss + cpu;
+ bool preload_fpu;
+
+ /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
+@@ -331,6 +332,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+ */
+ lazy_save_gs(prev->gs);
+
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ __set_fs(task_thread_info(next_p)->addr_limit);
++#endif
++
+ /*
+ * Load the per-thread Thread-Local Storage descriptor.
+ */
+@@ -366,6 +371,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+ */
+ arch_end_context_switch(next_p);
+
++ percpu_write(current_task, next_p);
++ percpu_write(current_tinfo, &next_p->tinfo);
++
+ if (preload_fpu)
+ __math_state_restore();
+
+@@ -375,8 +383,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+ if (prev->gs | next->gs)
+ lazy_load_gs(next->gs);
+
+- percpu_write(current_task, next_p);
+-
+ return prev_p;
+ }
+
+@@ -406,4 +412,3 @@ unsigned long get_wchan(struct task_struct *p)
+ } while (count++ < 16);
+ return 0;
+ }
+-
+diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
+index 3bd7e6e..90b2bcf 100644
+--- a/arch/x86/kernel/process_64.c
++++ b/arch/x86/kernel/process_64.c
+@@ -89,7 +89,7 @@ static void __exit_idle(void)
+ void exit_idle(void)
+ {
+ /* idle loop has pid 0 */
+- if (current->pid)
++ if (task_pid_nr(current))
+ return;
+ __exit_idle();
+ }
+@@ -264,8 +264,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
+ struct pt_regs *childregs;
+ struct task_struct *me = current;
+
+- childregs = ((struct pt_regs *)
+- (THREAD_SIZE + task_stack_page(p))) - 1;
++ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
+ *childregs = *regs;
+
+ childregs->ax = 0;
+@@ -277,6 +276,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
+ p->thread.sp = (unsigned long) childregs;
+ p->thread.sp0 = (unsigned long) (childregs+1);
+ p->thread.usersp = me->thread.usersp;
++ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
+
+ set_tsk_thread_flag(p, TIF_FORK);
+
+@@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+ struct thread_struct *prev = &prev_p->thread;
+ struct thread_struct *next = &next_p->thread;
+ int cpu = smp_processor_id();
+- struct tss_struct *tss = &per_cpu(init_tss, cpu);
++ struct tss_struct *tss = init_tss + cpu;
+ unsigned fsindex, gsindex;
+ bool preload_fpu;
+
+@@ -475,10 +475,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+ prev->usersp = percpu_read(old_rsp);
+ percpu_write(old_rsp, next->usersp);
+ percpu_write(current_task, next_p);
++ percpu_write(current_tinfo, &next_p->tinfo);
+
+- percpu_write(kernel_stack,
+- (unsigned long)task_stack_page(next_p) +
+- THREAD_SIZE - KERNEL_STACK_OFFSET);
++ percpu_write(kernel_stack, next->sp0);
+
+ /*
+ * Now maybe reload the debug registers and handle I/O bitmaps
+@@ -540,12 +539,11 @@ unsigned long get_wchan(struct task_struct *p)
+ if (!p || p == current || p->state == TASK_RUNNING)
+ return 0;
+ stack = (unsigned long)task_stack_page(p);
+- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
++ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
+ return 0;
+ fp = *(u64 *)(p->thread.sp);
+ do {
+- if (fp < (unsigned long)stack ||
+- fp >= (unsigned long)stack+THREAD_SIZE)
++ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
+ return 0;
+ ip = *(u64 *)(fp+8);
+ if (!in_sched_functions(ip))
+diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
+index 8252879..d3219e0 100644
+--- a/arch/x86/kernel/ptrace.c
++++ b/arch/x86/kernel/ptrace.c
+@@ -822,7 +822,7 @@ long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
+ {
+ int ret;
+- unsigned long __user *datap = (unsigned long __user *)data;
++ unsigned long __user *datap = (__force unsigned long __user *)data;
+
+ switch (request) {
+ /* read the word at location addr in the USER area. */
+@@ -907,14 +907,14 @@ long arch_ptrace(struct task_struct *child, long request,
+ if ((int) addr < 0)
+ return -EIO;
+ ret = do_get_thread_area(child, addr,
+- (struct user_desc __user *)data);
++ (__force struct user_desc __user *) data);
+ break;
+
+ case PTRACE_SET_THREAD_AREA:
+ if ((int) addr < 0)
+ return -EIO;
+ ret = do_set_thread_area(child, addr,
+- (struct user_desc __user *)data, 0);
++ (__force struct user_desc __user *) data, 0);
+ break;
+ #endif
+
+@@ -1331,7 +1331,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
+ memset(info, 0, sizeof(*info));
+ info->si_signo = SIGTRAP;
+ info->si_code = si_code;
+- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
++ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
+ }
+
+ void user_single_step_siginfo(struct task_struct *tsk,
+diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
+index 42eb330..139955c 100644
+--- a/arch/x86/kernel/pvclock.c
++++ b/arch/x86/kernel/pvclock.c
+@@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
+ return pv_tsc_khz;
+ }
+
+-static atomic64_t last_value = ATOMIC64_INIT(0);
++static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
+
+ void pvclock_resume(void)
+ {
+- atomic64_set(&last_value, 0);
++ atomic64_set_unchecked(&last_value, 0);
+ }
+
+ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
+@@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
+ * updating at the same time, and one of them could be slightly behind,
+ * making the assumption that last_value always go forward fail to hold.
+ */
+- last = atomic64_read(&last_value);
++ last = atomic64_read_unchecked(&last_value);
+ do {
+ if (ret < last)
+ return last;
+- last = atomic64_cmpxchg(&last_value, last, ret);
++ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
+ } while (unlikely(last != ret));
+
+ return ret;
+diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
+index 37a458b..e63d183 100644
+--- a/arch/x86/kernel/reboot.c
++++ b/arch/x86/kernel/reboot.c
+@@ -35,7 +35,7 @@ void (*pm_power_off)(void);
+ EXPORT_SYMBOL(pm_power_off);
+
+ static const struct desc_ptr no_idt = {};
+-static int reboot_mode;
++static unsigned short reboot_mode;
+ enum reboot_type reboot_type = BOOT_ACPI;
+ int reboot_force;
+
+@@ -324,13 +324,17 @@ core_initcall(reboot_init);
+ extern const unsigned char machine_real_restart_asm[];
+ extern const u64 machine_real_restart_gdt[3];
+
+-void machine_real_restart(unsigned int type)
++__noreturn void machine_real_restart(unsigned int type)
+ {
+ void *restart_va;
+ unsigned long restart_pa;
+- void (*restart_lowmem)(unsigned int);
++ void (* __noreturn restart_lowmem)(unsigned int);
+ u64 *lowmem_gdt;
+
++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
++ struct desc_struct *gdt;
++#endif
++
+ local_irq_disable();
+
+ /* Write zero to CMOS register number 0x0f, which the BIOS POST
+@@ -356,14 +360,14 @@ void machine_real_restart(unsigned int type)
+ boot)". This seems like a fairly standard thing that gets set by
+ REBOOT.COM programs, and the previous reset routine did this
+ too. */
+- *((unsigned short *)0x472) = reboot_mode;
++ *(unsigned short *)(__va(0x472)) = reboot_mode;
+
+ /* Patch the GDT in the low memory trampoline */
+ lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
+
+ restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
+ restart_pa = virt_to_phys(restart_va);
+- restart_lowmem = (void (*)(unsigned int))restart_pa;
++ restart_lowmem = (void *)restart_pa;
+
+ /* GDT[0]: GDT self-pointer */
+ lowmem_gdt[0] =
+@@ -374,7 +378,33 @@ void machine_real_restart(unsigned int type)
+ GDT_ENTRY(0x009b, restart_pa, 0xffff);
+
+ /* Jump to the identity-mapped low memory code */
++
++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
++ gdt = get_cpu_gdt_table(smp_processor_id());
++ pax_open_kernel();
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
++ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
++ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
++#endif
++#ifdef CONFIG_PAX_KERNEXEC
++ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
++ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
++ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
++ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
++ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
++ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
++#endif
++ pax_close_kernel();
++#endif
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
++ unreachable();
++#else
+ restart_lowmem(type);
++#endif
++
+ }
+ #ifdef CONFIG_APM_MODULE
+ EXPORT_SYMBOL(machine_real_restart);
+@@ -540,7 +570,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
+ * try to force a triple fault and then cycle between hitting the keyboard
+ * controller and doing that
+ */
+-static void native_machine_emergency_restart(void)
++__noreturn static void native_machine_emergency_restart(void)
+ {
+ int i;
+ int attempt = 0;
+@@ -664,13 +694,13 @@ void native_machine_shutdown(void)
+ #endif
+ }
+
+-static void __machine_emergency_restart(int emergency)
++static __noreturn void __machine_emergency_restart(int emergency)
+ {
+ reboot_emergency = emergency;
+ machine_ops.emergency_restart();
+ }
+
+-static void native_machine_restart(char *__unused)
++static __noreturn void native_machine_restart(char *__unused)
+ {
+ printk("machine restart\n");
+
+@@ -679,7 +709,7 @@ static void native_machine_restart(char *__unused)
+ __machine_emergency_restart(0);
+ }
+
+-static void native_machine_halt(void)
++static __noreturn void native_machine_halt(void)
+ {
+ /* stop other cpus and apics */
+ machine_shutdown();
+@@ -690,7 +720,7 @@ static void native_machine_halt(void)
+ stop_this_cpu(NULL);
+ }
+
+-static void native_machine_power_off(void)
++__noreturn static void native_machine_power_off(void)
+ {
+ if (pm_power_off) {
+ if (!reboot_force)
+@@ -699,6 +729,7 @@ static void native_machine_power_off(void)
+ }
+ /* a fallback in case there is no PM info available */
+ tboot_shutdown(TB_SHUTDOWN_HALT);
++ unreachable();
+ }
+
+ struct machine_ops machine_ops = {
+diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
+index 7a6f3b3..bed145d7 100644
+--- a/arch/x86/kernel/relocate_kernel_64.S
++++ b/arch/x86/kernel/relocate_kernel_64.S
+@@ -11,6 +11,7 @@
+ #include <asm/kexec.h>
+ #include <asm/processor-flags.h>
+ #include <asm/pgtable_types.h>
++#include <asm/alternative-asm.h>
+
+ /*
+ * Must be relocatable PIC code callable as a C function
+@@ -160,13 +161,14 @@ identity_mapped:
+ xorq %rbp, %rbp
+ xorq %r8, %r8
+ xorq %r9, %r9
+- xorq %r10, %r9
++ xorq %r10, %r10
+ xorq %r11, %r11
+ xorq %r12, %r12
+ xorq %r13, %r13
+ xorq %r14, %r14
+ xorq %r15, %r15
+
++ pax_force_retaddr 0, 1
+ ret
+
+ 1:
+diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
+index cf0ef98..e3f780b 100644
+--- a/arch/x86/kernel/setup.c
++++ b/arch/x86/kernel/setup.c
+@@ -447,7 +447,7 @@ static void __init parse_setup_data(void)
+
+ switch (data->type) {
+ case SETUP_E820_EXT:
+- parse_e820_ext(data);
++ parse_e820_ext((struct setup_data __force_kernel *)data);
+ break;
+ case SETUP_DTB:
+ add_dtb(pa_data);
+@@ -650,7 +650,7 @@ static void __init trim_bios_range(void)
+ * area (640->1Mb) as ram even though it is not.
+ * take them out.
+ */
+- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
++ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
+ sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
+ }
+
+@@ -773,14 +773,14 @@ void __init setup_arch(char **cmdline_p)
+
+ if (!boot_params.hdr.root_flags)
+ root_mountflags &= ~MS_RDONLY;
+- init_mm.start_code = (unsigned long) _text;
+- init_mm.end_code = (unsigned long) _etext;
++ init_mm.start_code = ktla_ktva((unsigned long) _text);
++ init_mm.end_code = ktla_ktva((unsigned long) _etext);
+ init_mm.end_data = (unsigned long) _edata;
+ init_mm.brk = _brk_end;
+
+- code_resource.start = virt_to_phys(_text);
+- code_resource.end = virt_to_phys(_etext)-1;
+- data_resource.start = virt_to_phys(_etext);
++ code_resource.start = virt_to_phys(ktla_ktva(_text));
++ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
++ data_resource.start = virt_to_phys(_sdata);
+ data_resource.end = virt_to_phys(_edata)-1;
+ bss_resource.start = virt_to_phys(&__bss_start);
+ bss_resource.end = virt_to_phys(&__bss_stop)-1;
+diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
+index 71f4727..16dc9f7 100644
+--- a/arch/x86/kernel/setup_percpu.c
++++ b/arch/x86/kernel/setup_percpu.c
+@@ -21,19 +21,17 @@
+ #include <asm/cpu.h>
+ #include <asm/stackprotector.h>
+
+-DEFINE_PER_CPU(int, cpu_number);
++#ifdef CONFIG_SMP
++DEFINE_PER_CPU(unsigned int, cpu_number);
+ EXPORT_PER_CPU_SYMBOL(cpu_number);
++#endif
+
+-#ifdef CONFIG_X86_64
+ #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
+-#else
+-#define BOOT_PERCPU_OFFSET 0
+-#endif
+
+ DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
+ EXPORT_PER_CPU_SYMBOL(this_cpu_off);
+
+-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
++unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
+ [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
+ };
+ EXPORT_SYMBOL(__per_cpu_offset);
+@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
+ {
+ #ifdef CONFIG_X86_32
+ struct desc_struct gdt;
++ unsigned long base = per_cpu_offset(cpu);
+
+- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
+- 0x2 | DESCTYPE_S, 0x8);
+- gdt.s = 1;
++ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
++ 0x83 | DESCTYPE_S, 0xC);
+ write_gdt_entry(get_cpu_gdt_table(cpu),
+ GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
+ #endif
+@@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void)
+ /* alrighty, percpu areas up and running */
+ delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
+ for_each_possible_cpu(cpu) {
++#ifdef CONFIG_CC_STACKPROTECTOR
++#ifdef CONFIG_X86_32
++ unsigned long canary = per_cpu(stack_canary.canary, cpu);
++#endif
++#endif
+ per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
+ per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
+ per_cpu(cpu_number, cpu) = cpu;
+@@ -247,6 +250,12 @@ void __init setup_per_cpu_areas(void)
+ */
+ set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
+ #endif
++#ifdef CONFIG_CC_STACKPROTECTOR
++#ifdef CONFIG_X86_32
++ if (!cpu)
++ per_cpu(stack_canary.canary, cpu) = canary;
++#endif
++#endif
+ /*
+ * Up to this point, the boot CPU has been using .init.data
+ * area. Reload any changed state for the boot CPU.
+diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
+index 54ddaeb2..22c3bdc 100644
+--- a/arch/x86/kernel/signal.c
++++ b/arch/x86/kernel/signal.c
+@@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsigned long sp)
+ * Align the stack pointer according to the i386 ABI,
+ * i.e. so that on function entry ((sp + 4) & 15) == 0.
+ */
+- sp = ((sp + 4) & -16ul) - 4;
++ sp = ((sp - 12) & -16ul) - 4;
+ #else /* !CONFIG_X86_32 */
+ sp = round_down(sp, 16) - 8;
+ #endif
+@@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
+ * Return an always-bogus address instead so we will die with SIGSEGV.
+ */
+ if (onsigstack && !likely(on_sig_stack(sp)))
+- return (void __user *)-1L;
++ return (__force void __user *)-1L;
+
+ /* save i387 state */
+ if (used_math() && save_i387_xstate(*fpstate) < 0)
+- return (void __user *)-1L;
++ return (__force void __user *)-1L;
+
+ return (void __user *)sp;
+ }
+@@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
+ }
+
+ if (current->mm->context.vdso)
+- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
++ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
+ else
+- restorer = &frame->retcode;
++ restorer = (void __user *)&frame->retcode;
+ if (ka->sa.sa_flags & SA_RESTORER)
+ restorer = ka->sa.sa_restorer;
+
+@@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
+ * reasons and because gdb uses it as a signature to notice
+ * signal handler stack frames.
+ */
+- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
++ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
+
+ if (err)
+ return -EFAULT;
+@@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+ err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+
+ /* Set up to return from userspace. */
+- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
++ if (current->mm->context.vdso)
++ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
++ else
++ restorer = (void __user *)&frame->retcode;
+ if (ka->sa.sa_flags & SA_RESTORER)
+ restorer = ka->sa.sa_restorer;
+ put_user_ex(restorer, &frame->pretcode);
+@@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+ * reasons and because gdb uses it as a signature to notice
+ * signal handler stack frames.
+ */
+- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
++ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
+ } put_user_catch(err);
+
+ if (err)
+@@ -769,7 +772,7 @@ static void do_signal(struct pt_regs *regs)
+ * X86_32: vm86 regs switched out by assembly code before reaching
+ * here, so testing against kernel CS suffices.
+ */
+- if (!user_mode(regs))
++ if (!user_mode_novm(regs))
+ return;
+
+ signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
+index 9f548cb..caf76f7 100644
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -709,17 +709,20 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
+ set_idle_for_cpu(cpu, c_idle.idle);
+ do_rest:
+ per_cpu(current_task, cpu) = c_idle.idle;
++ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
+ #ifdef CONFIG_X86_32
+ /* Stack for startup_32 can be just as for start_secondary onwards */
+ irq_ctx_init(cpu);
+ #else
+ clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
+ initial_gs = per_cpu_offset(cpu);
+- per_cpu(kernel_stack, cpu) =
+- (unsigned long)task_stack_page(c_idle.idle) -
+- KERNEL_STACK_OFFSET + THREAD_SIZE;
++ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
+ #endif
++
++ pax_open_kernel();
+ early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
++ pax_close_kernel();
++
+ initial_code = (unsigned long)start_secondary;
+ stack_start = c_idle.idle->thread.sp;
+
+@@ -861,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
+
+ per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
+
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
++ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
++ KERNEL_PGD_PTRS);
++#endif
++
+ err = do_boot_cpu(apicid, cpu);
+ if (err) {
+ pr_debug("do_boot_cpu failed %d\n", err);
+diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
+index c346d11..d43b163 100644
+--- a/arch/x86/kernel/step.c
++++ b/arch/x86/kernel/step.c
+@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
+ struct desc_struct *desc;
+ unsigned long base;
+
+- seg &= ~7UL;
++ seg >>= 3;
+
+ mutex_lock(&child->mm->context.lock);
+- if (unlikely((seg >> 3) >= child->mm->context.size))
++ if (unlikely(seg >= child->mm->context.size))
+ addr = -1L; /* bogus selector, access would fault */
+ else {
+ desc = child->mm->context.ldt + seg;
+@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
+ addr += base;
+ }
+ mutex_unlock(&child->mm->context.lock);
+- }
++ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
++ addr = ktla_ktva(addr);
+
+ return addr;
+ }
+@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
+ unsigned char opcode[15];
+ unsigned long addr = convert_ip_to_linear(child, regs);
+
++ if (addr == -EINVAL)
++ return 0;
++
+ copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
+ for (i = 0; i < copied; i++) {
+ switch (opcode[i]) {
+diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
+index 0b0cb5f..db6b9ed 100644
+--- a/arch/x86/kernel/sys_i386_32.c
++++ b/arch/x86/kernel/sys_i386_32.c
+@@ -24,17 +24,224 @@
+
+ #include <asm/syscalls.h>
+
+-/*
+- * Do a system call from kernel instead of calling sys_execve so we
+- * end up with proper pt_regs.
+- */
+-int kernel_execve(const char *filename,
+- const char *const argv[],
+- const char *const envp[])
++int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
+ {
+- long __res;
+- asm volatile ("int $0x80"
+- : "=a" (__res)
+- : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
+- return __res;
++ unsigned long pax_task_size = TASK_SIZE;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
++ pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
++ if (len > pax_task_size || addr > pax_task_size - len)
++ return -EINVAL;
++
++ return 0;
++}
++
++unsigned long
++arch_get_unmapped_area(struct file *filp, unsigned long addr,
++ unsigned long len, unsigned long pgoff, unsigned long flags)
++{
++ struct mm_struct *mm = current->mm;
++ struct vm_area_struct *vma;
++ unsigned long start_addr, pax_task_size = TASK_SIZE;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
++ pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
++ pax_task_size -= PAGE_SIZE;
++
++ if (len > pax_task_size)
++ return -ENOMEM;
++
++ if (flags & MAP_FIXED)
++ return addr;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
++ if (addr) {
++ addr = PAGE_ALIGN(addr);
++ if (pax_task_size - len >= addr) {
++ vma = find_vma(mm, addr);
++ if (check_heap_stack_gap(vma, addr, len))
++ return addr;
++ }
++ }
++ if (len > mm->cached_hole_size) {
++ start_addr = addr = mm->free_area_cache;
++ } else {
++ start_addr = addr = mm->mmap_base;
++ mm->cached_hole_size = 0;
++ }
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
++ start_addr = 0x00110000UL;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ start_addr += mm->delta_mmap & 0x03FFF000UL;
++#endif
++
++ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
++ start_addr = addr = mm->mmap_base;
++ else
++ addr = start_addr;
++ }
++#endif
++
++full_search:
++ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
++ /* At this point: (!vma || addr < vma->vm_end). */
++ if (pax_task_size - len < addr) {
++ /*
++ * Start a new search - just in case we missed
++ * some holes.
++ */
++ if (start_addr != mm->mmap_base) {
++ start_addr = addr = mm->mmap_base;
++ mm->cached_hole_size = 0;
++ goto full_search;
++ }
++ return -ENOMEM;
++ }
++ if (check_heap_stack_gap(vma, addr, len))
++ break;
++ if (addr + mm->cached_hole_size < vma->vm_start)
++ mm->cached_hole_size = vma->vm_start - addr;
++ addr = vma->vm_end;
++ if (mm->start_brk <= addr && addr < mm->mmap_base) {
++ start_addr = addr = mm->mmap_base;
++ mm->cached_hole_size = 0;
++ goto full_search;
++ }
++ }
++
++ /*
++ * Remember the place where we stopped the search:
++ */
++ mm->free_area_cache = addr + len;
++ return addr;
++}
++
++unsigned long
++arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
++ const unsigned long len, const unsigned long pgoff,
++ const unsigned long flags)
++{
++ struct vm_area_struct *vma;
++ struct mm_struct *mm = current->mm;
++ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
++ pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
++ pax_task_size -= PAGE_SIZE;
++
++ /* requested length too big for entire address space */
++ if (len > pax_task_size)
++ return -ENOMEM;
++
++ if (flags & MAP_FIXED)
++ return addr;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
++ goto bottomup;
++#endif
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
++ /* requesting a specific address */
++ if (addr) {
++ addr = PAGE_ALIGN(addr);
++ if (pax_task_size - len >= addr) {
++ vma = find_vma(mm, addr);
++ if (check_heap_stack_gap(vma, addr, len))
++ return addr;
++ }
++ }
++
++ /* check if free_area_cache is useful for us */
++ if (len <= mm->cached_hole_size) {
++ mm->cached_hole_size = 0;
++ mm->free_area_cache = mm->mmap_base;
++ }
++
++ /* either no address requested or can't fit in requested address hole */
++ addr = mm->free_area_cache;
++
++ /* make sure it can fit in the remaining address space */
++ if (addr > len) {
++ vma = find_vma(mm, addr-len);
++ if (check_heap_stack_gap(vma, addr - len, len))
++ /* remember the address as a hint for next time */
++ return (mm->free_area_cache = addr-len);
++ }
++
++ if (mm->mmap_base < len)
++ goto bottomup;
++
++ addr = mm->mmap_base-len;
++
++ do {
++ /*
++ * Lookup failure means no vma is above this address,
++ * else if new region fits below vma->vm_start,
++ * return with success:
++ */
++ vma = find_vma(mm, addr);
++ if (check_heap_stack_gap(vma, addr, len))
++ /* remember the address as a hint for next time */
++ return (mm->free_area_cache = addr);
++
++ /* remember the largest hole we saw so far */
++ if (addr + mm->cached_hole_size < vma->vm_start)
++ mm->cached_hole_size = vma->vm_start - addr;
++
++ /* try just below the current vma->vm_start */
++ addr = skip_heap_stack_gap(vma, len);
++ } while (!IS_ERR_VALUE(addr));
++
++bottomup:
++ /*
++ * A failed mmap() very likely causes application failure,
++ * so fall back to the bottom-up function here. This scenario
++ * can happen with large stack limits and large mmap()
++ * allocations.
++ */
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
++ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
++ else
++#endif
++
++ mm->mmap_base = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
++ mm->free_area_cache = mm->mmap_base;
++ mm->cached_hole_size = ~0UL;
++ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
++ /*
++ * Restore the topdown base:
++ */
++ mm->mmap_base = base;
++ mm->free_area_cache = base;
++ mm->cached_hole_size = ~0UL;
++
++ return addr;
+ }
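Both 32-bit lookup paths above funnel every candidate range through check_heap_stack_gap() instead of the open-coded "!vma || addr + len <= vma->vm_start" test used upstream. A minimal sketch of such a helper, assuming its only duty beyond the plain fit test is to keep a guard gap below stack VMAs (the helper this patch actually adds also honors a configurable gap size, elided here):

    /* sketch only: gap hardwired to one page, tunable in the real helper */
    static bool check_heap_stack_gap(const struct vm_area_struct *vma,
                                     unsigned long addr, unsigned long len)
    {
            if (!vma)                          /* nothing above: the range fits */
                    return true;
            if (addr + len > vma->vm_start)    /* overlaps the next VMA */
                    return false;
            if (vma->vm_flags & VM_GROWSDOWN)  /* keep a gap below stacks */
                    return addr + len + PAGE_SIZE <= vma->vm_start;
            return true;
    }

Its companion skip_heap_stack_gap(vma, len), used by the top-down loops, presumably returns the next candidate address below the VMA (minus any required gap) or an error-encoded value once the search would underflow, which is why those loops terminate on IS_ERR_VALUE(addr) rather than on an address comparison.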
+diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
+index 0514890..3dbebce 100644
+--- a/arch/x86/kernel/sys_x86_64.c
++++ b/arch/x86/kernel/sys_x86_64.c
+@@ -95,8 +95,8 @@ out:
+ return error;
+ }
+
+-static void find_start_end(unsigned long flags, unsigned long *begin,
+- unsigned long *end)
++static void find_start_end(struct mm_struct *mm, unsigned long flags,
++ unsigned long *begin, unsigned long *end)
+ {
+ if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
+ unsigned long new_begin;
+@@ -115,7 +115,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
+ *begin = new_begin;
+ }
+ } else {
+- *begin = TASK_UNMAPPED_BASE;
++ *begin = mm->mmap_base;
+ *end = TASK_SIZE;
+ }
+ }
+@@ -132,16 +132,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ if (flags & MAP_FIXED)
+ return addr;
+
+- find_start_end(flags, &begin, &end);
++ find_start_end(mm, flags, &begin, &end);
+
+ if (len > end)
+ return -ENOMEM;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(mm, addr);
+- if (end - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
+ return addr;
+ }
+ if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
+@@ -172,7 +175,7 @@ full_search:
+ }
+ return -ENOMEM;
+ }
+- if (!vma || addr + len <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, addr, len)) {
+ /*
+ * Remember the place where we stopped the search:
+ */
+@@ -195,7 +198,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ {
+ struct vm_area_struct *vma;
+ struct mm_struct *mm = current->mm;
+- unsigned long addr = addr0;
++ unsigned long base = mm->mmap_base, addr = addr0;
+
+ /* requested length too big for entire address space */
+ if (len > TASK_SIZE)
+@@ -208,13 +211,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
+ goto bottomup;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ /* requesting a specific address */
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+- vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
+- return addr;
++ if (TASK_SIZE - len >= addr) {
++ vma = find_vma(mm, addr);
++ if (check_heap_stack_gap(vma, addr, len))
++ return addr;
++ }
+ }
+
+ /* check if free_area_cache is useful for us */
+@@ -232,7 +240,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ ALIGN_TOPDOWN);
+
+ vma = find_vma(mm, tmp_addr);
+- if (!vma || tmp_addr + len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, tmp_addr, len))
+ /* remember the address as a hint for next time */
+ return mm->free_area_cache = tmp_addr;
+ }
+@@ -251,7 +259,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
+- if (!vma || addr+len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr, len))
+ /* remember the address as a hint for next time */
+ return mm->free_area_cache = addr;
+
+@@ -260,8 +268,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ mm->cached_hole_size = vma->vm_start - addr;
+
+ /* try just below the current vma->vm_start */
+- addr = vma->vm_start-len;
+- } while (len < vma->vm_start);
++ addr = skip_heap_stack_gap(vma, len);
++ } while (!IS_ERR_VALUE(addr));
+
+ bottomup:
+ /*
+@@ -270,13 +278,21 @@ bottomup:
+ * can happen with large stack limits and large mmap()
+ * allocations.
+ */
++ mm->mmap_base = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
++ mm->free_area_cache = mm->mmap_base;
+ mm->cached_hole_size = ~0UL;
+- mm->free_area_cache = TASK_UNMAPPED_BASE;
+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
+ /*
+ * Restore the topdown base:
+ */
+- mm->free_area_cache = mm->mmap_base;
++ mm->mmap_base = base;
++ mm->free_area_cache = base;
+ mm->cached_hole_size = ~0UL;
+
+ return addr;
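Note the deliberately dangling if in the CONFIG_PAX_RANDMMAP hunks: when randomization is compiled in and active for the mm, the condition is false and the entire address-hint block that follows is skipped, so a non-MAP_FIXED hint from userland cannot be used to probe or defeat mmap base randomization. Expanded, the construct reads:

    #ifdef CONFIG_PAX_RANDMMAP
            if (!(mm->pax_flags & MF_PAX_RANDMMAP))
    #endif
            if (addr) {
                    /* ... honor the caller's hint ... */
            }
    /* i.e. "only honor the hint when RANDMMAP is off for this mm" */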
+diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
+index 9a0e312..e6f66f2 100644
+--- a/arch/x86/kernel/syscall_table_32.S
++++ b/arch/x86/kernel/syscall_table_32.S
+@@ -1,3 +1,4 @@
++.section .rodata,"a",@progbits
+ ENTRY(sys_call_table)
+ .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
+ .long sys_exit
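Placing sys_call_table in .rodata is a small but classic hardening step: the table is an array of function pointers consulted on every int $0x80 system call, and keeping it in a read-only mapping means a rootkit can no longer hook system calls by simply overwriting its entries at runtime.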
+diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
+index e2410e2..4fe3fbc 100644
+--- a/arch/x86/kernel/tboot.c
++++ b/arch/x86/kernel/tboot.c
+@@ -219,7 +219,7 @@ static int tboot_setup_sleep(void)
+
+ void tboot_shutdown(u32 shutdown_type)
+ {
+- void (*shutdown)(void);
++ void (* __noreturn shutdown)(void);
+
+ if (!tboot_enabled())
+ return;
+@@ -241,7 +241,7 @@ void tboot_shutdown(u32 shutdown_type)
+
+ switch_to_tboot_pt();
+
+- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
++ shutdown = (void *)tboot->shutdown_entry;
+ shutdown();
+
+ /* should not reach here */
+@@ -298,7 +298,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
+ tboot_shutdown(acpi_shutdown_map[sleep_state]);
+ }
+
+-static atomic_t ap_wfs_count;
++static atomic_unchecked_t ap_wfs_count;
+
+ static int tboot_wait_for_aps(int num_aps)
+ {
+@@ -322,9 +322,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
+ {
+ switch (action) {
+ case CPU_DYING:
+- atomic_inc(&ap_wfs_count);
++ atomic_inc_unchecked(&ap_wfs_count);
+ if (num_online_cpus() == 1)
+- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
++ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
+ return NOTIFY_BAD;
+ break;
+ }
+@@ -343,7 +343,7 @@ static __init int tboot_late_init(void)
+
+ tboot_create_trampoline();
+
+- atomic_set(&ap_wfs_count, 0);
++ atomic_set_unchecked(&ap_wfs_count, 0);
+ register_hotcpu_notifier(&tboot_cpu_notifier);
+ return 0;
+ }
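ap_wfs_count is converted to atomic_unchecked_t because, under PAX_REFCOUNT (see the atomic64 hunks further down), the regular atomic_t operations trap on signed overflow to catch reference-count wraps; counters that are mere statistics or event counts get switched to the _unchecked variants so a legitimate wrap cannot raise a false alarm. The type is layout-compatible with atomic_t by design, roughly:

    typedef struct {
            int counter;
    } atomic_unchecked_t;   /* same layout as atomic_t; its ops skip the overflow trap */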
+diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
+index dd5fbf4..b7f2232 100644
+--- a/arch/x86/kernel/time.c
++++ b/arch/x86/kernel/time.c
+@@ -31,9 +31,9 @@ unsigned long profile_pc(struct pt_regs *regs)
+ {
+ unsigned long pc = instruction_pointer(regs);
+
+- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
++ if (!user_mode(regs) && in_lock_functions(pc)) {
+ #ifdef CONFIG_FRAME_POINTER
+- return *(unsigned long *)(regs->bp + sizeof(long));
++ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
+ #else
+ unsigned long *sp =
+ (unsigned long *)kernel_stack_pointer(regs);
+@@ -42,11 +42,17 @@ unsigned long profile_pc(struct pt_regs *regs)
+ * or above a saved flags. Eflags has bits 22-31 zero,
+ * kernel addresses don't.
+ */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ return ktla_ktva(sp[0]);
++#else
+ if (sp[0] >> 22)
+ return sp[0];
+ if (sp[1] >> 22)
+ return sp[1];
+ #endif
++
++#endif
+ }
+ return pc;
+ }
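profile_pc() now wraps the recovered return addresses in ktla_ktva(). Under KERNEXEC on 32-bit the kernel text executes at a different alias than the one the symbol tables describe (the vmlinux.lds.S hunk below shifts .text down by __KERNEL_TEXT_OFFSET), so an address fished off the stack must be translated back before it is useful for profiling. As an assumption consistent with that linker-script change, the translation is a constant offset:

    /* assumed definition, inferred from the vmlinux.lds.S hunk in this patch */
    #define ktla_ktva(addr) ((addr) + __KERNEL_TEXT_OFFSET)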
+diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
+index 6bb7b85..dd853e1 100644
+--- a/arch/x86/kernel/tls.c
++++ b/arch/x86/kernel/tls.c
+@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
+ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+ return -EINVAL;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
++ return -EINVAL;
++#endif
++
+ set_tls_desc(p, idx, &info, 1);
+
+ return 0;
+diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
+index 451c0a7..e57f551 100644
+--- a/arch/x86/kernel/trampoline_32.S
++++ b/arch/x86/kernel/trampoline_32.S
+@@ -32,6 +32,12 @@
+ #include <asm/segment.h>
+ #include <asm/page_types.h>
+
++#ifdef CONFIG_PAX_KERNEXEC
++#define ta(X) (X)
++#else
++#define ta(X) ((X) - __PAGE_OFFSET)
++#endif
++
+ #ifdef CONFIG_SMP
+
+ .section ".x86_trampoline","a"
+@@ -62,7 +68,7 @@ r_base = .
+ inc %ax # protected mode (PE) bit
+ lmsw %ax # into protected mode
+ # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
+- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
++ ljmpl $__BOOT_CS, $ta(startup_32_smp)
+
+ # These need to be in the same 64K segment as the above;
+ # hence we don't use the boot_gdt_descr defined in head.S
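The ta() macro abstracts the virtual-to-physical fixup for the protected-mode far jump. Without KERNEXEC the trampoline must subtract __PAGE_OFFSET (0xC0000000 by default on i386) because paging is not yet enabled when the jump executes; for example, startup_32_smp linked at 0xC1000000 becomes physical 0x01000000. With KERNEXEC the text symbols are already linked at their load addresses (see the vmlinux.lds.S hunk below), so the symbol value is used as-is.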
+diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
+index 09ff517..df19fbff 100644
+--- a/arch/x86/kernel/trampoline_64.S
++++ b/arch/x86/kernel/trampoline_64.S
+@@ -90,7 +90,7 @@ startup_32:
+ movl $__KERNEL_DS, %eax # Initialize the %ds segment register
+ movl %eax, %ds
+
+- movl $X86_CR4_PAE, %eax
++ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
+ movl %eax, %cr4 # Enable PAE mode
+
+ # Setup trampoline 4 level pagetables
+@@ -138,7 +138,7 @@ tidt:
+ # so the kernel can live anywhere
+ .balign 4
+ tgdt:
+- .short tgdt_end - tgdt # gdt limit
++ .short tgdt_end - tgdt - 1 # gdt limit
+ .long tgdt - r_base
+ .short 0
+ .quad 0x00cf9b000000ffff # __KERNEL32_CS
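The tgdt fix corrects an architectural off-by-one: the 16-bit limit field of a GDTR image holds the offset of the last valid byte, i.e. the table size minus one. A descriptor table of N 8-byte entries spans 8*N bytes and must be loaded with limit 8*N - 1; "tgdt_end - tgdt" would declare one byte too many. The CR4 change in the same file additionally enables PSE and PGE alongside PAE while the trampoline builds its page tables.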
+diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
+index a8e3eb8..c9dbd7d 100644
+--- a/arch/x86/kernel/traps.c
++++ b/arch/x86/kernel/traps.c
+@@ -70,12 +70,6 @@ asmlinkage int system_call(void);
+
+ /* Do we ignore FPU interrupts ? */
+ char ignore_fpu_irq;
+-
+-/*
+- * The IDT has to be page-aligned to simplify the Pentium
+- * F0 0F bug workaround.
+- */
+-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
+ #endif
+
+ DECLARE_BITMAP(used_vectors, NR_VECTORS);
+@@ -108,13 +102,13 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
+ }
+
+ static void __kprobes
+-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
++do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
+ long error_code, siginfo_t *info)
+ {
+ struct task_struct *tsk = current;
+
+ #ifdef CONFIG_X86_32
+- if (regs->flags & X86_VM_MASK) {
++ if (v8086_mode(regs)) {
+ /*
+ * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
+ * On nmi (interrupt 2), do_trap should not be called.
+@@ -125,7 +119,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
+ }
+ #endif
+
+- if (!user_mode(regs))
++ if (!user_mode_novm(regs))
+ goto kernel_trap;
+
+ #ifdef CONFIG_X86_32
+@@ -148,7 +142,7 @@ trap_signal:
+ printk_ratelimit()) {
+ printk(KERN_INFO
+ "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
+- tsk->comm, tsk->pid, str,
++ tsk->comm, task_pid_nr(tsk), str,
+ regs->ip, regs->sp, error_code);
+ print_vma_addr(" in ", regs->ip);
+ printk("\n");
+@@ -165,8 +159,20 @@ kernel_trap:
+ if (!fixup_exception(regs)) {
+ tsk->thread.error_code = error_code;
+ tsk->thread.trap_no = trapnr;
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
++ str = "PAX: suspicious stack segment fault";
++#endif
++
+ die(str, regs, error_code);
+ }
++
++#ifdef CONFIG_PAX_REFCOUNT
++ if (trapnr == 4)
++ pax_report_refcount_overflow(regs);
++#endif
++
+ return;
+
+ #ifdef CONFIG_X86_32
+@@ -255,14 +261,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
+ conditional_sti(regs);
+
+ #ifdef CONFIG_X86_32
+- if (regs->flags & X86_VM_MASK)
++ if (v8086_mode(regs))
+ goto gp_in_vm86;
+ #endif
+
+ tsk = current;
+- if (!user_mode(regs))
++ if (!user_mode_novm(regs))
+ goto gp_in_kernel;
+
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
++ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
++ struct mm_struct *mm = tsk->mm;
++ unsigned long limit;
++
++ down_write(&mm->mmap_sem);
++ limit = mm->context.user_cs_limit;
++ if (limit < TASK_SIZE) {
++ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
++ up_write(&mm->mmap_sem);
++ return;
++ }
++ up_write(&mm->mmap_sem);
++ }
++#endif
++
+ tsk->thread.error_code = error_code;
+ tsk->thread.trap_no = 13;
+
+@@ -295,6 +317,13 @@ gp_in_kernel:
+ if (notify_die(DIE_GPF, "general protection fault", regs,
+ error_code, 13, SIGSEGV) == NOTIFY_STOP)
+ return;
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
++ die("PAX: suspicious general protection fault", regs, error_code);
++ else
++#endif
++
+ die("general protection fault", regs, error_code);
+ }
+
+@@ -414,7 +443,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
+ /* It's safe to allow irq's after DR6 has been saved */
+ preempt_conditional_sti(regs);
+
+- if (regs->flags & X86_VM_MASK) {
++ if (v8086_mode(regs)) {
+ handle_vm86_trap((struct kernel_vm86_regs *) regs,
+ error_code, 1);
+ preempt_conditional_cli(regs);
+@@ -428,7 +457,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
+ * We already checked v86 mode above, so we can check for kernel mode
+ * by just checking the CPL of CS.
+ */
+- if ((dr6 & DR_STEP) && !user_mode(regs)) {
++ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
+ tsk->thread.debugreg6 &= ~DR_STEP;
+ set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
+ regs->flags &= ~X86_EFLAGS_TF;
+@@ -457,7 +486,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
+ return;
+ conditional_sti(regs);
+
+- if (!user_mode_vm(regs))
++ if (!user_mode(regs))
+ {
+ if (!fixup_exception(regs)) {
+ task->thread.error_code = error_code;
+@@ -568,7 +597,7 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
+ void __math_state_restore(void)
+ {
+ struct thread_info *thread = current_thread_info();
+- struct task_struct *tsk = thread->task;
++ struct task_struct *tsk = current;
+
+ /*
+ * Paranoid restore. send a SIGSEGV if we fail to restore the state.
+@@ -595,8 +624,7 @@ void __math_state_restore(void)
+ */
+ asmlinkage void math_state_restore(void)
+ {
+- struct thread_info *thread = current_thread_info();
+- struct task_struct *tsk = thread->task;
++ struct task_struct *tsk = current;
+
+ if (!tsk_used_math(tsk)) {
+ local_irq_enable();
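The traps.c hunks systematically invert the naming of the user-mode predicates: upstream user_mode() on 32-bit checks only the CS privilege level and so misclassifies VM86 tasks, while user_mode_vm() also counts VM86 as user context. The patch appears to make the VM86-aware test the default spelling user_mode() and keep the raw-CPL test around as user_mode_novm(), so the common name is the safe one. A sketch of the renamed predicates, assuming the PaX convention:

    static inline int user_mode_novm(struct pt_regs *regs)
    {
            return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;  /* CPL only */
    }
    static inline int user_mode(struct pt_regs *regs)
    {
            /* VM86 (X86_VM_MASK set in EFLAGS) is user context too */
            return (regs->flags & X86_VM_MASK) || user_mode_novm(regs);
    }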
+diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
+index b9242ba..50c5edd 100644
+--- a/arch/x86/kernel/verify_cpu.S
++++ b/arch/x86/kernel/verify_cpu.S
+@@ -20,6 +20,7 @@
+ * arch/x86/boot/compressed/head_64.S: Boot cpu verification
+ * arch/x86/kernel/trampoline_64.S: secondary processor verification
+ * arch/x86/kernel/head_32.S: processor startup
++ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
+ *
+ * verify_cpu, returns the status of longmode and SSE in register %eax.
+ * 0: Success 1: Failure
+diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
+index 863f875..4307295 100644
+--- a/arch/x86/kernel/vm86_32.c
++++ b/arch/x86/kernel/vm86_32.c
+@@ -41,6 +41,7 @@
+ #include <linux/ptrace.h>
+ #include <linux/audit.h>
+ #include <linux/stddef.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/io.h>
+@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
+ do_exit(SIGSEGV);
+ }
+
+- tss = &per_cpu(init_tss, get_cpu());
++ tss = init_tss + get_cpu();
+ current->thread.sp0 = current->thread.saved_sp0;
+ current->thread.sysenter_cs = __KERNEL_CS;
+ load_sp0(tss, &current->thread);
+@@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
+ struct task_struct *tsk;
+ int tmp, ret = -EPERM;
+
++#ifdef CONFIG_GRKERNSEC_VM86
++ if (!capable(CAP_SYS_RAWIO)) {
++ gr_handle_vm86();
++ goto out;
++ }
++#endif
++
+ tsk = current;
+ if (tsk->thread.saved_sp0)
+ goto out;
+@@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
+ int tmp, ret;
+ struct vm86plus_struct __user *v86;
+
++#ifdef CONFIG_GRKERNSEC_VM86
++ if (!capable(CAP_SYS_RAWIO)) {
++ gr_handle_vm86();
++ ret = -EPERM;
++ goto out;
++ }
++#endif
++
+ tsk = current;
+ switch (cmd) {
+ case VM86_REQUEST_IRQ:
+@@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
+ tsk->thread.saved_fs = info->regs32->fs;
+ tsk->thread.saved_gs = get_user_gs(info->regs32);
+
+- tss = &per_cpu(init_tss, get_cpu());
++ tss = init_tss + get_cpu();
+ tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
+ if (cpu_has_sep)
+ tsk->thread.sysenter_cs = 0;
+@@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
+ goto cannot_handle;
+ if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
+ goto cannot_handle;
+- intr_ptr = (unsigned long __user *) (i << 2);
++ intr_ptr = (__force unsigned long __user *) (i << 2);
+ if (get_user(segoffs, intr_ptr))
+ goto cannot_handle;
+ if ((segoffs >> 16) == BIOSSEG)
+diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
+index 0f703f1..9e15f64 100644
+--- a/arch/x86/kernel/vmlinux.lds.S
++++ b/arch/x86/kernel/vmlinux.lds.S
+@@ -26,6 +26,13 @@
+ #include <asm/page_types.h>
+ #include <asm/cache.h>
+ #include <asm/boot.h>
++#include <asm/segment.h>
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
++#else
++#define __KERNEL_TEXT_OFFSET 0
++#endif
+
+ #undef i386 /* in case the preprocessor is a 32bit one */
+
+@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
+
+ PHDRS {
+ text PT_LOAD FLAGS(5); /* R_E */
++#ifdef CONFIG_X86_32
++ module PT_LOAD FLAGS(5); /* R_E */
++#endif
++#ifdef CONFIG_XEN
++ rodata PT_LOAD FLAGS(5); /* R_E */
++#else
++ rodata PT_LOAD FLAGS(4); /* R__ */
++#endif
+ data PT_LOAD FLAGS(6); /* RW_ */
+-#ifdef CONFIG_X86_64
++ init.begin PT_LOAD FLAGS(6); /* RW_ */
+ #ifdef CONFIG_SMP
+ percpu PT_LOAD FLAGS(6); /* RW_ */
+ #endif
++ text.init PT_LOAD FLAGS(5); /* R_E */
++ text.exit PT_LOAD FLAGS(5); /* R_E */
+ init PT_LOAD FLAGS(7); /* RWE */
+-#endif
+ note PT_NOTE FLAGS(0); /* ___ */
+ }
+
+ SECTIONS
+ {
+ #ifdef CONFIG_X86_32
+- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
+- phys_startup_32 = startup_32 - LOAD_OFFSET;
++ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
+ #else
+- . = __START_KERNEL;
+- phys_startup_64 = startup_64 - LOAD_OFFSET;
++ . = __START_KERNEL;
+ #endif
+
+ /* Text and read-only data */
+- .text : AT(ADDR(.text) - LOAD_OFFSET) {
+- _text = .;
++ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
+ /* bootstrapping code */
++#ifdef CONFIG_X86_32
++ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
++#else
++ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
++#endif
++ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
++ _text = .;
+ HEAD_TEXT
+ #ifdef CONFIG_X86_32
+ . = ALIGN(PAGE_SIZE);
+@@ -108,13 +128,47 @@ SECTIONS
+ IRQENTRY_TEXT
+ *(.fixup)
+ *(.gnu.warning)
+- /* End of text section */
+- _etext = .;
+ } :text = 0x9090
+
+- NOTES :text :note
++ . += __KERNEL_TEXT_OFFSET;
+
+- EXCEPTION_TABLE(16) :text = 0x9090
++#ifdef CONFIG_X86_32
++ . = ALIGN(PAGE_SIZE);
++ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
++
++#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
++ MODULES_EXEC_VADDR = .;
++ BYTE(0)
++ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
++ . = ALIGN(HPAGE_SIZE);
++ MODULES_EXEC_END = . - 1;
++#endif
++
++ } :module
++#endif
++
++ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
++ /* End of text section */
++ _etext = . - __KERNEL_TEXT_OFFSET;
++ }
++
++#ifdef CONFIG_X86_32
++ . = ALIGN(PAGE_SIZE);
++ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
++ *(.idt)
++ . = ALIGN(PAGE_SIZE);
++ *(.empty_zero_page)
++ *(.initial_pg_fixmap)
++ *(.initial_pg_pmd)
++ *(.initial_page_table)
++ *(.swapper_pg_dir)
++ } :rodata
++#endif
++
++ . = ALIGN(PAGE_SIZE);
++ NOTES :rodata :note
++
++ EXCEPTION_TABLE(16) :rodata
+
+ #if defined(CONFIG_DEBUG_RODATA)
+ /* .text should occupy whole number of pages */
+@@ -126,16 +180,20 @@ SECTIONS
+
+ /* Data */
+ .data : AT(ADDR(.data) - LOAD_OFFSET) {
++
++#ifdef CONFIG_PAX_KERNEXEC
++ . = ALIGN(HPAGE_SIZE);
++#else
++ . = ALIGN(PAGE_SIZE);
++#endif
++
+ /* Start of data section */
+ _sdata = .;
+
+ /* init_task */
+ INIT_TASK_DATA(THREAD_SIZE)
+
+-#ifdef CONFIG_X86_32
+- /* 32 bit has nosave before _edata */
+ NOSAVE_DATA
+-#endif
+
+ PAGE_ALIGNED_DATA(PAGE_SIZE)
+
+@@ -176,12 +234,19 @@ SECTIONS
+ #endif /* CONFIG_X86_64 */
+
+ /* Init code and data - will be freed after init */
+- . = ALIGN(PAGE_SIZE);
+ .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
++ BYTE(0)
++
++#ifdef CONFIG_PAX_KERNEXEC
++ . = ALIGN(HPAGE_SIZE);
++#else
++ . = ALIGN(PAGE_SIZE);
++#endif
++
+ __init_begin = .; /* paired with __init_end */
+- }
++ } :init.begin
+
+-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
++#ifdef CONFIG_SMP
+ /*
+ * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
+ * output PHDR, so the next output section - .init.text - should
+@@ -190,12 +255,27 @@ SECTIONS
+ PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
+ #endif
+
+- INIT_TEXT_SECTION(PAGE_SIZE)
+-#ifdef CONFIG_X86_64
+- :init
+-#endif
++ . = ALIGN(PAGE_SIZE);
++ init_begin = .;
++ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
++ VMLINUX_SYMBOL(_sinittext) = .;
++ INIT_TEXT
++ VMLINUX_SYMBOL(_einittext) = .;
++ . = ALIGN(PAGE_SIZE);
++ } :text.init
+
+- INIT_DATA_SECTION(16)
++ /*
++	 * .exit.text is discarded at runtime, not link time, to deal with
++ * references from .altinstructions and .eh_frame
++ */
++ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
++ EXIT_TEXT
++ . = ALIGN(16);
++ } :text.exit
++ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
++
++ . = ALIGN(PAGE_SIZE);
++ INIT_DATA_SECTION(16) :init
+
+ /*
+ * Code and data for a variety of lowlevel trampolines, to be
+@@ -269,19 +349,12 @@ SECTIONS
+ }
+
+ . = ALIGN(8);
+- /*
+- * .exit.text is discard at runtime, not link time, to deal with
+- * references from .altinstructions and .eh_frame
+- */
+- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
+- EXIT_TEXT
+- }
+
+ .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
+ EXIT_DATA
+ }
+
+-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
++#ifndef CONFIG_SMP
+ PERCPU_SECTION(INTERNODE_CACHE_BYTES)
+ #endif
+
+@@ -300,16 +373,10 @@ SECTIONS
+ .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
+ __smp_locks = .;
+ *(.smp_locks)
+- . = ALIGN(PAGE_SIZE);
+ __smp_locks_end = .;
++ . = ALIGN(PAGE_SIZE);
+ }
+
+-#ifdef CONFIG_X86_64
+- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
+- NOSAVE_DATA
+- }
+-#endif
+-
+ /* BSS */
+ . = ALIGN(PAGE_SIZE);
+ .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
+@@ -325,6 +392,7 @@ SECTIONS
+ __brk_base = .;
+ . += 64 * 1024; /* 64k alignment slop space */
+ *(.brk_reservation) /* areas brk users have reserved */
++ . = ALIGN(HPAGE_SIZE);
+ __brk_limit = .;
+ }
+
+@@ -351,13 +419,12 @@ SECTIONS
+ * for the boot processor.
+ */
+ #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
+-INIT_PER_CPU(gdt_page);
+ INIT_PER_CPU(irq_stack_union);
+
+ /*
+ * Build-time check on the image size:
+ */
+-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
++. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
+ "kernel image bigger than KERNEL_IMAGE_SIZE");
+
+ #ifdef CONFIG_SMP
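Taken together, the linker-script changes enforce W^X at the ELF segment level: text, the reserved module space, and init code stay executable but read-only; rodata (along with the NOTES and exception table moved into it) drops the execute bit except on Xen; only data and the init data remain writable, and the HPAGE_SIZE alignments let KERNEXEC cover the read-only regions with large pages. Decoding the PT_LOAD FLAGS() values with the standard ELF p_flags bits (PF_X=1, PF_W=2, PF_R=4): FLAGS(5) = R_E, FLAGS(4) = R__, FLAGS(6) = RW_, FLAGS(7) = RWE, which is why only the catch-all init segment keeps 7.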
+diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
+index e4d4a22..47ee71f 100644
+--- a/arch/x86/kernel/vsyscall_64.c
++++ b/arch/x86/kernel/vsyscall_64.c
+@@ -57,15 +57,13 @@ DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
+ .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
+ };
+
+-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = NATIVE;
++static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
+
+ static int __init vsyscall_setup(char *str)
+ {
+ if (str) {
+ if (!strcmp("emulate", str))
+ vsyscall_mode = EMULATE;
+- else if (!strcmp("native", str))
+- vsyscall_mode = NATIVE;
+ else if (!strcmp("none", str))
+ vsyscall_mode = NONE;
+ else
+@@ -178,7 +176,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
+
+ tsk = current;
+ if (seccomp_mode(&tsk->seccomp))
+- do_exit(SIGKILL);
++ do_group_exit(SIGKILL);
+
+ switch (vsyscall_nr) {
+ case 0:
+@@ -220,8 +218,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
+ return true;
+
+ sigsegv:
+- force_sig(SIGSEGV, current);
+- return true;
++ do_group_exit(SIGKILL);
+ }
+
+ /*
+@@ -274,10 +271,7 @@ void __init map_vsyscall(void)
+ extern char __vvar_page;
+ unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
+
+- __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
+- vsyscall_mode == NATIVE
+- ? PAGE_KERNEL_VSYSCALL
+- : PAGE_KERNEL_VVAR);
++ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
+ BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
+ (unsigned long)VSYSCALL_START);
+
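With the NATIVE mode removed, the legacy vsyscall page is never mapped executable: PAGE_KERNEL_VVAR makes every entry at the fixed address fault, and emulate_vsyscall() services the three legacy entries instead. Because the page sits at an ABI-frozen address, keeping real code there would hand attackers a reliable ROP target, which is what forcing emulation removes. The SIGKILL escalation (do_group_exit() instead of force_sig(SIGSEGV)) turns a malformed vsyscall attempt, a strong exploit indicator, from a catchable signal into immediate termination of the whole thread group.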
+diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
+index 9796c2f..f686fbf 100644
+--- a/arch/x86/kernel/x8664_ksyms_64.c
++++ b/arch/x86/kernel/x8664_ksyms_64.c
+@@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
+ EXPORT_SYMBOL(copy_user_generic_string);
+ EXPORT_SYMBOL(copy_user_generic_unrolled);
+ EXPORT_SYMBOL(__copy_user_nocache);
+-EXPORT_SYMBOL(_copy_from_user);
+-EXPORT_SYMBOL(_copy_to_user);
+
+ EXPORT_SYMBOL(copy_page);
+ EXPORT_SYMBOL(clear_page);
+diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
+index a391134..d0b63b6e 100644
+--- a/arch/x86/kernel/xsave.c
++++ b/arch/x86/kernel/xsave.c
+@@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
+ fx_sw_user->xstate_size > fx_sw_user->extended_size)
+ return -EINVAL;
+
+- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
++ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
+ fx_sw_user->extended_size -
+ FP_XSTATE_MAGIC2_SIZE));
+ if (err)
+@@ -267,7 +267,7 @@ fx_only:
+ * the other extended state.
+ */
+ xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
+- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
++ return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
+ }
+
+ /*
+@@ -299,7 +299,7 @@ int restore_i387_xstate(void __user *buf)
+ if (use_xsave())
+ err = restore_user_xstate(buf);
+ else
+- err = fxrstor_checking((__force struct i387_fxsave_struct *)
++ err = fxrstor_checking((struct i387_fxsave_struct __force_kernel *)
+ buf);
+ if (unlikely(err)) {
+ /*
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index f1e3be1..588efc8 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -249,6 +249,7 @@ struct gprefix {
+
+ #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
+ do { \
++ unsigned long _tmp; \
+ __asm__ __volatile__ ( \
+ _PRE_EFLAGS("0", "4", "2") \
+ _op _suffix " %"_x"3,%1; " \
+@@ -263,8 +264,6 @@ struct gprefix {
+ /* Raw emulation: instruction has two explicit operands. */
+ #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
+ do { \
+- unsigned long _tmp; \
+- \
+ switch ((ctxt)->dst.bytes) { \
+ case 2: \
+ ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
+@@ -280,7 +279,6 @@ struct gprefix {
+
+ #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
+ do { \
+- unsigned long _tmp; \
+ switch ((ctxt)->dst.bytes) { \
+ case 1: \
+ ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 54abb40..a192606 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -53,7 +53,7 @@
+ #define APIC_BUS_CYCLE_NS 1
+
+ /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
+-#define apic_debug(fmt, arg...)
++#define apic_debug(fmt, arg...) do {} while (0)
+
+ #define APIC_LVT_NUM 6
+ /* 14 is the version for Xeon and Pentium 8.4.8*/
+diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
+index f1b36cf..af8a124 100644
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -3555,7 +3555,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
+
+ pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
+
+- invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
++ invlpg_counter = atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter);
+
+ /*
+ * Assume that the pte write on a page table of the same type
+@@ -3587,7 +3587,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
+ }
+
+ spin_lock(&vcpu->kvm->mmu_lock);
+- if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
++ if (atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
+ gentry = 0;
+ kvm_mmu_free_some_pages(vcpu);
+ ++vcpu->kvm->stat.mmu_pte_write;
+diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
+index 9299410..ade2f9b 100644
+--- a/arch/x86/kvm/paging_tmpl.h
++++ b/arch/x86/kvm/paging_tmpl.h
+@@ -197,7 +197,7 @@ retry_walk:
+ if (unlikely(kvm_is_error_hva(host_addr)))
+ goto error;
+
+- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
++ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
+ if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
+ goto error;
+
+@@ -705,7 +705,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
+ if (need_flush)
+ kvm_flush_remote_tlbs(vcpu->kvm);
+
+- atomic_inc(&vcpu->kvm->arch.invlpg_counter);
++ atomic_inc_unchecked(&vcpu->kvm->arch.invlpg_counter);
+
+ spin_unlock(&vcpu->kvm->mmu_lock);
+
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index e32243e..a6e6172 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -3400,7 +3400,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
+ int cpu = raw_smp_processor_id();
+
+ struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
++
++ pax_open_kernel();
+ sd->tss_desc->type = 9; /* available 32/64-bit TSS */
++ pax_close_kernel();
++
+ load_TR_desc();
+ }
+
+@@ -3778,6 +3782,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
+ #endif
+ #endif
+
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ __set_fs(current_thread_info()->addr_limit);
++#endif
++
+ reload_tss(vcpu);
+
+ local_irq_disable();
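reload_tss() in both svm.c and vmx.c has to flip the TSS descriptor type back to "available" before the task register is reloaded, but under KERNEXEC the GDT lives in read-only memory, so the write is bracketed with pax_open_kernel()/pax_close_kernel(). A plausible minimal form of that bracket on x86, assuming it toggles CR0.WP (the in-tree version also guards against preemption, omitted here):

    static inline unsigned long pax_open_kernel(void)
    {
            write_cr0(read_cr0() & ~X86_CR0_WP);    /* let ring-0 write RO pages */
            barrier();
            return 0;
    }
    static inline unsigned long pax_close_kernel(void)
    {
            barrier();
            write_cr0(read_cr0() | X86_CR0_WP);     /* re-arm write protection */
            return 0;
    }

The __set_fs(current_thread_info()->addr_limit) calls after the guest runs serve the UDEREF side: they restore the expected segment state that the hardware virtualization path may have clobbered.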
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 579a0b5..ed7bbf9 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -1305,7 +1305,11 @@ static void reload_tss(void)
+ struct desc_struct *descs;
+
+ descs = (void *)gdt->address;
++
++ pax_open_kernel();
+ descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
++ pax_close_kernel();
++
+ load_TR_desc();
+ }
+
+@@ -2633,8 +2637,11 @@ static __init int hardware_setup(void)
+ if (!cpu_has_vmx_flexpriority())
+ flexpriority_enabled = 0;
+
+- if (!cpu_has_vmx_tpr_shadow())
+- kvm_x86_ops->update_cr8_intercept = NULL;
++ if (!cpu_has_vmx_tpr_shadow()) {
++ pax_open_kernel();
++ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
++ pax_close_kernel();
++ }
+
+ if (enable_ept && !cpu_has_vmx_ept_2m_page())
+ kvm_disable_largepages();
+@@ -3648,7 +3655,7 @@ static void vmx_set_constant_host_state(void)
+ vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
+
+ asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
+- vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */
++ vmcs_writel(HOST_RIP, ktla_ktva(tmpl)); /* 22.2.5 */
+
+ rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
+ vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
+@@ -6169,6 +6176,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+ "jmp .Lkvm_vmx_return \n\t"
+ ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
+ ".Lkvm_vmx_return: "
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
++ ".Lkvm_vmx_return2: "
++#endif
++
+ /* Save guest registers, load host registers, keep flags */
+ "mov %0, %c[wordsize](%%"R"sp) \n\t"
+ "pop %0 \n\t"
+@@ -6217,6 +6230,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+ #endif
+ [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
+ [wordsize]"i"(sizeof(ulong))
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ ,[cs]"i"(__KERNEL_CS)
++#endif
++
+ : "cc", "memory"
+ , R"ax", R"bx", R"di", R"si"
+ #ifdef CONFIG_X86_64
+@@ -6245,7 +6263,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+ }
+ }
+
+- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
++ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ loadsegment(fs, __KERNEL_PERCPU);
++#endif
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ __set_fs(current_thread_info()->addr_limit);
++#endif
++
+ vmx->loaded_vmcs->launched = 1;
+
+ vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 4c938da..4ddef65 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1345,8 +1345,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
+ {
+ struct kvm *kvm = vcpu->kvm;
+ int lm = is_long_mode(vcpu);
+- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
+- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
++ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
++ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
+ u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
+ : kvm->arch.xen_hvm_config.blob_size_32;
+ u32 page_num = data & ~PAGE_MASK;
+@@ -2165,6 +2165,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
+ if (n < msr_list.nmsrs)
+ goto out;
+ r = -EFAULT;
++ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
++ goto out;
+ if (copy_to_user(user_msr_list->indices, &msrs_to_save,
+ num_msrs_to_save * sizeof(u32)))
+ goto out;
+@@ -2340,15 +2342,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
+ struct kvm_cpuid2 *cpuid,
+ struct kvm_cpuid_entry2 __user *entries)
+ {
+- int r;
++ int r, i;
+
+ r = -E2BIG;
+ if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
+ goto out;
+ r = -EFAULT;
+- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
+- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
++ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
+ goto out;
++ for (i = 0; i < cpuid->nent; ++i) {
++ struct kvm_cpuid_entry2 cpuid_entry;
++ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
++ goto out;
++ vcpu->arch.cpuid_entries[i] = cpuid_entry;
++ }
+ vcpu->arch.cpuid_nent = cpuid->nent;
+ kvm_apic_set_version(vcpu);
+ kvm_x86_ops->cpuid_update(vcpu);
+@@ -2363,15 +2370,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
+ struct kvm_cpuid2 *cpuid,
+ struct kvm_cpuid_entry2 __user *entries)
+ {
+- int r;
++ int r, i;
+
+ r = -E2BIG;
+ if (cpuid->nent < vcpu->arch.cpuid_nent)
+ goto out;
+ r = -EFAULT;
+- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
+- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
++ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
+ goto out;
++ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
++ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
++ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
++ goto out;
++ }
+ return 0;
+
+ out:
+@@ -2746,7 +2757,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
+ static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
+ struct kvm_interrupt *irq)
+ {
+- if (irq->irq < 0 || irq->irq >= 256)
++ if (irq->irq >= 256)
+ return -EINVAL;
+ if (irqchip_in_kernel(vcpu->kvm))
+ return -ENXIO;
+@@ -5162,7 +5173,7 @@ static void kvm_set_mmio_spte_mask(void)
+ kvm_mmu_set_mmio_spte_mask(mask);
+ }
+
+-int kvm_arch_init(void *opaque)
++int kvm_arch_init(const void *opaque)
+ {
+ int r;
+ struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
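Two classes of fix in the x86.c hunks: the cpuid ioctls stop doing one bulk copy_{from,to}_user() with a guest-controlled nent straight into or out of the fixed vcpu->arch.cpuid_entries array, and instead validate the whole range with access_ok() and move one struct kvm_cpuid_entry2 at a time with the double-underscore variants, a pattern that keeps every individual copy length constant and is presumably friendlier to PaX's hardened usercopy checking. Likewise the xen_hvm_config blob addresses gain their __user annotation so sparse can track the address space, kvm_arch_dev_ioctl gains a bounds check before exporting the MSR list, and the "irq->irq < 0" half of the range check is dropped because the field is unsigned, so the comparison was always false.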
+diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
+index cf4603b..7cdde38 100644
+--- a/arch/x86/lguest/boot.c
++++ b/arch/x86/lguest/boot.c
+@@ -1195,9 +1195,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
+ * Rebooting also tells the Host we're finished, but the RESTART flag tells the
+ * Launcher to reboot us.
+ */
+-static void lguest_restart(char *reason)
++static __noreturn void lguest_restart(char *reason)
+ {
+ hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
++ BUG();
+ }
+
+ /*G:050
+diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
+index 042f682..c92afb6 100644
+--- a/arch/x86/lib/atomic64_32.c
++++ b/arch/x86/lib/atomic64_32.c
+@@ -8,18 +8,30 @@
+
+ long long atomic64_read_cx8(long long, const atomic64_t *v);
+ EXPORT_SYMBOL(atomic64_read_cx8);
++long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
++EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
+ long long atomic64_set_cx8(long long, const atomic64_t *v);
+ EXPORT_SYMBOL(atomic64_set_cx8);
++long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
++EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
+ long long atomic64_xchg_cx8(long long, unsigned high);
+ EXPORT_SYMBOL(atomic64_xchg_cx8);
+ long long atomic64_add_return_cx8(long long a, atomic64_t *v);
+ EXPORT_SYMBOL(atomic64_add_return_cx8);
++long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
++EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
+ long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
+ EXPORT_SYMBOL(atomic64_sub_return_cx8);
++long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
++EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
+ long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
+ EXPORT_SYMBOL(atomic64_inc_return_cx8);
++long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
++EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
+ long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
+ EXPORT_SYMBOL(atomic64_dec_return_cx8);
++long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
++EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
+ long long atomic64_dec_if_positive_cx8(atomic64_t *v);
+ EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
+ int atomic64_inc_not_zero_cx8(atomic64_t *v);
+@@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
+ #ifndef CONFIG_X86_CMPXCHG64
+ long long atomic64_read_386(long long, const atomic64_t *v);
+ EXPORT_SYMBOL(atomic64_read_386);
++long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
++EXPORT_SYMBOL(atomic64_read_unchecked_386);
+ long long atomic64_set_386(long long, const atomic64_t *v);
+ EXPORT_SYMBOL(atomic64_set_386);
++long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
++EXPORT_SYMBOL(atomic64_set_unchecked_386);
+ long long atomic64_xchg_386(long long, unsigned high);
+ EXPORT_SYMBOL(atomic64_xchg_386);
+ long long atomic64_add_return_386(long long a, atomic64_t *v);
+ EXPORT_SYMBOL(atomic64_add_return_386);
++long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
++EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
+ long long atomic64_sub_return_386(long long a, atomic64_t *v);
+ EXPORT_SYMBOL(atomic64_sub_return_386);
++long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
++EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
+ long long atomic64_inc_return_386(long long a, atomic64_t *v);
+ EXPORT_SYMBOL(atomic64_inc_return_386);
++long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
++EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
+ long long atomic64_dec_return_386(long long a, atomic64_t *v);
+ EXPORT_SYMBOL(atomic64_dec_return_386);
++long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
++EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
+ long long atomic64_add_386(long long a, atomic64_t *v);
+ EXPORT_SYMBOL(atomic64_add_386);
++long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
++EXPORT_SYMBOL(atomic64_add_unchecked_386);
+ long long atomic64_sub_386(long long a, atomic64_t *v);
+ EXPORT_SYMBOL(atomic64_sub_386);
++long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
++EXPORT_SYMBOL(atomic64_sub_unchecked_386);
+ long long atomic64_inc_386(long long a, atomic64_t *v);
+ EXPORT_SYMBOL(atomic64_inc_386);
++long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
++EXPORT_SYMBOL(atomic64_inc_unchecked_386);
+ long long atomic64_dec_386(long long a, atomic64_t *v);
+ EXPORT_SYMBOL(atomic64_dec_386);
++long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
++EXPORT_SYMBOL(atomic64_dec_unchecked_386);
+ long long atomic64_dec_if_positive_386(atomic64_t *v);
+ EXPORT_SYMBOL(atomic64_dec_if_positive_386);
+ int atomic64_inc_not_zero_386(atomic64_t *v);
+diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
+index e8e7e0d..56fd1b0 100644
+--- a/arch/x86/lib/atomic64_386_32.S
++++ b/arch/x86/lib/atomic64_386_32.S
+@@ -48,6 +48,10 @@ BEGIN(read)
+ movl (v), %eax
+ movl 4(v), %edx
+ RET_ENDP
++BEGIN(read_unchecked)
++ movl (v), %eax
++ movl 4(v), %edx
++RET_ENDP
+ #undef v
+
+ #define v %esi
+@@ -55,6 +59,10 @@ BEGIN(set)
+ movl %ebx, (v)
+ movl %ecx, 4(v)
+ RET_ENDP
++BEGIN(set_unchecked)
++ movl %ebx, (v)
++ movl %ecx, 4(v)
++RET_ENDP
+ #undef v
+
+ #define v %esi
+@@ -70,6 +78,20 @@ RET_ENDP
+ BEGIN(add)
+ addl %eax, (v)
+ adcl %edx, 4(v)
++
++#ifdef CONFIG_PAX_REFCOUNT
++ jno 0f
++ subl %eax, (v)
++ sbbl %edx, 4(v)
++ int $4
++0:
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++RET_ENDP
++BEGIN(add_unchecked)
++ addl %eax, (v)
++ adcl %edx, 4(v)
+ RET_ENDP
+ #undef v
+
+@@ -77,6 +99,24 @@ RET_ENDP
+ BEGIN(add_return)
+ addl (v), %eax
+ adcl 4(v), %edx
++
++#ifdef CONFIG_PAX_REFCOUNT
++ into
++1234:
++ _ASM_EXTABLE(1234b, 2f)
++#endif
++
++ movl %eax, (v)
++ movl %edx, 4(v)
++
++#ifdef CONFIG_PAX_REFCOUNT
++2:
++#endif
++
++RET_ENDP
++BEGIN(add_return_unchecked)
++ addl (v), %eax
++ adcl 4(v), %edx
+ movl %eax, (v)
+ movl %edx, 4(v)
+ RET_ENDP
+@@ -86,6 +126,20 @@ RET_ENDP
+ BEGIN(sub)
+ subl %eax, (v)
+ sbbl %edx, 4(v)
++
++#ifdef CONFIG_PAX_REFCOUNT
++ jno 0f
++ addl %eax, (v)
++ adcl %edx, 4(v)
++ int $4
++0:
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++RET_ENDP
++BEGIN(sub_unchecked)
++ subl %eax, (v)
++ sbbl %edx, 4(v)
+ RET_ENDP
+ #undef v
+
+@@ -96,6 +150,27 @@ BEGIN(sub_return)
+ sbbl $0, %edx
+ addl (v), %eax
+ adcl 4(v), %edx
++
++#ifdef CONFIG_PAX_REFCOUNT
++ into
++1234:
++ _ASM_EXTABLE(1234b, 2f)
++#endif
++
++ movl %eax, (v)
++ movl %edx, 4(v)
++
++#ifdef CONFIG_PAX_REFCOUNT
++2:
++#endif
++
++RET_ENDP
++BEGIN(sub_return_unchecked)
++ negl %edx
++ negl %eax
++ sbbl $0, %edx
++ addl (v), %eax
++ adcl 4(v), %edx
+ movl %eax, (v)
+ movl %edx, 4(v)
+ RET_ENDP
+@@ -105,6 +180,20 @@ RET_ENDP
+ BEGIN(inc)
+ addl $1, (v)
+ adcl $0, 4(v)
++
++#ifdef CONFIG_PAX_REFCOUNT
++ jno 0f
++ subl $1, (v)
++ sbbl $0, 4(v)
++ int $4
++0:
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++RET_ENDP
++BEGIN(inc_unchecked)
++ addl $1, (v)
++ adcl $0, 4(v)
+ RET_ENDP
+ #undef v
+
+@@ -114,6 +203,26 @@ BEGIN(inc_return)
+ movl 4(v), %edx
+ addl $1, %eax
+ adcl $0, %edx
++
++#ifdef CONFIG_PAX_REFCOUNT
++ into
++1234:
++ _ASM_EXTABLE(1234b, 2f)
++#endif
++
++ movl %eax, (v)
++ movl %edx, 4(v)
++
++#ifdef CONFIG_PAX_REFCOUNT
++2:
++#endif
++
++RET_ENDP
++BEGIN(inc_return_unchecked)
++ movl (v), %eax
++ movl 4(v), %edx
++ addl $1, %eax
++ adcl $0, %edx
+ movl %eax, (v)
+ movl %edx, 4(v)
+ RET_ENDP
+@@ -123,6 +232,20 @@ RET_ENDP
+ BEGIN(dec)
+ subl $1, (v)
+ sbbl $0, 4(v)
++
++#ifdef CONFIG_PAX_REFCOUNT
++ jno 0f
++ addl $1, (v)
++ adcl $0, 4(v)
++ int $4
++0:
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++RET_ENDP
++BEGIN(dec_unchecked)
++ subl $1, (v)
++ sbbl $0, 4(v)
+ RET_ENDP
+ #undef v
+
+@@ -132,6 +255,26 @@ BEGIN(dec_return)
+ movl 4(v), %edx
+ subl $1, %eax
+ sbbl $0, %edx
++
++#ifdef CONFIG_PAX_REFCOUNT
++ into
++1234:
++ _ASM_EXTABLE(1234b, 2f)
++#endif
++
++ movl %eax, (v)
++ movl %edx, 4(v)
++
++#ifdef CONFIG_PAX_REFCOUNT
++2:
++#endif
++
++RET_ENDP
++BEGIN(dec_return_unchecked)
++ movl (v), %eax
++ movl 4(v), %edx
++ subl $1, %eax
++ sbbl $0, %edx
+ movl %eax, (v)
+ movl %edx, 4(v)
+ RET_ENDP
+@@ -143,6 +286,13 @@ BEGIN(add_unless)
+ adcl %edx, %edi
+ addl (v), %eax
+ adcl 4(v), %edx
++
++#ifdef CONFIG_PAX_REFCOUNT
++ into
++1234:
++ _ASM_EXTABLE(1234b, 2f)
++#endif
++
+ cmpl %eax, %esi
+ je 3f
+ 1:
+@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
+ 1:
+ addl $1, %eax
+ adcl $0, %edx
++
++#ifdef CONFIG_PAX_REFCOUNT
++ into
++1234:
++ _ASM_EXTABLE(1234b, 2f)
++#endif
++
+ movl %eax, (v)
+ movl %edx, 4(v)
+ movl $1, %eax
+@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
+ movl 4(v), %edx
+ subl $1, %eax
+ sbbl $0, %edx
++
++#ifdef CONFIG_PAX_REFCOUNT
++ into
++1234:
++ _ASM_EXTABLE(1234b, 1f)
++#endif
++
+ js 1f
+ movl %eax, (v)
+ movl %edx, 4(v)
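The 386 atomic64 paths show the full PAX_REFCOUNT pattern in one place. For the plain operations (add, sub, inc, dec) the sequence is: perform the update, "jno 0f" to skip the handler while the overflow flag is clear, otherwise undo the update, raise int $4 (the x86 overflow exception, which the traps.c hunk above routes to pax_report_refcount_overflow()), and rely on an _ASM_EXTABLE entry so execution resumes after reporting. The *_return variants instead check with "into" before the memory write and bail past it via the exception table, leaving the stored value un-wrapped. A C rendering of the checked add, sketched with the modern __builtin_add_overflow for clarity (atomicity and locking elided; the assembly achieves the same effect in place):

    static void atomic64_add_checked_sketch(long long delta, atomic64_t *v)
    {
            long long new;
            if (__builtin_add_overflow(v->counter, delta, &new)) {
                    /* the real path raises #OF and reports from the trap handler */
                    WARN(1, "refcount overflow");
                    return;                 /* leave the counter unchanged */
            }
            v->counter = new;
    }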
+diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
+index 391a083..d658e9f 100644
+--- a/arch/x86/lib/atomic64_cx8_32.S
++++ b/arch/x86/lib/atomic64_cx8_32.S
+@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
+ CFI_STARTPROC
+
+ read64 %ecx
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+ ENDPROC(atomic64_read_cx8)
+
++ENTRY(atomic64_read_unchecked_cx8)
++ CFI_STARTPROC
++
++ read64 %ecx
++ pax_force_retaddr
++ ret
++ CFI_ENDPROC
++ENDPROC(atomic64_read_unchecked_cx8)
++
+ ENTRY(atomic64_set_cx8)
+ CFI_STARTPROC
+
+@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
+ cmpxchg8b (%esi)
+ jne 1b
+
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+ ENDPROC(atomic64_set_cx8)
+
++ENTRY(atomic64_set_unchecked_cx8)
++ CFI_STARTPROC
++
++1:
++/* we don't need LOCK_PREFIX since aligned 64-bit writes
++ * are atomic on 586 and newer */
++ cmpxchg8b (%esi)
++ jne 1b
++
++ pax_force_retaddr
++ ret
++ CFI_ENDPROC
++ENDPROC(atomic64_set_unchecked_cx8)
++
+ ENTRY(atomic64_xchg_cx8)
+ CFI_STARTPROC
+
+@@ -62,12 +87,13 @@ ENTRY(atomic64_xchg_cx8)
+ cmpxchg8b (%esi)
+ jne 1b
+
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+ ENDPROC(atomic64_xchg_cx8)
+
+-.macro addsub_return func ins insc
+-ENTRY(atomic64_\func\()_return_cx8)
++.macro addsub_return func ins insc unchecked=""
++ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
+ CFI_STARTPROC
+ SAVE ebp
+ SAVE ebx
+@@ -84,27 +110,44 @@ ENTRY(atomic64_\func\()_return_cx8)
+ movl %edx, %ecx
+ \ins\()l %esi, %ebx
+ \insc\()l %edi, %ecx
++
++.ifb \unchecked
++#ifdef CONFIG_PAX_REFCOUNT
++ into
++2:
++ _ASM_EXTABLE(2b, 3f)
++#endif
++.endif
++
+ LOCK_PREFIX
+ cmpxchg8b (%ebp)
+ jne 1b
+-
+-10:
+ movl %ebx, %eax
+ movl %ecx, %edx
++
++.ifb \unchecked
++#ifdef CONFIG_PAX_REFCOUNT
++3:
++#endif
++.endif
++
+ RESTORE edi
+ RESTORE esi
+ RESTORE ebx
+ RESTORE ebp
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+-ENDPROC(atomic64_\func\()_return_cx8)
++ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
+ .endm
+
+ addsub_return add add adc
+ addsub_return sub sub sbb
++addsub_return add add adc _unchecked
++addsub_return sub sub sbb _unchecked
+
+-.macro incdec_return func ins insc
+-ENTRY(atomic64_\func\()_return_cx8)
++.macro incdec_return func ins insc unchecked
++ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
+ CFI_STARTPROC
+ SAVE ebx
+
+@@ -114,21 +157,39 @@ ENTRY(atomic64_\func\()_return_cx8)
+ movl %edx, %ecx
+ \ins\()l $1, %ebx
+ \insc\()l $0, %ecx
++
++.ifb \unchecked
++#ifdef CONFIG_PAX_REFCOUNT
++ into
++2:
++ _ASM_EXTABLE(2b, 3f)
++#endif
++.endif
++
+ LOCK_PREFIX
+ cmpxchg8b (%esi)
+ jne 1b
+
+-10:
+ movl %ebx, %eax
+ movl %ecx, %edx
++
++.ifb \unchecked
++#ifdef CONFIG_PAX_REFCOUNT
++3:
++#endif
++.endif
++
+ RESTORE ebx
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+-ENDPROC(atomic64_\func\()_return_cx8)
++ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
+ .endm
+
+ incdec_return inc add adc
+ incdec_return dec sub sbb
++incdec_return inc add adc _unchecked
++incdec_return dec sub sbb _unchecked
+
+ ENTRY(atomic64_dec_if_positive_cx8)
+ CFI_STARTPROC
+@@ -140,6 +201,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
+ movl %edx, %ecx
+ subl $1, %ebx
+ sbb $0, %ecx
++
++#ifdef CONFIG_PAX_REFCOUNT
++ into
++1234:
++ _ASM_EXTABLE(1234b, 2f)
++#endif
++
+ js 2f
+ LOCK_PREFIX
+ cmpxchg8b (%esi)
+@@ -149,6 +217,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
+ movl %ebx, %eax
+ movl %ecx, %edx
+ RESTORE ebx
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+ ENDPROC(atomic64_dec_if_positive_cx8)
+@@ -174,6 +243,13 @@ ENTRY(atomic64_add_unless_cx8)
+ movl %edx, %ecx
+ addl %esi, %ebx
+ adcl %edi, %ecx
++
++#ifdef CONFIG_PAX_REFCOUNT
++ into
++1234:
++ _ASM_EXTABLE(1234b, 3f)
++#endif
++
+ LOCK_PREFIX
+ cmpxchg8b (%ebp)
+ jne 1b
+@@ -184,6 +260,7 @@ ENTRY(atomic64_add_unless_cx8)
+ CFI_ADJUST_CFA_OFFSET -8
+ RESTORE ebx
+ RESTORE ebp
++ pax_force_retaddr
+ ret
+ 4:
+ cmpl %edx, 4(%esp)
+@@ -206,6 +283,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
+ movl %edx, %ecx
+ addl $1, %ebx
+ adcl $0, %ecx
++
++#ifdef CONFIG_PAX_REFCOUNT
++ into
++1234:
++ _ASM_EXTABLE(1234b, 3f)
++#endif
++
+ LOCK_PREFIX
+ cmpxchg8b (%esi)
+ jne 1b
+@@ -213,6 +297,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
+ movl $1, %eax
+ 3:
+ RESTORE ebx
++ pax_force_retaddr
+ ret
+ 4:
+ testl %edx, %edx
+diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
+index 78d16a5..fbcf666 100644
+--- a/arch/x86/lib/checksum_32.S
++++ b/arch/x86/lib/checksum_32.S
+@@ -28,7 +28,8 @@
+ #include <linux/linkage.h>
+ #include <asm/dwarf2.h>
+ #include <asm/errno.h>
+-
++#include <asm/segment.h>
++
+ /*
+ * computes a partial checksum, e.g. for TCP/UDP fragments
+ */
+@@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
+
+ #define ARGBASE 16
+ #define FP 12
+-
+-ENTRY(csum_partial_copy_generic)
++
++ENTRY(csum_partial_copy_generic_to_user)
+ CFI_STARTPROC
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ pushl_cfi %gs
++ popl_cfi %es
++ jmp csum_partial_copy_generic
++#endif
++
++ENTRY(csum_partial_copy_generic_from_user)
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ pushl_cfi %gs
++ popl_cfi %ds
++#endif
++
++ENTRY(csum_partial_copy_generic)
+ subl $4,%esp
+ CFI_ADJUST_CFA_OFFSET 4
+ pushl_cfi %edi
+@@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
+ jmp 4f
+ SRC(1: movw (%esi), %bx )
+ addl $2, %esi
+-DST( movw %bx, (%edi) )
++DST( movw %bx, %es:(%edi) )
+ addl $2, %edi
+ addw %bx, %ax
+ adcl $0, %eax
+@@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
+ SRC(1: movl (%esi), %ebx )
+ SRC( movl 4(%esi), %edx )
+ adcl %ebx, %eax
+-DST( movl %ebx, (%edi) )
++DST( movl %ebx, %es:(%edi) )
+ adcl %edx, %eax
+-DST( movl %edx, 4(%edi) )
++DST( movl %edx, %es:4(%edi) )
+
+ SRC( movl 8(%esi), %ebx )
+ SRC( movl 12(%esi), %edx )
+ adcl %ebx, %eax
+-DST( movl %ebx, 8(%edi) )
++DST( movl %ebx, %es:8(%edi) )
+ adcl %edx, %eax
+-DST( movl %edx, 12(%edi) )
++DST( movl %edx, %es:12(%edi) )
+
+ SRC( movl 16(%esi), %ebx )
+ SRC( movl 20(%esi), %edx )
+ adcl %ebx, %eax
+-DST( movl %ebx, 16(%edi) )
++DST( movl %ebx, %es:16(%edi) )
+ adcl %edx, %eax
+-DST( movl %edx, 20(%edi) )
++DST( movl %edx, %es:20(%edi) )
+
+ SRC( movl 24(%esi), %ebx )
+ SRC( movl 28(%esi), %edx )
+ adcl %ebx, %eax
+-DST( movl %ebx, 24(%edi) )
++DST( movl %ebx, %es:24(%edi) )
+ adcl %edx, %eax
+-DST( movl %edx, 28(%edi) )
++DST( movl %edx, %es:28(%edi) )
+
+ lea 32(%esi), %esi
+ lea 32(%edi), %edi
+@@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
+ shrl $2, %edx # This clears CF
+ SRC(3: movl (%esi), %ebx )
+ adcl %ebx, %eax
+-DST( movl %ebx, (%edi) )
++DST( movl %ebx, %es:(%edi) )
+ lea 4(%esi), %esi
+ lea 4(%edi), %edi
+ dec %edx
+@@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
+ jb 5f
+ SRC( movw (%esi), %cx )
+ leal 2(%esi), %esi
+-DST( movw %cx, (%edi) )
++DST( movw %cx, %es:(%edi) )
+ leal 2(%edi), %edi
+ je 6f
+ shll $16,%ecx
+ SRC(5: movb (%esi), %cl )
+-DST( movb %cl, (%edi) )
++DST( movb %cl, %es:(%edi) )
+ 6: addl %ecx, %eax
+ adcl $0, %eax
+ 7:
+@@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
+
+ 6001:
+ movl ARGBASE+20(%esp), %ebx # src_err_ptr
+- movl $-EFAULT, (%ebx)
++ movl $-EFAULT, %ss:(%ebx)
+
+ # zero the complete destination - computing the rest
+ # is too much work
+@@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
+
+ 6002:
+ movl ARGBASE+24(%esp), %ebx # dst_err_ptr
+- movl $-EFAULT,(%ebx)
++ movl $-EFAULT,%ss:(%ebx)
+ jmp 5000b
+
+ .previous
+
++ pushl_cfi %ss
++ popl_cfi %ds
++ pushl_cfi %ss
++ popl_cfi %es
+ popl_cfi %ebx
+ CFI_RESTORE ebx
+ popl_cfi %esi
+@@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
+ popl_cfi %ecx # equivalent to addl $4,%esp
+ ret
+ CFI_ENDPROC
+-ENDPROC(csum_partial_copy_generic)
++ENDPROC(csum_partial_copy_generic_to_user)
+
+ #else
+
+ /* Version for PentiumII/PPro */
+
+ #define ROUND1(x) \
++ nop; nop; nop; \
+ SRC(movl x(%esi), %ebx ) ; \
+ addl %ebx, %eax ; \
+- DST(movl %ebx, x(%edi) ) ;
++ DST(movl %ebx, %es:x(%edi)) ;
+
+ #define ROUND(x) \
++ nop; nop; nop; \
+ SRC(movl x(%esi), %ebx ) ; \
+ adcl %ebx, %eax ; \
+- DST(movl %ebx, x(%edi) ) ;
++ DST(movl %ebx, %es:x(%edi)) ;
+
+ #define ARGBASE 12
+-
+-ENTRY(csum_partial_copy_generic)
++
++ENTRY(csum_partial_copy_generic_to_user)
+ CFI_STARTPROC
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ pushl_cfi %gs
++ popl_cfi %es
++ jmp csum_partial_copy_generic
++#endif
++
++ENTRY(csum_partial_copy_generic_from_user)
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ pushl_cfi %gs
++ popl_cfi %ds
++#endif
++
++ENTRY(csum_partial_copy_generic)
+ pushl_cfi %ebx
+ CFI_REL_OFFSET ebx, 0
+ pushl_cfi %edi
+@@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
+ subl %ebx, %edi
+ lea -1(%esi),%edx
+ andl $-32,%edx
+- lea 3f(%ebx,%ebx), %ebx
++ lea 3f(%ebx,%ebx,2), %ebx
+ testl %esi, %esi
+ jmp *%ebx
+ 1: addl $64,%esi
+@@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
+ jb 5f
+ SRC( movw (%esi), %dx )
+ leal 2(%esi), %esi
+-DST( movw %dx, (%edi) )
++DST( movw %dx, %es:(%edi) )
+ leal 2(%edi), %edi
+ je 6f
+ shll $16,%edx
+ 5:
+ SRC( movb (%esi), %dl )
+-DST( movb %dl, (%edi) )
++DST( movb %dl, %es:(%edi) )
+ 6: addl %edx, %eax
+ adcl $0, %eax
+ 7:
+ .section .fixup, "ax"
+ 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
+- movl $-EFAULT, (%ebx)
++ movl $-EFAULT, %ss:(%ebx)
+ # zero the complete destination (computing the rest is too much work)
+ movl ARGBASE+8(%esp),%edi # dst
+ movl ARGBASE+12(%esp),%ecx # len
+@@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
+ rep; stosb
+ jmp 7b
+ 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
+- movl $-EFAULT, (%ebx)
++ movl $-EFAULT, %ss:(%ebx)
+ jmp 7b
+ .previous
+
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ pushl_cfi %ss
++ popl_cfi %ds
++ pushl_cfi %ss
++ popl_cfi %es
++#endif
++
+ popl_cfi %esi
+ CFI_RESTORE esi
+ popl_cfi %edi
+@@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
+ CFI_RESTORE ebx
+ ret
+ CFI_ENDPROC
+-ENDPROC(csum_partial_copy_generic)
++ENDPROC(csum_partial_copy_generic_to_user)
+
+ #undef ROUND
+ #undef ROUND1
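+
Annotation: under CONFIG_PAX_MEMORY_UDEREF on i386, user space lives in its own segment, so the checksum_32.S hunks above give every DST() store an explicit %es: override and add directional entry points (csum_partial_copy_generic_to_user / _from_user) that load %es or %ds from %gs before falling through to the shared body. The exit and fixup paths reload %ds/%es from %ss, which is always a kernel segment, and store the -EFAULT error codes through %ss: because %ds may still point at the user segment when the fault fires. The nop padding in ROUND/ROUND1 and the stride change in "lea 3f(%ebx,%ebx,2)" go together: the %es: prefix plus three nops grow each unrolled ROUND from 8 to 12 bytes, so the computed jump needs a 3*%ebx stride instead of 2*%ebx to keep landing on ROUND boundaries. A minimal C sketch of how a caller could select the directional entry point — the entry names come from the patch, but the wrapper, the wsum32 typedef, and their use here are assumptions, not the patch's own C glue:

    /* sketch only: signature mirrors the kernel's 32-bit
       csum_partial_copy_generic; __wsum approximated as a plain u32 */
    typedef unsigned int wsum32;

    wsum32 csum_partial_copy_generic_from_user(const void *src, void *dst,
                                               int len, wsum32 sum,
                                               int *src_err, int *dst_err);

    /* hypothetical wrapper: reads from user space must enter through the
       _from_user stub so %ds is switched to the user segment first */
    static inline wsum32 csum_from_user_sketch(const void *usrc, void *dst,
                                               int len, wsum32 sum, int *err)
    {
            return csum_partial_copy_generic_from_user(usrc, dst, len, sum,
                                                       err, (int *)0);
    }
+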
+diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
+index f2145cf..cea889d 100644
+--- a/arch/x86/lib/clear_page_64.S
++++ b/arch/x86/lib/clear_page_64.S
+@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
+ movl $4096/8,%ecx
+ xorl %eax,%eax
+ rep stosq
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+ ENDPROC(clear_page_c)
+@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
+ movl $4096,%ecx
+ xorl %eax,%eax
+ rep stosb
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+ ENDPROC(clear_page_c_e)
+@@ -43,6 +45,7 @@ ENTRY(clear_page)
+ leaq 64(%rdi),%rdi
+ jnz .Lloop
+ nop
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+ .Lclear_page_end:
+@@ -58,7 +61,7 @@ ENDPROC(clear_page)
+
+ #include <asm/cpufeature.h>
+
+- .section .altinstr_replacement,"ax"
++ .section .altinstr_replacement,"a"
+ 1: .byte 0xeb /* jmp <disp8> */
+ .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
+ 2: .byte 0xeb /* jmp <disp8> */
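+
Annotation: two recurring PaX tweaks start here and repeat through the following files. First, pax_force_retaddr is placed before each ret: it masks the saved return address on the stack so that a return address overwritten with a userland pointer is forced back into the kernel half of the address space before ret consumes it (elsewhere in the patch the macro expands to roughly a bts of bit 63 or an or against a reserved-register mask, depending on the configured KERNEXEC plugin method). Second, .altinstr_replacement is downgraded from "ax" to "a": the alternatives machinery only copies bytes out of that section at patch time and never executes them in place, so it need not be mapped executable. A hedged C illustration of the masking idea — the real macro is assembly, and this function name and constant are illustrative only:

    #include <stdint.h>

    /* illustrative: forcing bit 63 keeps a corrupted return address in
       the canonical kernel half, mirroring a "bts $63,(%rsp)" sequence */
    static inline uint64_t force_kernel_retaddr(uint64_t retaddr)
    {
            return retaddr | (1ULL << 63);
    }
+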
+diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
+index 1e572c5..2a162cd 100644
+--- a/arch/x86/lib/cmpxchg16b_emu.S
++++ b/arch/x86/lib/cmpxchg16b_emu.S
+@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
+
+ popf
+ mov $1, %al
++ pax_force_retaddr
+ ret
+
+ not_same:
+ popf
+ xor %al,%al
++ pax_force_retaddr
+ ret
+
+ CFI_ENDPROC
+diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
+index 01c805b..dccb07f 100644
+--- a/arch/x86/lib/copy_page_64.S
++++ b/arch/x86/lib/copy_page_64.S
+@@ -9,6 +9,7 @@ copy_page_c:
+ CFI_STARTPROC
+ movl $4096/8,%ecx
+ rep movsq
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+ ENDPROC(copy_page_c)
+@@ -39,7 +40,7 @@ ENTRY(copy_page)
+ movq 16 (%rsi), %rdx
+ movq 24 (%rsi), %r8
+ movq 32 (%rsi), %r9
+- movq 40 (%rsi), %r10
++ movq 40 (%rsi), %r13
+ movq 48 (%rsi), %r11
+ movq 56 (%rsi), %r12
+
+@@ -50,7 +51,7 @@ ENTRY(copy_page)
+ movq %rdx, 16 (%rdi)
+ movq %r8, 24 (%rdi)
+ movq %r9, 32 (%rdi)
+- movq %r10, 40 (%rdi)
++ movq %r13, 40 (%rdi)
+ movq %r11, 48 (%rdi)
+ movq %r12, 56 (%rdi)
+
+@@ -69,7 +70,7 @@ ENTRY(copy_page)
+ movq 16 (%rsi), %rdx
+ movq 24 (%rsi), %r8
+ movq 32 (%rsi), %r9
+- movq 40 (%rsi), %r10
++ movq 40 (%rsi), %r13
+ movq 48 (%rsi), %r11
+ movq 56 (%rsi), %r12
+
+@@ -78,7 +79,7 @@ ENTRY(copy_page)
+ movq %rdx, 16 (%rdi)
+ movq %r8, 24 (%rdi)
+ movq %r9, 32 (%rdi)
+- movq %r10, 40 (%rdi)
++ movq %r13, 40 (%rdi)
+ movq %r11, 48 (%rdi)
+ movq %r12, 56 (%rdi)
+
+@@ -95,6 +96,7 @@ ENTRY(copy_page)
+ CFI_RESTORE r13
+ addq $3*8,%rsp
+ CFI_ADJUST_CFA_OFFSET -3*8
++ pax_force_retaddr
+ ret
+ .Lcopy_page_end:
+ CFI_ENDPROC
+@@ -105,7 +107,7 @@ ENDPROC(copy_page)
+
+ #include <asm/cpufeature.h>
+
+- .section .altinstr_replacement,"ax"
++ .section .altinstr_replacement,"a"
+ 1: .byte 0xeb /* jmp <disp8> */
+ .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
+ 2:
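+
Annotation: the %r10 -> %r13 renames in copy_page (and %r10 -> %rax in the copy_user routines below) free up %r10, which the KERNEXEC plugin's "or" method appears to reserve as a global holder for the return-address mask applied by pax_force_retaddr; hand-written assembly that clobbered %r10 would corrupt that mask. copy_page already saves and restores %r13 in its stack frame (note the CFI_RESTORE r13 above), and %rax is dead in the copy_user bodies until the final xorl, so both are safe substitutes. In C terms the reservation resembles a GCC global register variable — an analogy, not the plugin's actual mechanism:

    /* GCC extension: pin a value to %r10 for the whole translation unit,
       making %r10 off-limits to the register allocator */
    register unsigned long pax_retaddr_mask asm("r10");
+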
+diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
+index 0248402..821c786 100644
+--- a/arch/x86/lib/copy_user_64.S
++++ b/arch/x86/lib/copy_user_64.S
+@@ -16,6 +16,7 @@
+ #include <asm/thread_info.h>
+ #include <asm/cpufeature.h>
+ #include <asm/alternative-asm.h>
++#include <asm/pgtable.h>
+
+ /*
+ * By placing feature2 after feature1 in altinstructions section, we logically
+@@ -29,7 +30,7 @@
+ .byte 0xe9 /* 32bit jump */
+ .long \orig-1f /* by default jump to orig */
+ 1:
+- .section .altinstr_replacement,"ax"
++ .section .altinstr_replacement,"a"
+ 2: .byte 0xe9 /* near jump with 32bit immediate */
+ .long \alt1-1b /* offset */ /* or alternatively to alt1 */
+ 3: .byte 0xe9 /* near jump with 32bit immediate */
+@@ -71,47 +72,20 @@
+ #endif
+ .endm
+
+-/* Standard copy_to_user with segment limit checking */
+-ENTRY(_copy_to_user)
+- CFI_STARTPROC
+- GET_THREAD_INFO(%rax)
+- movq %rdi,%rcx
+- addq %rdx,%rcx
+- jc bad_to_user
+- cmpq TI_addr_limit(%rax),%rcx
+- ja bad_to_user
+- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
+- copy_user_generic_unrolled,copy_user_generic_string, \
+- copy_user_enhanced_fast_string
+- CFI_ENDPROC
+-ENDPROC(_copy_to_user)
+-
+-/* Standard copy_from_user with segment limit checking */
+-ENTRY(_copy_from_user)
+- CFI_STARTPROC
+- GET_THREAD_INFO(%rax)
+- movq %rsi,%rcx
+- addq %rdx,%rcx
+- jc bad_from_user
+- cmpq TI_addr_limit(%rax),%rcx
+- ja bad_from_user
+- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
+- copy_user_generic_unrolled,copy_user_generic_string, \
+- copy_user_enhanced_fast_string
+- CFI_ENDPROC
+-ENDPROC(_copy_from_user)
+-
+ .section .fixup,"ax"
+ /* must zero dest */
+ ENTRY(bad_from_user)
+ bad_from_user:
+ CFI_STARTPROC
++ testl %edx,%edx
++ js bad_to_user
+ movl %edx,%ecx
+ xorl %eax,%eax
+ rep
+ stosb
+ bad_to_user:
+ movl %edx,%eax
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+ ENDPROC(bad_from_user)
+@@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
+ jz 17f
+ 1: movq (%rsi),%r8
+ 2: movq 1*8(%rsi),%r9
+-3: movq 2*8(%rsi),%r10
++3: movq 2*8(%rsi),%rax
+ 4: movq 3*8(%rsi),%r11
+ 5: movq %r8,(%rdi)
+ 6: movq %r9,1*8(%rdi)
+-7: movq %r10,2*8(%rdi)
++7: movq %rax,2*8(%rdi)
+ 8: movq %r11,3*8(%rdi)
+ 9: movq 4*8(%rsi),%r8
+ 10: movq 5*8(%rsi),%r9
+-11: movq 6*8(%rsi),%r10
++11: movq 6*8(%rsi),%rax
+ 12: movq 7*8(%rsi),%r11
+ 13: movq %r8,4*8(%rdi)
+ 14: movq %r9,5*8(%rdi)
+-15: movq %r10,6*8(%rdi)
++15: movq %rax,6*8(%rdi)
+ 16: movq %r11,7*8(%rdi)
+ leaq 64(%rsi),%rsi
+ leaq 64(%rdi),%rdi
+@@ -179,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
+ decl %ecx
+ jnz 21b
+ 23: xor %eax,%eax
++ pax_force_retaddr
+ ret
+
+ .section .fixup,"ax"
+@@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
+ 3: rep
+ movsb
+ 4: xorl %eax,%eax
++ pax_force_retaddr
+ ret
+
+ .section .fixup,"ax"
+@@ -287,6 +263,7 @@ ENTRY(copy_user_enhanced_fast_string)
+ 1: rep
+ movsb
+ 2: xorl %eax,%eax
++ pax_force_retaddr
+ ret
+
+ .section .fixup,"ax"
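+
Annotation: two things happen in copy_user_64.S. The _copy_to_user/_copy_from_user stubs, whose only job was the addr_limit range check before dispatching through ALTERNATIVE_JUMP, are deleted — presumably the patch performs those checks on the C side, where UDEREF can also adjust the pointer. And bad_from_user gains a sign test: the length travels in %edx as a 32-bit value, and if a negative length ever reached the zeroing path, "rep stosb" with that value in %ecx would scribble over gigabytes of destination, so negative lengths now branch straight to the plain-return path. A hedged C rendering of the deleted range check, with invented names:

    /* sketch of the removed asm check: "addq %rdx,%rcx; jc bad" followed
       by "cmpq TI_addr_limit(%rax),%rcx; ja bad" */
    static inline int user_range_ok_sketch(unsigned long ptr,
                                           unsigned long len,
                                           unsigned long addr_limit)
    {
            unsigned long end;

            if (__builtin_add_overflow(ptr, len, &end))  /* the jc case */
                    return 0;
            return end <= addr_limit;                    /* the ja case */
    }
+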
+diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
+index cb0c112..e3a6895 100644
+--- a/arch/x86/lib/copy_user_nocache_64.S
++++ b/arch/x86/lib/copy_user_nocache_64.S
+@@ -8,12 +8,14 @@
+
+ #include <linux/linkage.h>
+ #include <asm/dwarf2.h>
++#include <asm/alternative-asm.h>
+
+ #define FIX_ALIGNMENT 1
+
+ #include <asm/current.h>
+ #include <asm/asm-offsets.h>
+ #include <asm/thread_info.h>
++#include <asm/pgtable.h>
+
+ .macro ALIGN_DESTINATION
+ #ifdef FIX_ALIGNMENT
+@@ -50,6 +52,15 @@
+ */
+ ENTRY(__copy_user_nocache)
+ CFI_STARTPROC
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ mov $PAX_USER_SHADOW_BASE,%rcx
++ cmp %rcx,%rsi
++ jae 1f
++ add %rcx,%rsi
++1:
++#endif
++
+ cmpl $8,%edx
+ jb 20f /* less than 8 bytes, go to byte copy loop */
+ ALIGN_DESTINATION
+@@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
+ jz 17f
+ 1: movq (%rsi),%r8
+ 2: movq 1*8(%rsi),%r9
+-3: movq 2*8(%rsi),%r10
++3: movq 2*8(%rsi),%rax
+ 4: movq 3*8(%rsi),%r11
+ 5: movnti %r8,(%rdi)
+ 6: movnti %r9,1*8(%rdi)
+-7: movnti %r10,2*8(%rdi)
++7: movnti %rax,2*8(%rdi)
+ 8: movnti %r11,3*8(%rdi)
+ 9: movq 4*8(%rsi),%r8
+ 10: movq 5*8(%rsi),%r9
+-11: movq 6*8(%rsi),%r10
++11: movq 6*8(%rsi),%rax
+ 12: movq 7*8(%rsi),%r11
+ 13: movnti %r8,4*8(%rdi)
+ 14: movnti %r9,5*8(%rdi)
+-15: movnti %r10,6*8(%rdi)
++15: movnti %rax,6*8(%rdi)
+ 16: movnti %r11,7*8(%rdi)
+ leaq 64(%rsi),%rsi
+ leaq 64(%rdi),%rdi
+@@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
+ jnz 21b
+ 23: xorl %eax,%eax
+ sfence
++ pax_force_retaddr
+ ret
+
+ .section .fixup,"ax"
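+
Annotation: the prologue added to __copy_user_nocache shows amd64 UDEREF's substitute for segmentation. While the kernel runs, ordinary user mappings are not directly accessible; legitimate kernel reads of user memory go through a shadow mapping starting at PAX_USER_SHADOW_BASE, so any source pointer below that base (a normal user address) is rebased into the shadow area before the copy. A minimal C sketch of the rebase — the base value here is illustrative, the real constant is defined elsewhere in the patch:

    /* illustrative constant; the patch defines the real value in the
       arch/x86 headers */
    #define USER_SHADOW_BASE_SKETCH (1UL << 45)

    static inline const void *uderef_rebase_sketch(const void *uptr)
    {
            unsigned long a = (unsigned long)uptr;

            if (a < USER_SHADOW_BASE_SKETCH)       /* cmp %rcx,%rsi; jae 1f */
                    a += USER_SHADOW_BASE_SKETCH;  /* add %rcx,%rsi */
            return (const void *)a;
    }
+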
+diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
+index fb903b7..c92b7f7 100644
+--- a/arch/x86/lib/c