author    William Pitcock <nenolod@dereferenced.org>  2011-02-15 18:58:22 -0600
committer William Pitcock <nenolod@dereferenced.org>  2011-02-15 18:58:22 -0600
commit    d6e368b0b2b8343f8b833daa28ed88f59df31441 (patch)
tree      d49912382c27285865971a3fc42d16f2c112d1c5 /main
parent    2989f0cb668ef547c2a842f1bc34b331ac72d6b4 (diff)
download  aports-d6e368b0b2b8343f8b833daa28ed88f59df31441.tar.gz
          aports-d6e368b0b2b8343f8b833daa28ed88f59df31441.tar.bz2
          aports-d6e368b0b2b8343f8b833daa28ed88f59df31441.tar.xz
main/linux-grsec: upgrade to 2.6.37
Diffstat (limited to 'main')
-rw-r--r--  main/linux-grsec/0001-Staging-hv-fix-sleeping-while-atomic-issue.patch        38
-rw-r--r--  main/linux-grsec/0001-xfrm-use-gre-key-as-flow-upper-protocol-info.patch     139
-rw-r--r--  main/linux-grsec/0004-arp-flush-arp-cache-on-device-change.patch              29
-rw-r--r--  main/linux-grsec/0004-staging-hv-fix-netvsc-sleeping-while-atomic.patch       42
-rw-r--r--  main/linux-grsec/APKBUILD                                                     29
-rw-r--r--  main/linux-grsec/grsecurity-2.2.1-2.6.37-201102121148.patch (renamed from main/linux-grsec/grsecurity-2.2.0-2.6.35.10-unofficial.patch)  28197
-rw-r--r--  main/linux-grsec/kernelconfig.x86                                            785
-rw-r--r--  main/linux-grsec/kernelconfig.x86_64                                         439
-rw-r--r--  main/linux-grsec/r8169-add-gro-support.patch                                  52
-rw-r--r--  main/linux-grsec/setlocalversion.patch                                        11
-rw-r--r--  main/linux-grsec/xfrm-fix-gre-key-endianess.patch                             43
-rw-r--r--  main/xorg-server/APKBUILD                                                      2
12 files changed, 15017 insertions, 14789 deletions
diff --git a/main/linux-grsec/0001-Staging-hv-fix-sleeping-while-atomic-issue.patch b/main/linux-grsec/0001-Staging-hv-fix-sleeping-while-atomic-issue.patch
deleted file mode 100644
index 1133c7591bf..00000000000
--- a/main/linux-grsec/0001-Staging-hv-fix-sleeping-while-atomic-issue.patch
+++ /dev/null
@@ -1,38 +0,0 @@
-From 6ee51b8d69833b3cd00901999c36c59fbfde24aa Mon Sep 17 00:00:00 2001
-From: =?UTF-8?q?Timo=20Ter=C3=A4s?= <timo.teras@iki.fi>
-Date: Fri, 10 Dec 2010 16:23:26 +0200
-Subject: [PATCH] Staging: hv: fix sleeping while atomic issue
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-osd_schedule_callback() is called from VmbusOnMsgDPC() which runs
-in a tasklet. Avoid possible sleeping by using GFP_ATOMIC for the
-memory allocation.
-
-Seems to fix #16701.
-
-Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=16701
-Cc: Haiyang Zhang <haiyangz@microsoft.com>
-Cc: Hank Janssen <hjanssen@microsoft.com>
-Signed-off-by: Timo Teräs <timo.teras@iki.fi>
----
- drivers/staging/hv/osd.c | 2 +-
- 1 files changed, 1 insertions(+), 1 deletions(-)
-
-diff --git a/drivers/staging/hv/osd.c b/drivers/staging/hv/osd.c
-index 8c3eb27..eb9b20d 100644
---- a/drivers/staging/hv/osd.c
-+++ b/drivers/staging/hv/osd.c
-@@ -214,7 +214,7 @@ int osd_schedule_callback(struct workqueue_struct *wq,
- {
- struct osd_callback_struct *cb;
-
-- cb = kmalloc(sizeof(*cb), GFP_KERNEL);
-+ cb = kmalloc(sizeof(*cb), GFP_ATOMIC);
- if (!cb) {
- printk(KERN_ERR "unable to allocate memory in osd_schedule_callback\n");
- return -1;
---
-1.7.1
-
diff --git a/main/linux-grsec/0001-xfrm-use-gre-key-as-flow-upper-protocol-info.patch b/main/linux-grsec/0001-xfrm-use-gre-key-as-flow-upper-protocol-info.patch
deleted file mode 100644
index 4b260daccfb..00000000000
--- a/main/linux-grsec/0001-xfrm-use-gre-key-as-flow-upper-protocol-info.patch
+++ /dev/null
@@ -1,139 +0,0 @@
-From e4e3789c1d6d9cd30267c4395763577ceedd7015 Mon Sep 17 00:00:00 2001
-From: =?UTF-8?q?Timo=20Ter=C3=A4s?= <timo.teras@iki.fi>
-Date: Thu, 18 Nov 2010 11:42:16 +0200
-Subject: [PATCH] xfrm: use gre key as flow upper protocol info
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-The GRE Key field is intended to be used for identifying an individual
-traffic flow within a tunnel. It is useful to be able to have XFRM
-policy selector matches to have different policies for different
-GRE tunnels.
-
-Backported to linux-2.6.35 from the original version at:
-http://git.kernel.org/?p=linux/kernel/git/davem/net-next-2.6.git;
-a=commitdiff_plain;h=cc9ff19da9bf76a2f70bcb80225a1c587c162e52
-
-Signed-off-by: Timo Teräs <timo.teras@iki.fi>
----
- include/net/flow.h | 2 ++
- include/net/xfrm.h | 6 ++++++
- net/ipv4/ip_gre.c | 9 ++++++---
- net/ipv4/xfrm4_policy.c | 15 +++++++++++++++
- 4 files changed, 29 insertions(+), 3 deletions(-)
-
-diff --git a/include/net/flow.h b/include/net/flow.h
-index bb08692..240b7f3 100644
---- a/include/net/flow.h
-+++ b/include/net/flow.h
-@@ -66,6 +66,7 @@ struct flowi {
- } dnports;
-
- __be32 spi;
-+ __be32 gre_key;
-
- struct {
- __u8 type;
-@@ -77,6 +78,7 @@ struct flowi {
- #define fl_icmp_code uli_u.icmpt.code
- #define fl_ipsec_spi uli_u.spi
- #define fl_mh_type uli_u.mht.type
-+#define fl_gre_key uli_u.gre_key
- __u32 secid; /* used by xfrm; see secid.txt */
- } __attribute__((__aligned__(BITS_PER_LONG/8)));
-
-diff --git a/include/net/xfrm.h b/include/net/xfrm.h
-index fc8f36d..1a57ff9 100644
---- a/include/net/xfrm.h
-+++ b/include/net/xfrm.h
-@@ -805,6 +805,9 @@ __be16 xfrm_flowi_sport(struct flowi *fl)
- case IPPROTO_MH:
- port = htons(fl->fl_mh_type);
- break;
-+ case IPPROTO_GRE:
-+ port = htonl(fl->fl_gre_key) >> 16;
-+ break;
- default:
- port = 0; /*XXX*/
- }
-@@ -826,6 +829,9 @@ __be16 xfrm_flowi_dport(struct flowi *fl)
- case IPPROTO_ICMPV6:
- port = htons(fl->fl_icmp_code);
- break;
-+ case IPPROTO_GRE:
-+ port = htonl(fl->fl_gre_key) & 0xffff;
-+ break;
- default:
- port = 0; /*XXX*/
- }
-diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
-index 32618e1..d490d67 100644
---- a/net/ipv4/ip_gre.c
-+++ b/net/ipv4/ip_gre.c
-@@ -739,7 +739,8 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
- { .daddr = dst,
- .saddr = tiph->saddr,
- .tos = RT_TOS(tos) } },
-- .proto = IPPROTO_GRE };
-+ .proto = IPPROTO_GRE,
-+ .fl_gre_key = tunnel->parms.o_key };
- if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
- stats->tx_carrier_errors++;
- goto tx_error;
-@@ -912,7 +913,8 @@ static int ipgre_tunnel_bind_dev(struct net_device *dev)
- { .daddr = iph->daddr,
- .saddr = iph->saddr,
- .tos = RT_TOS(iph->tos) } },
-- .proto = IPPROTO_GRE };
-+ .proto = IPPROTO_GRE,
-+ .fl_gre_key = tunnel->parms.o_key };
- struct rtable *rt;
- if (!ip_route_output_key(dev_net(dev), &rt, &fl)) {
- tdev = rt->u.dst.dev;
-@@ -1170,7 +1172,8 @@ static int ipgre_open(struct net_device *dev)
- { .daddr = t->parms.iph.daddr,
- .saddr = t->parms.iph.saddr,
- .tos = RT_TOS(t->parms.iph.tos) } },
-- .proto = IPPROTO_GRE };
-+ .proto = IPPROTO_GRE,
-+ .fl_gre_key = t->parms.o_key };
- struct rtable *rt;
- if (ip_route_output_key(dev_net(dev), &rt, &fl))
- return -EADDRNOTAVAIL;
-diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
-index 23883a4..ef36364 100644
---- a/net/ipv4/xfrm4_policy.c
-+++ b/net/ipv4/xfrm4_policy.c
-@@ -11,6 +11,7 @@
- #include <linux/err.h>
- #include <linux/kernel.h>
- #include <linux/inetdevice.h>
-+#include <linux/if_tunnel.h>
- #include <net/dst.h>
- #include <net/xfrm.h>
- #include <net/ip.h>
-@@ -158,6 +159,20 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
- fl->fl_ipsec_spi = htonl(ntohs(ipcomp_hdr[1]));
- }
- break;
-+
-+ case IPPROTO_GRE:
-+ if (pskb_may_pull(skb, xprth + 12 - skb->data)) {
-+ __be16 *greflags = (__be16 *)xprth;
-+ __be32 *gre_hdr = (__be32 *)xprth;
-+
-+ if (greflags[0] & GRE_KEY) {
-+ if (greflags[0] & GRE_CSUM)
-+ gre_hdr++;
-+ fl->fl_gre_key = gre_hdr[1];
-+ }
-+ }
-+ break;
-+
- default:
- fl->fl_ipsec_spi = 0;
- break;
---
-1.7.1
-
diff --git a/main/linux-grsec/0004-arp-flush-arp-cache-on-device-change.patch b/main/linux-grsec/0004-arp-flush-arp-cache-on-device-change.patch
deleted file mode 100644
index 85161ea3a39..00000000000
--- a/main/linux-grsec/0004-arp-flush-arp-cache-on-device-change.patch
+++ /dev/null
@@ -1,29 +0,0 @@
-From 8a0e3ea4924059a7268446177d6869e3399adbb2 Mon Sep 17 00:00:00 2001
-From: Timo Teras <timo.teras@iki.fi>
-Date: Mon, 12 Apr 2010 13:46:45 +0000
-Subject: [PATCH 04/18] arp: flush arp cache on device change
-
-If IFF_NOARP is changed, we must flush the arp cache.
-
-Signed-off-by: Timo Teras <timo.teras@iki.fi>
----
- net/ipv4/arp.c | 3 +++
- 1 files changed, 3 insertions(+), 0 deletions(-)
-
-diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
-index 4e80f33..580bfc3 100644
---- a/net/ipv4/arp.c
-+++ b/net/ipv4/arp.c
-@@ -1200,6 +1200,9 @@ static int arp_netdev_event(struct notifier_block *this, unsigned long event, vo
- neigh_changeaddr(&arp_tbl, dev);
- rt_cache_flush(dev_net(dev), 0);
- break;
-+ case NETDEV_CHANGE:
-+ neigh_changeaddr(&arp_tbl, dev);
-+ break;
- default:
- break;
- }
---
-1.7.0.2
-
diff --git a/main/linux-grsec/0004-staging-hv-fix-netvsc-sleeping-while-atomic.patch b/main/linux-grsec/0004-staging-hv-fix-netvsc-sleeping-while-atomic.patch
deleted file mode 100644
index 3ba0a1e3142..00000000000
--- a/main/linux-grsec/0004-staging-hv-fix-netvsc-sleeping-while-atomic.patch
+++ /dev/null
@@ -1,42 +0,0 @@
-Subject: [PATCH] staging: hv: fix netvsc sleeping while atomic
-Date: Fri, 17 Dec 2010 11:40:24 +0200
-Message-Id: <1292578824-14408-1-git-send-email-timo.teras@iki.fi>
-X-Mailer: git-send-email 1.7.1
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-X-Virus-Scanned: ClamAV using ClamSMTP
-Status: O
-Content-Length: 845
-Lines: 29
-
-The channel callbacks are called directly from vmbus_event_dpc
-which runs in tasklet context. These callbacks need to use
-GFP_ATOMIC.
-
-Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=16701
-
-Cc: Hank Janssen <hjanssen@microsoft.com>
-Cc: Haiyang Zhang <haiyangz@microsoft.com>
-Signed-off-by: Timo Teräs <timo.teras@iki.fi>
----
- drivers/staging/hv/netvsc.c | 2 +-
- 1 files changed, 1 insertions(+), 1 deletions(-)
-
-diff --git a/drivers/staging/hv/netvsc.c b/drivers/staging/hv/netvsc.c
-index 8022781..3784923 100644
---- a/drivers/staging/hv/netvsc.c
-+++ b/drivers/staging/hv/netvsc.c
-@@ -1236,7 +1236,7 @@ static void NetVscOnChannelCallback(void *Context)
- /* ASSERT(device); */
-
- packet = kzalloc(NETVSC_PACKET_SIZE * sizeof(unsigned char),
-- GFP_KERNEL);
-+ GFP_ATOMIC);
- if (!packet)
- return;
- buffer = packet;
---
-1.7.1
-
-
diff --git a/main/linux-grsec/APKBUILD b/main/linux-grsec/APKBUILD
index 681ab859519..9c21db33e49 100644
--- a/main/linux-grsec/APKBUILD
+++ b/main/linux-grsec/APKBUILD
@@ -2,9 +2,9 @@
_flavor=grsec
pkgname=linux-${_flavor}
-pkgver=2.6.35.10
-_kernver=2.6.35
-pkgrel=3
+pkgver=2.6.37
+_kernver=2.6.37
+pkgrel=0
pkgdesc="Linux kernel with grsecurity"
url=http://grsecurity.net
depends="mkinitfs linux-firmware"
@@ -13,17 +13,8 @@ options="!strip"
_config=${config:-kernelconfig.${CARCH}}
install=
source="ftp://ftp.kernel.org/pub/linux/kernel/v2.6/linux-$_kernver.tar.bz2
- http://www.kernel.org/pub/linux/kernel/v2.6/longterm/v${pkgver%.*}/patch-$pkgver.bz2
- grsecurity-2.2.0-$pkgver-unofficial.patch
- 0001-xfrm-use-gre-key-as-flow-upper-protocol-info.patch
- xfrm-fix-gre-key-endianess.patch
- 0004-arp-flush-arp-cache-on-device-change.patch
- r8169-add-gro-support.patch
+ grsecurity-2.2.1-2.6.37-201102121148.patch
- 0001-Staging-hv-fix-sleeping-while-atomic-issue.patch
- 0004-staging-hv-fix-netvsc-sleeping-while-atomic.patch
-
- setlocalversion.patch
kernelconfig.x86
kernelconfig.x86_64
"
@@ -146,15 +137,7 @@ firmware() {
mv "$pkgdir"/lib/firmware "$subpkgdir"/lib/
}
-md5sums="091abeb4684ce03d1d936851618687b6 linux-2.6.35.tar.bz2
-0741b3219a009fdfc41c766416007fcf patch-2.6.35.10.bz2
-3855791bfc2ee4fff05fc3c783a3a477 grsecurity-2.2.0-2.6.35.10-unofficial.patch
-eed5bd98c0a3b976891c897763eceff5 0001-xfrm-use-gre-key-as-flow-upper-protocol-info.patch
-ea7a7eb2775b71ae5ef24d029a4905bd xfrm-fix-gre-key-endianess.patch
-776adeeb5272093574f8836c5037dd7d 0004-arp-flush-arp-cache-on-device-change.patch
-139b39da44ecb577275be53d7d365949 r8169-add-gro-support.patch
-648d8b477248f233c318a3b7a961febf 0001-Staging-hv-fix-sleeping-while-atomic-issue.patch
-7cae2d1e1947fa57d7aaaf31c649471c 0004-staging-hv-fix-netvsc-sleeping-while-atomic.patch
-8c224ba0cdf0aa572c7eb50379435be4 setlocalversion.patch
+md5sums="c8ee37b4fdccdb651e0603d35350b434 linux-2.6.37.tar.bz2
+f56bdcd16fa540cddf075be6842edaaa grsecurity-2.2.1-2.6.37-201102121148.patch
b331582845fd4a702591b67eec47d687 kernelconfig.x86
1f9af7129d5b44eb3c3a60237af4ef70 kernelconfig.x86_64"
diff --git a/main/linux-grsec/grsecurity-2.2.0-2.6.35.10-unofficial.patch b/main/linux-grsec/grsecurity-2.2.1-2.6.37-201102121148.patch
index bfdddfb8fd4..e66397d24ca 100644
--- a/main/linux-grsec/grsecurity-2.2.0-2.6.35.10-unofficial.patch
+++ b/main/linux-grsec/grsecurity-2.2.1-2.6.37-201102121148.patch
@@ -1,198 +1,6 @@
-diff --git a/Documentation/dontdiff b/Documentation/dontdiff
-index d9bcffd..32fb41b 100644
---- a/Documentation/dontdiff
-+++ b/Documentation/dontdiff
-@@ -3,6 +3,7 @@
- *.bin
- *.cpio
- *.csp
-+*.dbg
- *.dsp
- *.dvi
- *.elf
-@@ -38,8 +39,10 @@
- *.tab.h
- *.tex
- *.ver
-+*.vim
- *.xml
- *_MODULES
-+*_reg_safe.h
- *_vga16.c
- *~
- *.9
-@@ -49,11 +52,16 @@
- 53c700_d.h
- CVS
- ChangeSet
-+GPATH
-+GRTAGS
-+GSYMS
-+GTAGS
- Image
- Kerntypes
- Module.markers
- Module.symvers
- PENDING
-+PERF*
- SCCS
- System.map*
- TAGS
-@@ -76,7 +84,10 @@ btfixupprep
- build
- bvmlinux
- bzImage*
-+capflags.c
- classlist.h*
-+clut_vga16.c
-+common-cmds.h
- comp*.log
- compile.h*
- conf
-@@ -100,19 +111,22 @@ fore200e_mkfirm
- fore200e_pca_fw.c*
- gconf
- gen-devlist
-+gen-kdb_cmds.c
- gen_crc32table
- gen_init_cpio
- generated
- genheaders
- genksyms
- *_gray256.c
-+hash
- ihex2fw
- ikconfig.h*
-+inat-tables.c
- initramfs_data.cpio
-+initramfs_data.cpio.bz2
- initramfs_data.cpio.gz
- initramfs_list
- kallsyms
--kconfig
- keywords.c
- ksym.c*
- ksym.h*
-@@ -136,10 +150,13 @@ mkboot
- mkbugboot
- mkcpustr
- mkdep
-+mkpiggy
- mkprep
-+mkregtable
- mktables
- mktree
- modpost
-+modules.builtin
- modules.order
- modversions.h*
- ncscope.*
-@@ -151,7 +168,9 @@ parse.h
- patches*
- pca200e.bin
- pca200e_ecd.bin2
-+perf-archive
- piggy.gz
-+piggy.S
- piggyback
- pnmtologo
- ppc_defs.h*
-@@ -160,12 +179,14 @@ qconf
- raid6altivec*.c
- raid6int*.c
- raid6tables.c
-+regdb.c
- relocs
- series
- setup
- setup.bin
- setup.elf
- sImage
-+slabinfo
- sm_tbl*
- split-include
- syscalltab.h
-@@ -189,14 +210,20 @@ version.h*
- vmlinux
- vmlinux-*
- vmlinux.aout
-+vmlinux.bin.all
-+vmlinux.bin.bz2
- vmlinux.lds
-+vmlinux.relocs
-+voffset.h
- vsyscall.lds
- vsyscall_32.lds
- wanxlfw.inc
- uImage
- unifdef
-+utsrelease.h
- wakeup.bin
- wakeup.elf
- wakeup.lds
- zImage*
- zconf.hash.c
-+zoffset.h
-diff --git a/Documentation/filesystems/sysfs.txt b/Documentation/filesystems/sysfs.txt
-index 931c806..e6f6ff1 100644
---- a/Documentation/filesystems/sysfs.txt
-+++ b/Documentation/filesystems/sysfs.txt
-@@ -123,8 +123,8 @@ set of sysfs operations for forwarding read and write calls to the
- show and store methods of the attribute owners.
-
- struct sysfs_ops {
-- ssize_t (*show)(struct kobject *, struct attribute *, char *);
-- ssize_t (*store)(struct kobject *, struct attribute *, const char *);
-+ ssize_t (* const show)(struct kobject *, struct attribute *, char *);
-+ ssize_t (* const store)(struct kobject *, struct attribute *, const char *);
- };
-
- [ Subsystems should have already defined a struct kobj_type as a
-diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
-index 2b2407d..4ebd036 100644
---- a/Documentation/kernel-parameters.txt
-+++ b/Documentation/kernel-parameters.txt
-@@ -1910,6 +1910,12 @@ and is between 256 and 4096 characters. It is defined in the file
- the specified number of seconds. This is to be used if
- your oopses keep scrolling off the screen.
-
-+ pax_nouderef [X86-32] disables UDEREF. Most likely needed under certain
-+ virtualization environments that don't cope well with the
-+ expand down segment used by UDEREF on X86-32.
-+
-+ pax_softmode= [X86-32] 0/1 to disable/enable PaX softmode on boot already.
-+
- pcbit= [HW,ISDN]
-
- pcd. [PARIDE]
-diff --git a/Makefile b/Makefile
-index 6538501..6616857 100644
---- a/Makefile
-+++ b/Makefile
-@@ -230,8 +230,8 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
-
- HOSTCC = gcc
- HOSTCXX = g++
--HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
--HOSTCXXFLAGS = -O2
-+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
-+HOSTCXXFLAGS = -O2 -fno-delete-null-pointer-checks
-
- # Decide whether to build built-in, modular, or both.
- # Normally, just do built-in.
-@@ -650,7 +650,7 @@ export mod_strip_cmd
-
-
- ifeq ($(KBUILD_EXTMOD),)
--core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
-+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
-
- vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
- $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
-diff --git a/arch/alpha/include/asm/dma-mapping.h b/arch/alpha/include/asm/dma-mapping.h
-index 1bce816..d5b403b 100644
---- a/arch/alpha/include/asm/dma-mapping.h
-+++ b/arch/alpha/include/asm/dma-mapping.h
+diff -urNp linux-2.6.37/arch/alpha/include/asm/dma-mapping.h linux-2.6.37/arch/alpha/include/asm/dma-mapping.h
+--- linux-2.6.37/arch/alpha/include/asm/dma-mapping.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/alpha/include/asm/dma-mapping.h 2011-01-17 02:41:00.000000000 -0500
@@ -3,9 +3,9 @@
#include <linux/dma-attrs.h>
@@ -205,11 +13,10 @@ index 1bce816..d5b403b 100644
{
return dma_ops;
}
-diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
-index 9baae8a..8d8bc6b 100644
---- a/arch/alpha/include/asm/elf.h
-+++ b/arch/alpha/include/asm/elf.h
-@@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+diff -urNp linux-2.6.37/arch/alpha/include/asm/elf.h linux-2.6.37/arch/alpha/include/asm/elf.h
+--- linux-2.6.37/arch/alpha/include/asm/elf.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/alpha/include/asm/elf.h 2011-01-17 02:41:00.000000000 -0500
+@@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
#define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
@@ -223,10 +30,9 @@ index 9baae8a..8d8bc6b 100644
/* $0 is set by ld.so to a pointer to a function which might be
registered using atexit. This provides a mean for the dynamic
linker to call DT_FINI functions for shared libraries that have
-diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
-index 71a2432..bb30d23 100644
---- a/arch/alpha/include/asm/pgtable.h
-+++ b/arch/alpha/include/asm/pgtable.h
+diff -urNp linux-2.6.37/arch/alpha/include/asm/pgtable.h linux-2.6.37/arch/alpha/include/asm/pgtable.h
+--- linux-2.6.37/arch/alpha/include/asm/pgtable.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/alpha/include/asm/pgtable.h 2011-01-17 02:41:00.000000000 -0500
@@ -101,6 +101,17 @@ struct vm_area_struct;
#define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
#define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
@@ -245,11 +51,10 @@ index 71a2432..bb30d23 100644
#define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
#define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
-diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
-index ebc3c89..20cfa63 100644
---- a/arch/alpha/kernel/module.c
-+++ b/arch/alpha/kernel/module.c
-@@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
+diff -urNp linux-2.6.37/arch/alpha/kernel/module.c linux-2.6.37/arch/alpha/kernel/module.c
+--- linux-2.6.37/arch/alpha/kernel/module.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/alpha/kernel/module.c 2011-01-17 02:41:00.000000000 -0500
+@@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
/* The small sections were sorted to the end of the segment.
The following should definitely cover them. */
@@ -258,11 +63,10 @@ index ebc3c89..20cfa63 100644
got = sechdrs[me->arch.gotsecindex].sh_addr;
for (i = 0; i < n; i++) {
-diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
-index de9d397..22afe8a 100644
---- a/arch/alpha/kernel/osf_sys.c
-+++ b/arch/alpha/kernel/osf_sys.c
-@@ -1170,7 +1170,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
+diff -urNp linux-2.6.37/arch/alpha/kernel/osf_sys.c linux-2.6.37/arch/alpha/kernel/osf_sys.c
+--- linux-2.6.37/arch/alpha/kernel/osf_sys.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/alpha/kernel/osf_sys.c 2011-01-17 02:41:00.000000000 -0500
+@@ -1165,7 +1165,7 @@ arch_get_unmapped_area_1(unsigned long a
/* At this point: (!vma || addr < vma->vm_end). */
if (limit - len < addr)
return -ENOMEM;
@@ -271,7 +75,7 @@ index de9d397..22afe8a 100644
return addr;
addr = vma->vm_end;
vma = vma->vm_next;
-@@ -1206,6 +1206,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+@@ -1201,6 +1201,10 @@ arch_get_unmapped_area(struct file *filp
merely specific addresses, but regions of memory -- perhaps
this feature should be incorporated into all ports? */
@@ -282,7 +86,7 @@ index de9d397..22afe8a 100644
if (addr) {
addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
if (addr != (unsigned long) -ENOMEM)
-@@ -1213,8 +1217,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+@@ -1208,8 +1212,8 @@ arch_get_unmapped_area(struct file *filp
}
/* Next, try allocating at TASK_UNMAPPED_BASE. */
@@ -293,11 +97,29 @@ index de9d397..22afe8a 100644
if (addr != (unsigned long) -ENOMEM)
return addr;
-diff --git a/arch/alpha/kernel/pci-noop.c b/arch/alpha/kernel/pci-noop.c
-index 246100e..f05bd14 100644
---- a/arch/alpha/kernel/pci-noop.c
-+++ b/arch/alpha/kernel/pci-noop.c
-@@ -173,7 +173,7 @@ static int alpha_noop_set_mask(struct device *dev, u64 mask)
+diff -urNp linux-2.6.37/arch/alpha/kernel/pci_iommu.c linux-2.6.37/arch/alpha/kernel/pci_iommu.c
+--- linux-2.6.37/arch/alpha/kernel/pci_iommu.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/alpha/kernel/pci_iommu.c 2011-01-17 02:41:00.000000000 -0500
+@@ -950,7 +950,7 @@ static int alpha_pci_set_mask(struct dev
+ return 0;
+ }
+
+-struct dma_map_ops alpha_pci_ops = {
++const struct dma_map_ops alpha_pci_ops = {
+ .alloc_coherent = alpha_pci_alloc_coherent,
+ .free_coherent = alpha_pci_free_coherent,
+ .map_page = alpha_pci_map_page,
+@@ -962,5 +962,5 @@ struct dma_map_ops alpha_pci_ops = {
+ .set_dma_mask = alpha_pci_set_mask,
+ };
+
+-struct dma_map_ops *dma_ops = &alpha_pci_ops;
++const struct dma_map_ops *dma_ops = &alpha_pci_ops;
+ EXPORT_SYMBOL(dma_ops);
+diff -urNp linux-2.6.37/arch/alpha/kernel/pci-noop.c linux-2.6.37/arch/alpha/kernel/pci-noop.c
+--- linux-2.6.37/arch/alpha/kernel/pci-noop.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/alpha/kernel/pci-noop.c 2011-01-17 02:41:00.000000000 -0500
+@@ -173,7 +173,7 @@ static int alpha_noop_set_mask(struct de
return 0;
}
@@ -315,31 +137,10 @@ index 246100e..f05bd14 100644
EXPORT_SYMBOL(dma_ops);
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
-diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c
-index d1dbd9a..664a19c 100644
---- a/arch/alpha/kernel/pci_iommu.c
-+++ b/arch/alpha/kernel/pci_iommu.c
-@@ -950,7 +950,7 @@ static int alpha_pci_set_mask(struct device *dev, u64 mask)
- return 0;
- }
-
--struct dma_map_ops alpha_pci_ops = {
-+const struct dma_map_ops alpha_pci_ops = {
- .alloc_coherent = alpha_pci_alloc_coherent,
- .free_coherent = alpha_pci_free_coherent,
- .map_page = alpha_pci_map_page,
-@@ -962,5 +962,5 @@ struct dma_map_ops alpha_pci_ops = {
- .set_dma_mask = alpha_pci_set_mask,
- };
-
--struct dma_map_ops *dma_ops = &alpha_pci_ops;
-+const struct dma_map_ops *dma_ops = &alpha_pci_ops;
- EXPORT_SYMBOL(dma_ops);
-diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
-index fadd5f8..3168191 100644
---- a/arch/alpha/mm/fault.c
-+++ b/arch/alpha/mm/fault.c
-@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
+diff -urNp linux-2.6.37/arch/alpha/mm/fault.c linux-2.6.37/arch/alpha/mm/fault.c
+--- linux-2.6.37/arch/alpha/mm/fault.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/alpha/mm/fault.c 2011-01-17 02:41:00.000000000 -0500
+@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
__reload_thread(pcb);
}
@@ -464,7 +265,7 @@ index fadd5f8..3168191 100644
/*
* This routine handles page faults. It determines the address,
-@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
+@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
good_area:
si_code = SEGV_ACCERR;
if (cause < 0) {
@@ -495,11 +296,10 @@ index fadd5f8..3168191 100644
} else if (!cause) {
/* Allow reads even for write-only mappings */
if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
-diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
-index 51662fe..2939eb4 100644
---- a/arch/arm/include/asm/elf.h
-+++ b/arch/arm/include/asm/elf.h
-@@ -111,7 +111,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
+diff -urNp linux-2.6.37/arch/arm/include/asm/elf.h linux-2.6.37/arch/arm/include/asm/elf.h
+--- linux-2.6.37/arch/arm/include/asm/elf.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/arm/include/asm/elf.h 2011-01-17 02:41:00.000000000 -0500
+@@ -113,7 +113,14 @@ int dump_task_regs(struct task_struct *t
the loader. We need to make sure that it is out of the way of the program
that it will "exec", and that there is sufficient room for the brk. */
@@ -515,10 +315,20 @@ index 51662fe..2939eb4 100644
/* When the program starts, a1 contains a pointer to a function to be
registered with atexit, as per the SVR4 ABI. A value of 0 means we
-diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
-index e51b1e8..32a3113 100644
---- a/arch/arm/include/asm/kmap_types.h
-+++ b/arch/arm/include/asm/kmap_types.h
+@@ -123,10 +130,6 @@ int dump_task_regs(struct task_struct *t
+ extern void elf_set_personality(const struct elf32_hdr *);
+ #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
+
+-struct mm_struct;
+-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
+-#define arch_randomize_brk arch_randomize_brk
+-
+ extern int vectors_user_mapping(void);
+ #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
+ #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
+diff -urNp linux-2.6.37/arch/arm/include/asm/kmap_types.h linux-2.6.37/arch/arm/include/asm/kmap_types.h
+--- linux-2.6.37/arch/arm/include/asm/kmap_types.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/arm/include/asm/kmap_types.h 2011-01-17 02:41:00.000000000 -0500
@@ -21,6 +21,7 @@ enum km_type {
KM_L1_CACHE,
KM_L2_CACHE,
@@ -527,11 +337,10 @@ index e51b1e8..32a3113 100644
KM_TYPE_NR
};
-diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
-index 33e4a48..243db72 100644
---- a/arch/arm/include/asm/uaccess.h
-+++ b/arch/arm/include/asm/uaccess.h
-@@ -403,6 +403,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
+diff -urNp linux-2.6.37/arch/arm/include/asm/uaccess.h linux-2.6.37/arch/arm/include/asm/uaccess.h
+--- linux-2.6.37/arch/arm/include/asm/uaccess.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/arm/include/asm/uaccess.h 2011-01-17 02:41:00.000000000 -0500
+@@ -403,6 +403,9 @@ extern unsigned long __must_check __strn
static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
{
@@ -541,7 +350,7 @@ index 33e4a48..243db72 100644
if (access_ok(VERIFY_READ, from, n))
n = __copy_from_user(to, from, n);
else /* security hole - plug it */
-@@ -412,6 +415,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
+@@ -412,6 +415,9 @@ static inline unsigned long __must_check
static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
{
@@ -551,11 +360,10 @@ index 33e4a48..243db72 100644
if (access_ok(VERIFY_WRITE, to, n))
n = __copy_to_user(to, from, n);
return n;
-diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
-index c868a88..7c22c9f 100644
---- a/arch/arm/kernel/kgdb.c
-+++ b/arch/arm/kernel/kgdb.c
-@@ -208,7 +208,7 @@ void kgdb_arch_exit(void)
+diff -urNp linux-2.6.37/arch/arm/kernel/kgdb.c linux-2.6.37/arch/arm/kernel/kgdb.c
+--- linux-2.6.37/arch/arm/kernel/kgdb.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/arm/kernel/kgdb.c 2011-01-17 02:41:00.000000000 -0500
+@@ -246,7 +246,7 @@ void kgdb_arch_exit(void)
* and we handle the normal undef case within the do_undefinstr
* handler.
*/
@@ -564,11 +372,34 @@ index c868a88..7c22c9f 100644
#ifndef __ARMEB__
.gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7}
#else /* ! __ARMEB__ */
-diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
-index 6156689..fca5b29 100644
---- a/arch/arm/mach-at91/pm.c
-+++ b/arch/arm/mach-at91/pm.c
-@@ -294,7 +294,7 @@ static void at91_pm_end(void)
+diff -urNp linux-2.6.37/arch/arm/kernel/process.c linux-2.6.37/arch/arm/kernel/process.c
+--- linux-2.6.37/arch/arm/kernel/process.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/arm/kernel/process.c 2011-01-17 02:41:00.000000000 -0500
+@@ -28,7 +28,6 @@
+ #include <linux/tick.h>
+ #include <linux/utsname.h>
+ #include <linux/uaccess.h>
+-#include <linux/random.h>
+ #include <linux/hw_breakpoint.h>
+
+ #include <asm/cacheflush.h>
+@@ -477,12 +476,6 @@ unsigned long get_wchan(struct task_stru
+ return 0;
+ }
+
+-unsigned long arch_randomize_brk(struct mm_struct *mm)
+-{
+- unsigned long range_end = mm->brk + 0x02000000;
+- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
+-}
+-
+ /*
+ * The vectors page is always readable from user space for the
+ * atomic helpers and the signal restart code. Let's declare a mapping
+diff -urNp linux-2.6.37/arch/arm/mach-at91/pm.c linux-2.6.37/arch/arm/mach-at91/pm.c
+--- linux-2.6.37/arch/arm/mach-at91/pm.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/arm/mach-at91/pm.c 2011-01-17 02:41:00.000000000 -0500
+@@ -301,7 +301,7 @@ static void at91_pm_end(void)
}
@@ -577,11 +408,10 @@ index 6156689..fca5b29 100644
.valid = at91_pm_valid_state,
.begin = at91_pm_begin,
.enter = at91_pm_enter,
-diff --git a/arch/arm/mach-davinci/pm.c b/arch/arm/mach-davinci/pm.c
-index fab953b..1bd73a0 100644
---- a/arch/arm/mach-davinci/pm.c
-+++ b/arch/arm/mach-davinci/pm.c
-@@ -110,7 +110,7 @@ static int davinci_pm_enter(suspend_state_t state)
+diff -urNp linux-2.6.37/arch/arm/mach-davinci/pm.c linux-2.6.37/arch/arm/mach-davinci/pm.c
+--- linux-2.6.37/arch/arm/mach-davinci/pm.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/arm/mach-davinci/pm.c 2011-01-17 02:41:00.000000000 -0500
+@@ -110,7 +110,7 @@ static int davinci_pm_enter(suspend_stat
return ret;
}
@@ -590,23 +420,45 @@ index fab953b..1bd73a0 100644
.enter = davinci_pm_enter,
.valid = suspend_valid_only_mem,
};
-diff --git a/arch/arm/mach-msm/last_radio_log.c b/arch/arm/mach-msm/last_radio_log.c
-index b64ba5a..fb24fa6 100644
---- a/arch/arm/mach-msm/last_radio_log.c
-+++ b/arch/arm/mach-msm/last_radio_log.c
-@@ -47,6 +47,7 @@ static ssize_t last_radio_log_read(struct file *file, char __user *buf,
+diff -urNp linux-2.6.37/arch/arm/mach-imx/pm-imx27.c linux-2.6.37/arch/arm/mach-imx/pm-imx27.c
+--- linux-2.6.37/arch/arm/mach-imx/pm-imx27.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/arm/mach-imx/pm-imx27.c 2011-01-17 02:41:00.000000000 -0500
+@@ -32,7 +32,7 @@ static int mx27_suspend_enter(suspend_st
+ return 0;
+ }
+
+-static struct platform_suspend_ops mx27_suspend_ops = {
++static const struct platform_suspend_ops mx27_suspend_ops = {
+ .enter = mx27_suspend_enter,
+ .valid = suspend_valid_only_mem,
+ };
+diff -urNp linux-2.6.37/arch/arm/mach-lpc32xx/pm.c linux-2.6.37/arch/arm/mach-lpc32xx/pm.c
+--- linux-2.6.37/arch/arm/mach-lpc32xx/pm.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/arm/mach-lpc32xx/pm.c 2011-01-17 02:41:00.000000000 -0500
+@@ -123,7 +123,7 @@ static int lpc32xx_pm_enter(suspend_stat
+ return 0;
+ }
+
+-static struct platform_suspend_ops lpc32xx_pm_ops = {
++static const struct platform_suspend_ops lpc32xx_pm_ops = {
+ .valid = suspend_valid_only_mem,
+ .enter = lpc32xx_pm_enter,
+ };
+diff -urNp linux-2.6.37/arch/arm/mach-msm/last_radio_log.c linux-2.6.37/arch/arm/mach-msm/last_radio_log.c
+--- linux-2.6.37/arch/arm/mach-msm/last_radio_log.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/arm/mach-msm/last_radio_log.c 2011-01-17 02:41:00.000000000 -0500
+@@ -47,6 +47,7 @@ static ssize_t last_radio_log_read(struc
return count;
}
+/* cannot be const, see msm_init_last_radio_log */
static struct file_operations last_radio_log_fops = {
- .read = last_radio_log_read
- };
-diff --git a/arch/arm/mach-omap1/pm.c b/arch/arm/mach-omap1/pm.c
-index b1d3f9f..6e126ce 100644
---- a/arch/arm/mach-omap1/pm.c
-+++ b/arch/arm/mach-omap1/pm.c
-@@ -647,7 +647,7 @@ static struct irqaction omap_wakeup_irq = {
+ .read = last_radio_log_read,
+ .llseek = default_llseek,
+diff -urNp linux-2.6.37/arch/arm/mach-omap1/pm.c linux-2.6.37/arch/arm/mach-omap1/pm.c
+--- linux-2.6.37/arch/arm/mach-omap1/pm.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/arm/mach-omap1/pm.c 2011-01-17 02:41:00.000000000 -0500
+@@ -647,7 +647,7 @@ static struct irqaction omap_wakeup_irq
@@ -615,24 +467,22 @@ index b1d3f9f..6e126ce 100644
.prepare = omap_pm_prepare,
.enter = omap_pm_enter,
.finish = omap_pm_finish,
-diff --git a/arch/arm/mach-omap2/pm24xx.c b/arch/arm/mach-omap2/pm24xx.c
-index e321281..57b0f5c 100644
---- a/arch/arm/mach-omap2/pm24xx.c
-+++ b/arch/arm/mach-omap2/pm24xx.c
-@@ -325,7 +325,7 @@ static void omap2_pm_finish(void)
- enable_hlt();
+diff -urNp linux-2.6.37/arch/arm/mach-omap2/pm24xx.c linux-2.6.37/arch/arm/mach-omap2/pm24xx.c
+--- linux-2.6.37/arch/arm/mach-omap2/pm24xx.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/arm/mach-omap2/pm24xx.c 2011-01-17 02:41:00.000000000 -0500
+@@ -359,7 +359,7 @@ static void omap2_pm_end(void)
+ suspend_state = PM_SUSPEND_ON;
}
-static struct platform_suspend_ops omap_pm_ops = {
+static const struct platform_suspend_ops omap_pm_ops = {
+ .begin = omap2_pm_begin,
.prepare = omap2_pm_prepare,
.enter = omap2_pm_enter,
- .finish = omap2_pm_finish,
-diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
-index b88737f..1514908 100644
---- a/arch/arm/mach-omap2/pm34xx.c
-+++ b/arch/arm/mach-omap2/pm34xx.c
-@@ -669,7 +669,7 @@ static void omap3_pm_end(void)
+diff -urNp linux-2.6.37/arch/arm/mach-omap2/pm34xx.c linux-2.6.37/arch/arm/mach-omap2/pm34xx.c
+--- linux-2.6.37/arch/arm/mach-omap2/pm34xx.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/arm/mach-omap2/pm34xx.c 2011-01-17 02:41:00.000000000 -0500
+@@ -617,7 +617,7 @@ static void omap3_pm_end(void)
return;
}
@@ -641,11 +491,22 @@ index b88737f..1514908 100644
.begin = omap3_pm_begin,
.end = omap3_pm_end,
.prepare = omap3_pm_prepare,
-diff --git a/arch/arm/mach-pnx4008/pm.c b/arch/arm/mach-pnx4008/pm.c
-index ee3c29c..f3e60a0 100644
---- a/arch/arm/mach-pnx4008/pm.c
-+++ b/arch/arm/mach-pnx4008/pm.c
-@@ -119,7 +119,7 @@ static int pnx4008_pm_valid(suspend_state_t state)
+diff -urNp linux-2.6.37/arch/arm/mach-omap2/pm44xx.c linux-2.6.37/arch/arm/mach-omap2/pm44xx.c
+--- linux-2.6.37/arch/arm/mach-omap2/pm44xx.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/arm/mach-omap2/pm44xx.c 2011-01-17 02:41:00.000000000 -0500
+@@ -75,7 +75,7 @@ static void omap4_pm_end(void)
+ return;
+ }
+
+-static struct platform_suspend_ops omap_pm_ops = {
++static const struct platform_suspend_ops omap_pm_ops = {
+ .begin = omap4_pm_begin,
+ .end = omap4_pm_end,
+ .prepare = omap4_pm_prepare,
+diff -urNp linux-2.6.37/arch/arm/mach-pnx4008/pm.c linux-2.6.37/arch/arm/mach-pnx4008/pm.c
+--- linux-2.6.37/arch/arm/mach-pnx4008/pm.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/arm/mach-pnx4008/pm.c 2011-01-17 02:41:00.000000000 -0500
+@@ -119,7 +119,7 @@ static int pnx4008_pm_valid(suspend_stat
(state == PM_SUSPEND_MEM);
}
@@ -654,10 +515,9 @@ index ee3c29c..f3e60a0 100644
.enter = pnx4008_pm_enter,
.valid = pnx4008_pm_valid,
};
-diff --git a/arch/arm/mach-pxa/pm.c b/arch/arm/mach-pxa/pm.c
-index 166c15f..978e1b2 100644
---- a/arch/arm/mach-pxa/pm.c
-+++ b/arch/arm/mach-pxa/pm.c
+diff -urNp linux-2.6.37/arch/arm/mach-pxa/pm.c linux-2.6.37/arch/arm/mach-pxa/pm.c
+--- linux-2.6.37/arch/arm/mach-pxa/pm.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/arm/mach-pxa/pm.c 2011-01-17 02:41:00.000000000 -0500
@@ -96,7 +96,7 @@ void pxa_pm_finish(void)
pxa_cpu_pm_fns->finish();
}
@@ -667,11 +527,10 @@ index 166c15f..978e1b2 100644
.valid = pxa_pm_valid,
.enter = pxa_pm_enter,
.prepare = pxa_pm_prepare,
-diff --git a/arch/arm/mach-pxa/sharpsl_pm.c b/arch/arm/mach-pxa/sharpsl_pm.c
-index cb47672..0a366cc 100644
---- a/arch/arm/mach-pxa/sharpsl_pm.c
-+++ b/arch/arm/mach-pxa/sharpsl_pm.c
-@@ -891,7 +891,7 @@ static void sharpsl_apm_get_power_status(struct apm_power_info *info)
+diff -urNp linux-2.6.37/arch/arm/mach-pxa/sharpsl_pm.c linux-2.6.37/arch/arm/mach-pxa/sharpsl_pm.c
+--- linux-2.6.37/arch/arm/mach-pxa/sharpsl_pm.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/arm/mach-pxa/sharpsl_pm.c 2011-01-17 02:41:00.000000000 -0500
+@@ -868,7 +868,7 @@ static void sharpsl_apm_get_power_status
}
#ifdef CONFIG_PM
@@ -680,10 +539,9 @@ index cb47672..0a366cc 100644
.prepare = pxa_pm_prepare,
.finish = pxa_pm_finish,
.enter = corgi_pxa_pm_enter,
-diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c
-index c83fdc8..ab9fc44 100644
---- a/arch/arm/mach-sa1100/pm.c
-+++ b/arch/arm/mach-sa1100/pm.c
+diff -urNp linux-2.6.37/arch/arm/mach-sa1100/pm.c linux-2.6.37/arch/arm/mach-sa1100/pm.c
+--- linux-2.6.37/arch/arm/mach-sa1100/pm.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/arm/mach-sa1100/pm.c 2011-01-17 02:41:00.000000000 -0500
@@ -120,7 +120,7 @@ unsigned long sleep_phys_sp(void *sp)
return virt_to_phys(sp);
}
@@ -693,11 +551,10 @@ index c83fdc8..ab9fc44 100644
.enter = sa11x0_pm_enter,
.valid = suspend_valid_only_mem,
};
-diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
-index cbfb2ed..de319e0 100644
---- a/arch/arm/mm/fault.c
-+++ b/arch/arm/mm/fault.c
-@@ -167,6 +167,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
+diff -urNp linux-2.6.37/arch/arm/mm/fault.c linux-2.6.37/arch/arm/mm/fault.c
+--- linux-2.6.37/arch/arm/mm/fault.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/arm/mm/fault.c 2011-01-17 02:41:00.000000000 -0500
+@@ -167,6 +167,13 @@ __do_user_fault(struct task_struct *tsk,
}
#endif
@@ -711,7 +568,7 @@ index cbfb2ed..de319e0 100644
tsk->thread.address = addr;
tsk->thread.error_code = fsr;
tsk->thread.trap_no = 14;
-@@ -364,6 +371,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+@@ -364,6 +371,33 @@ do_page_fault(unsigned long addr, unsign
}
#endif /* CONFIG_MMU */
@@ -745,11 +602,10 @@ index cbfb2ed..de319e0 100644
/*
* First Level Translation Fault Handler
*
-diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
-index f5abc51..7ec524c 100644
---- a/arch/arm/mm/mmap.c
-+++ b/arch/arm/mm/mmap.c
-@@ -63,6 +63,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+diff -urNp linux-2.6.37/arch/arm/mm/mmap.c linux-2.6.37/arch/arm/mm/mmap.c
+--- linux-2.6.37/arch/arm/mm/mmap.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/arm/mm/mmap.c 2011-01-17 02:41:00.000000000 -0500
+@@ -64,6 +64,10 @@ arch_get_unmapped_area(struct file *filp
if (len > TASK_SIZE)
return -ENOMEM;
@@ -760,7 +616,7 @@ index f5abc51..7ec524c 100644
if (addr) {
if (do_align)
addr = COLOUR_ALIGN(addr, pgoff);
-@@ -70,15 +74,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+@@ -71,15 +75,14 @@ arch_get_unmapped_area(struct file *filp
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
@@ -778,9 +634,9 @@ index f5abc51..7ec524c 100644
+ start_addr = addr = mm->mmap_base;
+ mm->cached_hole_size = 0;
}
-
- full_search:
-@@ -94,14 +97,14 @@ full_search:
+ /* 8 bits of randomness in 20 address space bits */
+ if (current->flags & PF_RANDOMIZE)
+@@ -98,14 +101,14 @@ full_search:
* Start a new search - just in case we missed
* some holes.
*/
@@ -798,10 +654,9 @@ index f5abc51..7ec524c 100644
/*
* Remember the place where we stopped the search:
*/
-diff --git a/arch/arm/plat-samsung/pm.c b/arch/arm/plat-samsung/pm.c
-index 27cfca5..5bf3f2f 100644
---- a/arch/arm/plat-samsung/pm.c
-+++ b/arch/arm/plat-samsung/pm.c
+diff -urNp linux-2.6.37/arch/arm/plat-samsung/pm.c linux-2.6.37/arch/arm/plat-samsung/pm.c
+--- linux-2.6.37/arch/arm/plat-samsung/pm.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/arm/plat-samsung/pm.c 2011-01-17 02:41:00.000000000 -0500
@@ -355,7 +355,7 @@ static void s3c_pm_finish(void)
s3c_pm_check_cleanup();
}
@@ -811,11 +666,10 @@ index 27cfca5..5bf3f2f 100644
.enter = s3c_pm_enter,
.prepare = s3c_pm_prepare,
.finish = s3c_pm_finish,
-diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
-index 3b3159b..425ea94 100644
---- a/arch/avr32/include/asm/elf.h
-+++ b/arch/avr32/include/asm/elf.h
-@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
+diff -urNp linux-2.6.37/arch/avr32/include/asm/elf.h linux-2.6.37/arch/avr32/include/asm/elf.h
+--- linux-2.6.37/arch/avr32/include/asm/elf.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/avr32/include/asm/elf.h 2011-01-17 02:41:00.000000000 -0500
+@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpreg
the loader. We need to make sure that it is out of the way of the program
that it will "exec", and that there is sufficient room for the brk. */
@@ -831,10 +685,9 @@ index 3b3159b..425ea94 100644
/* This yields a mask that user programs can use to figure out what
instruction set this CPU supports. This could be done in user space,
-diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
-index b7f5c68..556135c 100644
---- a/arch/avr32/include/asm/kmap_types.h
-+++ b/arch/avr32/include/asm/kmap_types.h
+diff -urNp linux-2.6.37/arch/avr32/include/asm/kmap_types.h linux-2.6.37/arch/avr32/include/asm/kmap_types.h
+--- linux-2.6.37/arch/avr32/include/asm/kmap_types.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/avr32/include/asm/kmap_types.h 2011-01-17 02:41:00.000000000 -0500
@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
D(11) KM_IRQ1,
D(12) KM_SOFTIRQ0,
@@ -845,10 +698,9 @@ index b7f5c68..556135c 100644
};
#undef D
-diff --git a/arch/avr32/mach-at32ap/pm.c b/arch/avr32/mach-at32ap/pm.c
-index f021edf..32d680e 100644
---- a/arch/avr32/mach-at32ap/pm.c
-+++ b/arch/avr32/mach-at32ap/pm.c
+diff -urNp linux-2.6.37/arch/avr32/mach-at32ap/pm.c linux-2.6.37/arch/avr32/mach-at32ap/pm.c
+--- linux-2.6.37/arch/avr32/mach-at32ap/pm.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/avr32/mach-at32ap/pm.c 2011-01-17 02:41:00.000000000 -0500
@@ -176,7 +176,7 @@ out:
return 0;
}
@@ -858,11 +710,10 @@ index f021edf..32d680e 100644
.valid = avr32_pm_valid_state,
.enter = avr32_pm_enter,
};
-diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
-index b61d86d..b7cf88f 100644
---- a/arch/avr32/mm/fault.c
-+++ b/arch/avr32/mm/fault.c
-@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
+diff -urNp linux-2.6.37/arch/avr32/mm/fault.c linux-2.6.37/arch/avr32/mm/fault.c
+--- linux-2.6.37/arch/avr32/mm/fault.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/avr32/mm/fault.c 2011-01-17 02:41:00.000000000 -0500
+@@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
int exception_trace = 1;
@@ -886,7 +737,7 @@ index b61d86d..b7cf88f 100644
/*
* This routine handles page faults. It determines the address and the
* problem, and then passes it off to one of the appropriate routines.
-@@ -157,6 +174,16 @@ bad_area:
+@@ -156,6 +173,16 @@ bad_area:
up_read(&mm->mmap_sem);
if (user_mode(regs)) {
@@ -903,11 +754,10 @@ index b61d86d..b7cf88f 100644
if (exception_trace && printk_ratelimit())
printk("%s%s[%d]: segfault at %08lx pc %08lx "
"sp %08lx ecr %lu\n",
-diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c
-index 08bc44e..a6566ab 100644
---- a/arch/blackfin/kernel/kgdb.c
-+++ b/arch/blackfin/kernel/kgdb.c
-@@ -397,7 +397,7 @@ int kgdb_arch_handle_exception(int vector, int signo,
+diff -urNp linux-2.6.37/arch/blackfin/kernel/kgdb.c linux-2.6.37/arch/blackfin/kernel/kgdb.c
+--- linux-2.6.37/arch/blackfin/kernel/kgdb.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/blackfin/kernel/kgdb.c 2011-01-17 02:41:00.000000000 -0500
+@@ -397,7 +397,7 @@ int kgdb_arch_handle_exception(int vecto
return -1; /* this means that we do not want to exit from the handler */
}
@@ -916,11 +766,10 @@ index 08bc44e..a6566ab 100644
.gdb_bpt_instr = {0xa1},
#ifdef CONFIG_SMP
.flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP,
-diff --git a/arch/blackfin/mach-common/pm.c b/arch/blackfin/mach-common/pm.c
-index ea7f95f..a1b1ba5 100644
---- a/arch/blackfin/mach-common/pm.c
-+++ b/arch/blackfin/mach-common/pm.c
-@@ -232,7 +232,7 @@ static int bfin_pm_enter(suspend_state_t state)
+diff -urNp linux-2.6.37/arch/blackfin/mach-common/pm.c linux-2.6.37/arch/blackfin/mach-common/pm.c
+--- linux-2.6.37/arch/blackfin/mach-common/pm.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/blackfin/mach-common/pm.c 2011-01-17 02:41:00.000000000 -0500
+@@ -233,7 +233,7 @@ static int bfin_pm_enter(suspend_state_t
return 0;
}
@@ -929,11 +778,10 @@ index ea7f95f..a1b1ba5 100644
.enter = bfin_pm_enter,
.valid = bfin_pm_valid,
};
-diff --git a/arch/blackfin/mm/maccess.c b/arch/blackfin/mm/maccess.c
-index b71cebc..e253211 100644
---- a/arch/blackfin/mm/maccess.c
-+++ b/arch/blackfin/mm/maccess.c
-@@ -16,7 +16,7 @@ static int validate_memory_access_address(unsigned long addr, int size)
+diff -urNp linux-2.6.37/arch/blackfin/mm/maccess.c linux-2.6.37/arch/blackfin/mm/maccess.c
+--- linux-2.6.37/arch/blackfin/mm/maccess.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/blackfin/mm/maccess.c 2011-01-17 02:41:00.000000000 -0500
+@@ -16,7 +16,7 @@ static int validate_memory_access_addres
return bfin_mem_access_type(addr, size);
}
@@ -942,7 +790,7 @@ index b71cebc..e253211 100644
{
unsigned long lsrc = (unsigned long)src;
int mem_type;
-@@ -55,7 +55,7 @@ long probe_kernel_read(void *dst, void *src, size_t size)
+@@ -55,7 +55,7 @@ long probe_kernel_read(void *dst, void *
return -EFAULT;
}
@@ -951,10 +799,9 @@ index b71cebc..e253211 100644
{
unsigned long ldst = (unsigned long)dst;
int mem_type;
-diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
-index f8e16b2..c73ff79 100644
---- a/arch/frv/include/asm/kmap_types.h
-+++ b/arch/frv/include/asm/kmap_types.h
+diff -urNp linux-2.6.37/arch/frv/include/asm/kmap_types.h linux-2.6.37/arch/frv/include/asm/kmap_types.h
+--- linux-2.6.37/arch/frv/include/asm/kmap_types.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/frv/include/asm/kmap_types.h 2011-01-17 02:41:00.000000000 -0500
@@ -23,6 +23,7 @@ enum km_type {
KM_IRQ1,
KM_SOFTIRQ0,
@@ -963,11 +810,10 @@ index f8e16b2..c73ff79 100644
KM_TYPE_NR
};
-diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
-index 385fd30..6c3d97e 100644
---- a/arch/frv/mm/elf-fdpic.c
-+++ b/arch/frv/mm/elf-fdpic.c
-@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+diff -urNp linux-2.6.37/arch/frv/mm/elf-fdpic.c linux-2.6.37/arch/frv/mm/elf-fdpic.c
+--- linux-2.6.37/arch/frv/mm/elf-fdpic.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/frv/mm/elf-fdpic.c 2011-01-17 02:41:00.000000000 -0500
+@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
if (addr) {
addr = PAGE_ALIGN(addr);
vma = find_vma(current->mm, addr);
@@ -977,7 +823,7 @@ index 385fd30..6c3d97e 100644
goto success;
}
-@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
for (; vma; vma = vma->vm_next) {
if (addr > limit)
break;
@@ -986,7 +832,7 @@ index 385fd30..6c3d97e 100644
goto success;
addr = vma->vm_end;
}
-@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
for (; vma; vma = vma->vm_next) {
if (addr > limit)
break;
@@ -995,10 +841,9 @@ index 385fd30..6c3d97e 100644
goto success;
addr = vma->vm_end;
}
-diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
-index e4a80d8..11a7ea1 100644
---- a/arch/ia64/hp/common/hwsw_iommu.c
-+++ b/arch/ia64/hp/common/hwsw_iommu.c
+diff -urNp linux-2.6.37/arch/ia64/hp/common/hwsw_iommu.c linux-2.6.37/arch/ia64/hp/common/hwsw_iommu.c
+--- linux-2.6.37/arch/ia64/hp/common/hwsw_iommu.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/ia64/hp/common/hwsw_iommu.c 2011-01-17 02:41:00.000000000 -0500
@@ -17,7 +17,7 @@
#include <linux/swiotlb.h>
#include <asm/machvec.h>
@@ -1008,7 +853,7 @@ index e4a80d8..11a7ea1 100644
/* swiotlb declarations & definitions: */
extern int swiotlb_late_init_with_default_size (size_t size);
-@@ -33,7 +33,7 @@ static inline int use_swiotlb(struct device *dev)
+@@ -33,7 +33,7 @@ static inline int use_swiotlb(struct dev
!sba_dma_ops.dma_supported(dev, *dev->dma_mask);
}
@@ -1017,11 +862,10 @@ index e4a80d8..11a7ea1 100644
{
if (use_swiotlb(dev))
return &swiotlb_dma_ops;
-diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
-index 4ce8d13..62d8118 100644
---- a/arch/ia64/hp/common/sba_iommu.c
-+++ b/arch/ia64/hp/common/sba_iommu.c
-@@ -2097,7 +2097,7 @@ static struct acpi_driver acpi_sba_ioc_driver = {
+diff -urNp linux-2.6.37/arch/ia64/hp/common/sba_iommu.c linux-2.6.37/arch/ia64/hp/common/sba_iommu.c
+--- linux-2.6.37/arch/ia64/hp/common/sba_iommu.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/ia64/hp/common/sba_iommu.c 2011-01-17 02:41:00.000000000 -0500
+@@ -2097,7 +2097,7 @@ static struct acpi_driver acpi_sba_ioc_d
},
};
@@ -1039,10 +883,9 @@ index 4ce8d13..62d8118 100644
.alloc_coherent = sba_alloc_coherent,
.free_coherent = sba_free_coherent,
.map_page = sba_map_page,
-diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
-index 7d09a09..a74b3b0 100644
---- a/arch/ia64/include/asm/dma-mapping.h
-+++ b/arch/ia64/include/asm/dma-mapping.h
+diff -urNp linux-2.6.37/arch/ia64/include/asm/dma-mapping.h linux-2.6.37/arch/ia64/include/asm/dma-mapping.h
+--- linux-2.6.37/arch/ia64/include/asm/dma-mapping.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/ia64/include/asm/dma-mapping.h 2011-01-17 02:41:00.000000000 -0500
@@ -12,7 +12,7 @@
#define ARCH_HAS_DMA_GET_REQUIRED_MASK
@@ -1052,7 +895,7 @@ index 7d09a09..a74b3b0 100644
extern struct ia64_machine_vector ia64_mv;
extern void set_iommu_machvec(void);
-@@ -24,7 +24,7 @@ extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
+@@ -24,7 +24,7 @@ extern void machvec_dma_sync_sg(struct d
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *daddr, gfp_t gfp)
{
@@ -1061,7 +904,7 @@ index 7d09a09..a74b3b0 100644
void *caddr;
caddr = ops->alloc_coherent(dev, size, daddr, gfp);
-@@ -35,7 +35,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+@@ -35,7 +35,7 @@ static inline void *dma_alloc_coherent(s
static inline void dma_free_coherent(struct device *dev, size_t size,
void *caddr, dma_addr_t daddr)
{
@@ -1070,7 +913,7 @@ index 7d09a09..a74b3b0 100644
debug_dma_free_coherent(dev, size, caddr, daddr);
ops->free_coherent(dev, size, caddr, daddr);
}
-@@ -49,13 +49,13 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
+@@ -49,13 +49,13 @@ static inline void dma_free_coherent(str
static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
{
@@ -1086,10 +929,9 @@ index 7d09a09..a74b3b0 100644
return ops->dma_supported(dev, mask);
}
-diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
-index b5298eb..67c6e62 100644
---- a/arch/ia64/include/asm/elf.h
-+++ b/arch/ia64/include/asm/elf.h
+diff -urNp linux-2.6.37/arch/ia64/include/asm/elf.h linux-2.6.37/arch/ia64/include/asm/elf.h
+--- linux-2.6.37/arch/ia64/include/asm/elf.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/ia64/include/asm/elf.h 2011-01-17 02:41:00.000000000 -0500
@@ -42,6 +42,13 @@
*/
#define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
@@ -1104,11 +946,10 @@ index b5298eb..67c6e62 100644
#define PT_IA_64_UNWIND 0x70000001
/* IA-64 relocations: */
-diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
-index 367d299..9ad4279 100644
---- a/arch/ia64/include/asm/machvec.h
-+++ b/arch/ia64/include/asm/machvec.h
-@@ -45,7 +45,7 @@ typedef void ia64_mv_kernel_launch_event_t(void);
+diff -urNp linux-2.6.37/arch/ia64/include/asm/machvec.h linux-2.6.37/arch/ia64/include/asm/machvec.h
+--- linux-2.6.37/arch/ia64/include/asm/machvec.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/ia64/include/asm/machvec.h 2011-01-17 02:41:00.000000000 -0500
+@@ -45,7 +45,7 @@ typedef void ia64_mv_kernel_launch_event
/* DMA-mapping interface: */
typedef void ia64_mv_dma_init (void);
typedef u64 ia64_mv_dma_get_required_mask (struct device *);
@@ -1117,7 +958,7 @@ index 367d299..9ad4279 100644
/*
* WARNING: The legacy I/O space is _architected_. Platforms are
-@@ -251,7 +251,7 @@ extern void machvec_init_from_cmdline(const char *cmdline);
+@@ -251,7 +251,7 @@ extern void machvec_init_from_cmdline(co
# endif /* CONFIG_IA64_GENERIC */
extern void swiotlb_dma_init(void);
@@ -1126,10 +967,9 @@ index 367d299..9ad4279 100644
/*
* Define default versions so we can extend machvec for new platforms without having
-diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
-index c3286f4..ed33359 100644
---- a/arch/ia64/include/asm/pgtable.h
-+++ b/arch/ia64/include/asm/pgtable.h
+diff -urNp linux-2.6.37/arch/ia64/include/asm/pgtable.h linux-2.6.37/arch/ia64/include/asm/pgtable.h
+--- linux-2.6.37/arch/ia64/include/asm/pgtable.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/ia64/include/asm/pgtable.h 2011-01-17 02:41:00.000000000 -0500
@@ -12,7 +12,7 @@
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
@@ -1157,11 +997,10 @@ index c3286f4..ed33359 100644
#define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
#define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
#define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
-diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
-index 449c8c0..432a3d2 100644
---- a/arch/ia64/include/asm/uaccess.h
-+++ b/arch/ia64/include/asm/uaccess.h
-@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
+diff -urNp linux-2.6.37/arch/ia64/include/asm/uaccess.h linux-2.6.37/arch/ia64/include/asm/uaccess.h
+--- linux-2.6.37/arch/ia64/include/asm/uaccess.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/ia64/include/asm/uaccess.h 2011-01-17 02:41:00.000000000 -0500
+@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
const void *__cu_from = (from); \
long __cu_len = (n); \
\
@@ -1170,7 +1009,7 @@ index 449c8c0..432a3d2 100644
__cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
__cu_len; \
})
-@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
+@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
long __cu_len = (n); \
\
__chk_user_ptr(__cu_from); \
@@ -1179,10 +1018,9 @@ index 449c8c0..432a3d2 100644
__cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
__cu_len; \
})
-diff --git a/arch/ia64/kernel/dma-mapping.c b/arch/ia64/kernel/dma-mapping.c
-index f2c1600..969398a 100644
---- a/arch/ia64/kernel/dma-mapping.c
-+++ b/arch/ia64/kernel/dma-mapping.c
+diff -urNp linux-2.6.37/arch/ia64/kernel/dma-mapping.c linux-2.6.37/arch/ia64/kernel/dma-mapping.c
+--- linux-2.6.37/arch/ia64/kernel/dma-mapping.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/ia64/kernel/dma-mapping.c 2011-01-17 02:41:00.000000000 -0500
@@ -3,7 +3,7 @@
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly;
@@ -1201,10 +1039,9 @@ index f2c1600..969398a 100644
{
return dma_ops;
}
-diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
-index 1481b0a..e7d38ff 100644
---- a/arch/ia64/kernel/module.c
-+++ b/arch/ia64/kernel/module.c
+diff -urNp linux-2.6.37/arch/ia64/kernel/module.c linux-2.6.37/arch/ia64/kernel/module.c
+--- linux-2.6.37/arch/ia64/kernel/module.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/ia64/kernel/module.c 2011-01-17 02:41:00.000000000 -0500
@@ -315,8 +315,7 @@ module_alloc (unsigned long size)
void
module_free (struct module *mod, void *module_region)
@@ -1215,7 +1052,7 @@ index 1481b0a..e7d38ff 100644
unw_remove_unwind_table(mod->arch.init_unw_table);
mod->arch.init_unw_table = NULL;
}
-@@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
+@@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
}
static inline int
@@ -1257,7 +1094,7 @@ index 1481b0a..e7d38ff 100644
}
static inline int
-@@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
+@@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_
break;
case RV_BDREL:
@@ -1273,7 +1110,7 @@ index 1481b0a..e7d38ff 100644
break;
case RV_LTV:
-@@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
+@@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
* addresses have been selected...
*/
uint64_t gp;
@@ -1293,10 +1130,9 @@ index 1481b0a..e7d38ff 100644
mod->arch.gp = gp;
DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
}
-diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
-index f6b1ff0..ccacc2f 100644
---- a/arch/ia64/kernel/pci-dma.c
-+++ b/arch/ia64/kernel/pci-dma.c
+diff -urNp linux-2.6.37/arch/ia64/kernel/pci-dma.c linux-2.6.37/arch/ia64/kernel/pci-dma.c
+--- linux-2.6.37/arch/ia64/kernel/pci-dma.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/ia64/kernel/pci-dma.c 2011-01-17 02:41:00.000000000 -0500
@@ -43,7 +43,7 @@ struct device fallback_dev = {
.dma_mask = &fallback_dev.coherent_dma_mask,
};
@@ -1306,11 +1142,10 @@ index f6b1ff0..ccacc2f 100644
static int __init pci_iommu_init(void)
{
-diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c
-index d9485d9..e3deb12 100644
---- a/arch/ia64/kernel/pci-swiotlb.c
-+++ b/arch/ia64/kernel/pci-swiotlb.c
-@@ -22,7 +22,7 @@ static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size,
+diff -urNp linux-2.6.37/arch/ia64/kernel/pci-swiotlb.c linux-2.6.37/arch/ia64/kernel/pci-swiotlb.c
+--- linux-2.6.37/arch/ia64/kernel/pci-swiotlb.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/ia64/kernel/pci-swiotlb.c 2011-01-17 02:41:00.000000000 -0500
+@@ -22,7 +22,7 @@ static void *ia64_swiotlb_alloc_coherent
return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
}
@@ -1319,11 +1154,10 @@ index d9485d9..e3deb12 100644
.alloc_coherent = ia64_swiotlb_alloc_coherent,
.free_coherent = swiotlb_free_coherent,
.map_page = swiotlb_map_page,
-diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
-index 609d500..7dde2a8 100644
---- a/arch/ia64/kernel/sys_ia64.c
-+++ b/arch/ia64/kernel/sys_ia64.c
-@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
+diff -urNp linux-2.6.37/arch/ia64/kernel/sys_ia64.c linux-2.6.37/arch/ia64/kernel/sys_ia64.c
+--- linux-2.6.37/arch/ia64/kernel/sys_ia64.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/ia64/kernel/sys_ia64.c 2011-01-17 02:41:00.000000000 -0500
+@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
if (REGION_NUMBER(addr) == RGN_HPAGE)
addr = 0;
#endif
@@ -1337,7 +1171,7 @@ index 609d500..7dde2a8 100644
if (!addr)
addr = mm->free_area_cache;
-@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
+@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
/* At this point: (!vma || addr < vma->vm_end). */
if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
@@ -1355,24 +1189,22 @@ index 609d500..7dde2a8 100644
/* Remember the address where we stopped this search: */
mm->free_area_cache = addr + len;
return addr;
-diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
-index e07218a..fe87c0f 100644
---- a/arch/ia64/kernel/vmlinux.lds.S
-+++ b/arch/ia64/kernel/vmlinux.lds.S
-@@ -196,7 +196,7 @@ SECTIONS
- /* Per-cpu data: */
- . = ALIGN(PERCPU_PAGE_SIZE);
- PERCPU_VADDR(PERCPU_ADDR, :percpu)
-- __phys_per_cpu_start = __per_cpu_load;
-+ __phys_per_cpu_start = per_cpu_load;
- . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
- * into percpu page size
- */
-diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
-index 0799fea..4879544 100644
---- a/arch/ia64/mm/fault.c
-+++ b/arch/ia64/mm/fault.c
-@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
+diff -urNp linux-2.6.37/arch/ia64/kernel/vmlinux.lds.S linux-2.6.37/arch/ia64/kernel/vmlinux.lds.S
+--- linux-2.6.37/arch/ia64/kernel/vmlinux.lds.S 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/ia64/kernel/vmlinux.lds.S 2011-01-17 02:41:00.000000000 -0500
+@@ -199,7 +199,7 @@ SECTIONS {
+ /* Per-cpu data: */
+ . = ALIGN(PERCPU_PAGE_SIZE);
+ PERCPU_VADDR(PERCPU_ADDR, :percpu)
+- __phys_per_cpu_start = __per_cpu_load;
++ __phys_per_cpu_start = per_cpu_load;
+ /*
+ * ensure percpu data fits
+ * into percpu page size
+diff -urNp linux-2.6.37/arch/ia64/mm/fault.c linux-2.6.37/arch/ia64/mm/fault.c
+--- linux-2.6.37/arch/ia64/mm/fault.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/ia64/mm/fault.c 2011-01-17 02:41:00.000000000 -0500
+@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned
return pte_present(pte);
}
@@ -1396,7 +1228,7 @@ index 0799fea..4879544 100644
void __kprobes
ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
{
-@@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
+@@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long addres
mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
@@ -1421,11 +1253,10 @@ index 0799fea..4879544 100644
/*
* If for any reason at all we couldn't handle the fault, make
* sure we exit gracefully rather than endlessly redo the
-diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
-index 1841ee7..3d78dd6 100644
---- a/arch/ia64/mm/hugetlbpage.c
-+++ b/arch/ia64/mm/hugetlbpage.c
-@@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
+diff -urNp linux-2.6.37/arch/ia64/mm/hugetlbpage.c linux-2.6.37/arch/ia64/mm/hugetlbpage.c
+--- linux-2.6.37/arch/ia64/mm/hugetlbpage.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/ia64/mm/hugetlbpage.c 2011-01-17 02:41:00.000000000 -0500
+@@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(
/* At this point: (!vmm || addr < vmm->vm_end). */
if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
return -ENOMEM;
@@ -1434,10 +1265,9 @@ index 1841ee7..3d78dd6 100644
return addr;
addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
}
-diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
-index ed41759..fcaf88a 100644
---- a/arch/ia64/mm/init.c
-+++ b/arch/ia64/mm/init.c
+diff -urNp linux-2.6.37/arch/ia64/mm/init.c linux-2.6.37/arch/ia64/mm/init.c
+--- linux-2.6.37/arch/ia64/mm/init.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/ia64/mm/init.c 2011-01-17 02:41:00.000000000 -0500
@@ -122,6 +122,19 @@ ia64_init_addr_space (void)
vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
vma->vm_end = vma->vm_start + PAGE_SIZE;
@@ -1458,11 +1288,10 @@ index ed41759..fcaf88a 100644
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
down_write(&current->mm->mmap_sem);
if (insert_vm_struct(current->mm, vma)) {
-diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
-index a9d310d..447c7cc 100644
---- a/arch/ia64/sn/pci/pci_dma.c
-+++ b/arch/ia64/sn/pci/pci_dma.c
-@@ -465,7 +465,7 @@ int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
+diff -urNp linux-2.6.37/arch/ia64/sn/pci/pci_dma.c linux-2.6.37/arch/ia64/sn/pci/pci_dma.c
+--- linux-2.6.37/arch/ia64/sn/pci/pci_dma.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/ia64/sn/pci/pci_dma.c 2011-01-17 02:41:00.000000000 -0500
+@@ -465,7 +465,7 @@ int sn_pci_legacy_write(struct pci_bus *
return ret;
}
@@ -1471,10 +1300,9 @@ index a9d310d..447c7cc 100644
.alloc_coherent = sn_dma_alloc_coherent,
.free_coherent = sn_dma_free_coherent,
.map_page = sn_dma_map_page,
-diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
-index 82abd15..d95ae5d 100644
---- a/arch/m32r/lib/usercopy.c
-+++ b/arch/m32r/lib/usercopy.c
+diff -urNp linux-2.6.37/arch/m32r/lib/usercopy.c linux-2.6.37/arch/m32r/lib/usercopy.c
+--- linux-2.6.37/arch/m32r/lib/usercopy.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/m32r/lib/usercopy.c 2011-01-17 02:41:00.000000000 -0500
@@ -14,6 +14,9 @@
unsigned long
__generic_copy_to_user(void __user *to, const void *from, unsigned long n)
@@ -1485,7 +1313,7 @@ index 82abd15..d95ae5d 100644
prefetch(from);
if (access_ok(VERIFY_WRITE, to, n))
__copy_user(to,from,n);
-@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
+@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
unsigned long
__generic_copy_from_user(void *to, const void __user *from, unsigned long n)
{
@@ -1495,10 +1323,9 @@ index 82abd15..d95ae5d 100644
prefetchw(to);
if (access_ok(VERIFY_READ, from, n))
__copy_user_zeroing(to,from,n);
-diff --git a/arch/microblaze/include/asm/device.h b/arch/microblaze/include/asm/device.h
-index 123b2fe..f8926eb 100644
---- a/arch/microblaze/include/asm/device.h
-+++ b/arch/microblaze/include/asm/device.h
+diff -urNp linux-2.6.37/arch/microblaze/include/asm/device.h linux-2.6.37/arch/microblaze/include/asm/device.h
+--- linux-2.6.37/arch/microblaze/include/asm/device.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/microblaze/include/asm/device.h 2011-01-17 02:41:00.000000000 -0500
@@ -13,7 +13,7 @@ struct device_node;
struct dev_archdata {
@@ -1508,11 +1335,10 @@ index 123b2fe..f8926eb 100644
void *dma_data;
};
-diff --git a/arch/microblaze/include/asm/dma-mapping.h b/arch/microblaze/include/asm/dma-mapping.h
-index 18b3731..714c068 100644
---- a/arch/microblaze/include/asm/dma-mapping.h
-+++ b/arch/microblaze/include/asm/dma-mapping.h
-@@ -43,14 +43,14 @@ static inline unsigned long device_to_mask(struct device *dev)
+diff -urNp linux-2.6.37/arch/microblaze/include/asm/dma-mapping.h linux-2.6.37/arch/microblaze/include/asm/dma-mapping.h
+--- linux-2.6.37/arch/microblaze/include/asm/dma-mapping.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/microblaze/include/asm/dma-mapping.h 2011-01-17 02:41:00.000000000 -0500
+@@ -43,14 +43,14 @@ static inline unsigned long device_to_ma
return 0xfffffffful;
}
@@ -1530,7 +1356,7 @@ index 18b3731..714c068 100644
{
/* We don't handle the NULL dev case for ISA for now. We could
* do it via an out of line call but it is not needed for now. The
-@@ -63,14 +63,14 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+@@ -63,14 +63,14 @@ static inline struct dma_map_ops *get_dm
return dev->archdata.dma_ops;
}
@@ -1547,7 +1373,7 @@ index 18b3731..714c068 100644
if (unlikely(!ops))
return 0;
-@@ -87,7 +87,7 @@ static inline int dma_supported(struct device *dev, u64 mask)
+@@ -81,7 +81,7 @@ static inline int dma_supported(struct d
static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
@@ -1556,7 +1382,7 @@ index 18b3731..714c068 100644
if (unlikely(ops == NULL))
return -EIO;
-@@ -103,7 +103,7 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
+@@ -97,7 +97,7 @@ static inline int dma_set_mask(struct de
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
@@ -1565,7 +1391,7 @@ index 18b3731..714c068 100644
if (ops->mapping_error)
return ops->mapping_error(dev, dma_addr);
-@@ -117,7 +117,7 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+@@ -110,7 +110,7 @@ static inline int dma_mapping_error(stru
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
{
@@ -1574,7 +1400,7 @@ index 18b3731..714c068 100644
void *memory;
BUG_ON(!ops);
-@@ -131,7 +131,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+@@ -124,7 +124,7 @@ static inline void *dma_alloc_coherent(s
static inline void dma_free_coherent(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_handle)
{
@@ -1583,11 +1409,10 @@ index 18b3731..714c068 100644
BUG_ON(!ops);
debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
-diff --git a/arch/microblaze/include/asm/pci.h b/arch/microblaze/include/asm/pci.h
-index 5a388ee..446d58f 100644
---- a/arch/microblaze/include/asm/pci.h
-+++ b/arch/microblaze/include/asm/pci.h
-@@ -54,8 +54,8 @@ static inline void pcibios_penalize_isa_irq(int irq, int active)
+diff -urNp linux-2.6.37/arch/microblaze/include/asm/pci.h linux-2.6.37/arch/microblaze/include/asm/pci.h
+--- linux-2.6.37/arch/microblaze/include/asm/pci.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/microblaze/include/asm/pci.h 2011-01-17 02:41:00.000000000 -0500
+@@ -54,8 +54,8 @@ static inline void pcibios_penalize_isa_
}
#ifdef CONFIG_PCI
@@ -1598,11 +1423,10 @@ index 5a388ee..446d58f 100644
#else /* CONFIG_PCI */
#define set_pci_dma_ops(d)
#define get_pci_dma_ops() NULL
-diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c
-index 79c7465..95a4dbe 100644
---- a/arch/microblaze/kernel/dma.c
-+++ b/arch/microblaze/kernel/dma.c
-@@ -133,7 +133,7 @@ static inline void dma_direct_unmap_page(struct device *dev,
+diff -urNp linux-2.6.37/arch/microblaze/kernel/dma.c linux-2.6.37/arch/microblaze/kernel/dma.c
+--- linux-2.6.37/arch/microblaze/kernel/dma.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/microblaze/kernel/dma.c 2011-01-17 02:41:00.000000000 -0500
+@@ -133,7 +133,7 @@ static inline void dma_direct_unmap_page
__dma_sync_page(dma_address, 0 , size, direction);
}
@@ -1611,11 +1435,26 @@ index 79c7465..95a4dbe 100644
.alloc_coherent = dma_direct_alloc_coherent,
.free_coherent = dma_direct_free_coherent,
.map_sg = dma_direct_map_sg,
-diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c
-index 23be25f..d32c14f 100644
---- a/arch/microblaze/pci/pci-common.c
-+++ b/arch/microblaze/pci/pci-common.c
-@@ -46,14 +46,14 @@ resource_size_t isa_mem_base;
+diff -urNp linux-2.6.37/arch/microblaze/kernel/kgdb.c linux-2.6.37/arch/microblaze/kernel/kgdb.c
+--- linux-2.6.37/arch/microblaze/kernel/kgdb.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/microblaze/kernel/kgdb.c 2011-01-17 02:41:00.000000000 -0500
+@@ -141,10 +141,10 @@ void kgdb_arch_exit(void)
+ /*
+ * Global data
+ */
+-struct kgdb_arch arch_kgdb_ops = {
++const struct kgdb_arch arch_kgdb_ops = {
+ #ifdef __MICROBLAZEEL__
+ .gdb_bpt_instr = {0x18, 0x00, 0x0c, 0xba}, /* brki r16, 0x18 */
+ #else
+ .gdb_bpt_instr = {0xba, 0x0c, 0x00, 0x18}, /* brki r16, 0x18 */
+ #endif
+ };
+diff -urNp linux-2.6.37/arch/microblaze/pci/pci-common.c linux-2.6.37/arch/microblaze/pci/pci-common.c
+--- linux-2.6.37/arch/microblaze/pci/pci-common.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/microblaze/pci/pci-common.c 2011-01-17 02:41:00.000000000 -0500
+@@ -47,14 +47,14 @@ resource_size_t isa_mem_base;
/* Default PCI flags is 0 on ppc32, modified at boot on ppc64 */
unsigned int pci_flags;
@@ -1633,10 +1472,9 @@ index 23be25f..d32c14f 100644
{
return pci_dma_ops;
}
-diff --git a/arch/mips/alchemy/devboards/pm.c b/arch/mips/alchemy/devboards/pm.c
-index 4bbd313..acaf91b 100644
---- a/arch/mips/alchemy/devboards/pm.c
-+++ b/arch/mips/alchemy/devboards/pm.c
+diff -urNp linux-2.6.37/arch/mips/alchemy/devboards/pm.c linux-2.6.37/arch/mips/alchemy/devboards/pm.c
+--- linux-2.6.37/arch/mips/alchemy/devboards/pm.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/mips/alchemy/devboards/pm.c 2011-01-17 02:41:00.000000000 -0500
@@ -110,7 +110,7 @@ static void db1x_pm_end(void)
}
@@ -1646,11 +1484,92 @@ index 4bbd313..acaf91b 100644
.valid = suspend_valid_only_mem,
.begin = db1x_pm_begin,
.enter = db1x_pm_enter,
-diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
-index ea77a42..829dd86 100644
---- a/arch/mips/include/asm/elf.h
-+++ b/arch/mips/include/asm/elf.h
-@@ -368,6 +368,13 @@ extern const char *__elf_platform;
+diff -urNp linux-2.6.37/arch/mips/cavium-octeon/dma-octeon.c linux-2.6.37/arch/mips/cavium-octeon/dma-octeon.c
+--- linux-2.6.37/arch/mips/cavium-octeon/dma-octeon.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/mips/cavium-octeon/dma-octeon.c 2011-01-17 02:41:00.000000000 -0500
+@@ -202,7 +202,7 @@ static phys_addr_t octeon_unity_dma_to_p
+ }
+
+ struct octeon_dma_map_ops {
+- struct dma_map_ops dma_map_ops;
++ const struct dma_map_ops dma_map_ops;
+ dma_addr_t (*phys_to_dma)(struct device *dev, phys_addr_t paddr);
+ phys_addr_t (*dma_to_phys)(struct device *dev, dma_addr_t daddr);
+ };
+@@ -324,7 +324,7 @@ static struct octeon_dma_map_ops _octeon
+ },
+ };
+
+-struct dma_map_ops *octeon_pci_dma_map_ops;
++const struct dma_map_ops *octeon_pci_dma_map_ops;
+
+ void __init octeon_pci_dma_init(void)
+ {
+diff -urNp linux-2.6.37/arch/mips/include/asm/device.h linux-2.6.37/arch/mips/include/asm/device.h
+--- linux-2.6.37/arch/mips/include/asm/device.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/mips/include/asm/device.h 2011-01-17 02:41:00.000000000 -0500
+@@ -10,7 +10,7 @@ struct dma_map_ops;
+
+ struct dev_archdata {
+ /* DMA operations on that device */
+- struct dma_map_ops *dma_ops;
++ const struct dma_map_ops *dma_ops;
+ };
+
+ struct pdev_archdata {
+diff -urNp linux-2.6.37/arch/mips/include/asm/dma-mapping.h linux-2.6.37/arch/mips/include/asm/dma-mapping.h
+--- linux-2.6.37/arch/mips/include/asm/dma-mapping.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/mips/include/asm/dma-mapping.h 2011-01-17 02:41:00.000000000 -0500
+@@ -7,9 +7,9 @@
+
+ #include <dma-coherence.h>
+
+-extern struct dma_map_ops *mips_dma_map_ops;
++extern const struct dma_map_ops *mips_dma_map_ops;
+
+-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
++static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
+ {
+ if (dev && dev->archdata.dma_ops)
+ return dev->archdata.dma_ops;
+@@ -31,13 +31,13 @@ static inline void dma_mark_clean(void *
+
+ static inline int dma_supported(struct device *dev, u64 mask)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+ return ops->dma_supported(dev, mask);
+ }
+
+ static inline int dma_mapping_error(struct device *dev, u64 mask)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+ return ops->mapping_error(dev, mask);
+ }
+
+@@ -59,7 +59,7 @@ static inline void *dma_alloc_coherent(s
+ dma_addr_t *dma_handle, gfp_t gfp)
+ {
+ void *ret;
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ ret = ops->alloc_coherent(dev, size, dma_handle, gfp);
+
+@@ -71,7 +71,7 @@ static inline void *dma_alloc_coherent(s
+ static inline void dma_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ ops->free_coherent(dev, size, vaddr, dma_handle);
+
+diff -urNp linux-2.6.37/arch/mips/include/asm/elf.h linux-2.6.37/arch/mips/include/asm/elf.h
+--- linux-2.6.37/arch/mips/include/asm/elf.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/mips/include/asm/elf.h 2011-01-17 02:41:00.000000000 -0500
+@@ -372,13 +372,16 @@ extern const char *__elf_platform;
#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
#endif
@@ -1664,11 +1583,29 @@ index ea77a42..829dd86 100644
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
struct linux_binprm;
extern int arch_setup_additional_pages(struct linux_binprm *bprm,
-diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
-index a16beaf..02e1fae 100644
---- a/arch/mips/include/asm/page.h
-+++ b/arch/mips/include/asm/page.h
-@@ -93,7 +93,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
+ int uses_interp);
+
+-struct mm_struct;
+-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
+-#define arch_randomize_brk arch_randomize_brk
+-
+ #endif /* _ASM_ELF_H */
+diff -urNp linux-2.6.37/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h linux-2.6.37/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h
+--- linux-2.6.37/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h 2011-01-17 02:41:00.000000000 -0500
+@@ -66,7 +66,7 @@ dma_addr_t phys_to_dma(struct device *de
+ phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
+
+ struct dma_map_ops;
+-extern struct dma_map_ops *octeon_pci_dma_map_ops;
++extern const struct dma_map_ops *octeon_pci_dma_map_ops;
+ extern char *octeon_swiotlb;
+
+ #endif /* __ASM_MACH_CAVIUM_OCTEON_DMA_COHERENCE_H */
+diff -urNp linux-2.6.37/arch/mips/include/asm/page.h linux-2.6.37/arch/mips/include/asm/page.h
+--- linux-2.6.37/arch/mips/include/asm/page.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/mips/include/asm/page.h 2011-01-17 02:41:00.000000000 -0500
+@@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
#ifdef CONFIG_CPU_MIPS32
typedef struct { unsigned long pte_low, pte_high; } pte_t;
#define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
@@ -1677,11 +1614,18 @@ index a16beaf..02e1fae 100644
#else
typedef struct { unsigned long long pte; } pte_t;
#define pte_val(x) ((x).pte)
-diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
-index bb937cc..0b614cd 100644
---- a/arch/mips/include/asm/system.h
-+++ b/arch/mips/include/asm/system.h
-@@ -234,6 +234,6 @@ extern void per_cpu_trap_init(void);
+diff -urNp linux-2.6.37/arch/mips/include/asm/system.h linux-2.6.37/arch/mips/include/asm/system.h
+--- linux-2.6.37/arch/mips/include/asm/system.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/mips/include/asm/system.h 2011-01-17 02:41:00.000000000 -0500
+@@ -23,6 +23,7 @@
+ #include <asm/dsp.h>
+ #include <asm/watch.h>
+ #include <asm/war.h>
++#include <asm/asm.h>
+
+
+ /*
+@@ -230,6 +231,6 @@ extern void per_cpu_trap_init(void);
*/
#define __ARCH_WANT_UNLOCKED_CTXSW
@@ -1689,11 +1633,22 @@ index bb937cc..0b614cd 100644
+#define arch_align_stack(x) ((x) & ALMASK)
#endif /* _ASM_SYSTEM_H */
-diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
-index 9fdd8bc..fcf9d68 100644
---- a/arch/mips/kernel/binfmt_elfn32.c
-+++ b/arch/mips/kernel/binfmt_elfn32.c
-@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+diff -urNp linux-2.6.37/arch/mips/jz4740/pm.c linux-2.6.37/arch/mips/jz4740/pm.c
+--- linux-2.6.37/arch/mips/jz4740/pm.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/mips/jz4740/pm.c 2011-01-17 02:41:00.000000000 -0500
+@@ -42,7 +42,7 @@ static int jz4740_pm_enter(suspend_state
+ return 0;
+ }
+
+-static struct platform_suspend_ops jz4740_pm_ops = {
++static const struct platform_suspend_ops jz4740_pm_ops = {
+ .valid = suspend_valid_only_mem,
+ .enter = jz4740_pm_enter,
+ };
+diff -urNp linux-2.6.37/arch/mips/kernel/binfmt_elfn32.c linux-2.6.37/arch/mips/kernel/binfmt_elfn32.c
+--- linux-2.6.37/arch/mips/kernel/binfmt_elfn32.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/mips/kernel/binfmt_elfn32.c 2011-01-17 02:41:00.000000000 -0500
+@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
#undef ELF_ET_DYN_BASE
#define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
@@ -1707,11 +1662,10 @@ index 9fdd8bc..fcf9d68 100644
#include <asm/processor.h>
#include <linux/module.h>
#include <linux/elfcore.h>
-diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
-index ff44823..cf0b48a 100644
---- a/arch/mips/kernel/binfmt_elfo32.c
-+++ b/arch/mips/kernel/binfmt_elfo32.c
-@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+diff -urNp linux-2.6.37/arch/mips/kernel/binfmt_elfo32.c linux-2.6.37/arch/mips/kernel/binfmt_elfo32.c
+--- linux-2.6.37/arch/mips/kernel/binfmt_elfo32.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/mips/kernel/binfmt_elfo32.c 2011-01-17 02:41:00.000000000 -0500
+@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
#undef ELF_ET_DYN_BASE
#define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
@@ -1725,11 +1679,10 @@ index ff44823..cf0b48a 100644
#include <asm/processor.h>
/*
-diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
-index 9b78ff6..1228099 100644
---- a/arch/mips/kernel/kgdb.c
-+++ b/arch/mips/kernel/kgdb.c
-@@ -270,6 +270,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
+diff -urNp linux-2.6.37/arch/mips/kernel/kgdb.c linux-2.6.37/arch/mips/kernel/kgdb.c
+--- linux-2.6.37/arch/mips/kernel/kgdb.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/mips/kernel/kgdb.c 2011-01-17 02:41:00.000000000 -0500
+@@ -351,6 +351,7 @@ int kgdb_arch_handle_exception(int vecto
return -1;
}
@@ -1737,11 +1690,10 @@ index 9b78ff6..1228099 100644
struct kgdb_arch arch_kgdb_ops;
/*
-diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
-index 9996094..ce0968a 100644
---- a/arch/mips/kernel/process.c
-+++ b/arch/mips/kernel/process.c
-@@ -474,15 +474,3 @@ unsigned long get_wchan(struct task_struct *task)
+diff -urNp linux-2.6.37/arch/mips/kernel/process.c linux-2.6.37/arch/mips/kernel/process.c
+--- linux-2.6.37/arch/mips/kernel/process.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/mips/kernel/process.c 2011-01-17 02:41:00.000000000 -0500
+@@ -473,15 +473,3 @@ unsigned long get_wchan(struct task_stru
out:
return pc;
}
@@ -1757,11 +1709,10 @@ index 9996094..ce0968a 100644
-
- return sp & ALMASK;
-}
-diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
-index dd81b0f..2653a71 100644
---- a/arch/mips/kernel/syscall.c
-+++ b/arch/mips/kernel/syscall.c
-@@ -106,17 +106,21 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+diff -urNp linux-2.6.37/arch/mips/kernel/syscall.c linux-2.6.37/arch/mips/kernel/syscall.c
+--- linux-2.6.37/arch/mips/kernel/syscall.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/mips/kernel/syscall.c 2011-01-17 02:41:00.000000000 -0500
+@@ -108,14 +108,18 @@ unsigned long arch_get_unmapped_area(str
do_color_align = 0;
if (filp || (flags & MAP_SHARED))
do_color_align = 1;
@@ -1781,12 +1732,8 @@ index dd81b0f..2653a71 100644
+ if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
return addr;
}
-- addr = TASK_UNMAPPED_BASE;
-+ addr = current->mm->mmap_base;
- if (do_color_align)
- addr = COLOUR_ALIGN(addr, pgoff);
- else
-@@ -126,7 +130,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ addr = current->mm->mmap_base;
+@@ -128,7 +132,7 @@ unsigned long arch_get_unmapped_area(str
/* At this point: (!vmm || addr < vmm->vm_end). */
if (task_size - len < addr)
return -ENOMEM;
@@ -1795,11 +1742,30 @@ index dd81b0f..2653a71 100644
return addr;
addr = vmm->vm_end;
if (do_color_align)
-diff --git a/arch/mips/loongson/common/pm.c b/arch/mips/loongson/common/pm.c
-index 6c1fd90..f55e07a 100644
---- a/arch/mips/loongson/common/pm.c
-+++ b/arch/mips/loongson/common/pm.c
-@@ -147,7 +147,7 @@ static int loongson_pm_valid_state(suspend_state_t state)
+@@ -168,19 +172,6 @@ static inline unsigned long brk_rnd(void
+ return rnd;
+ }
+
+-unsigned long arch_randomize_brk(struct mm_struct *mm)
+-{
+- unsigned long base = mm->brk;
+- unsigned long ret;
+-
+- ret = PAGE_ALIGN(base + brk_rnd());
+-
+- if (ret < mm->brk)
+- return mm->brk;
+-
+- return ret;
+-}
+-
+ SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len,
+ unsigned long, prot, unsigned long, flags, unsigned long,
+ fd, off_t, offset)
+diff -urNp linux-2.6.37/arch/mips/loongson/common/pm.c linux-2.6.37/arch/mips/loongson/common/pm.c
+--- linux-2.6.37/arch/mips/loongson/common/pm.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/mips/loongson/common/pm.c 2011-01-17 02:41:00.000000000 -0500
+@@ -147,7 +147,7 @@ static int loongson_pm_valid_state(suspe
}
}
@@ -1808,13 +1774,33 @@ index 6c1fd90..f55e07a 100644
.valid = loongson_pm_valid_state,
.enter = loongson_pm_enter,
};
-diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
-index b78f7d9..ed674d8 100644
---- a/arch/mips/mm/fault.c
-+++ b/arch/mips/mm/fault.c
-@@ -26,6 +26,23 @@
- #include <asm/ptrace.h>
+diff -urNp linux-2.6.37/arch/mips/mm/dma-default.c linux-2.6.37/arch/mips/mm/dma-default.c
+--- linux-2.6.37/arch/mips/mm/dma-default.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/mips/mm/dma-default.c 2011-01-17 02:41:00.000000000 -0500
+@@ -300,7 +300,7 @@ void dma_cache_sync(struct device *dev,
+
+ EXPORT_SYMBOL(dma_cache_sync);
+
+-static struct dma_map_ops mips_default_dma_map_ops = {
++static const struct dma_map_ops mips_default_dma_map_ops = {
+ .alloc_coherent = mips_dma_alloc_coherent,
+ .free_coherent = mips_dma_free_coherent,
+ .map_page = mips_dma_map_page,
+@@ -315,7 +315,7 @@ static struct dma_map_ops mips_default_d
+ .dma_supported = mips_dma_supported
+ };
+
+-struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
++const struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
+ EXPORT_SYMBOL(mips_dma_map_ops);
+
+ #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
+diff -urNp linux-2.6.37/arch/mips/mm/fault.c linux-2.6.37/arch/mips/mm/fault.c
+--- linux-2.6.37/arch/mips/mm/fault.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/mips/mm/fault.c 2011-01-17 02:41:00.000000000 -0500
+@@ -28,6 +28,23 @@
#include <asm/highmem.h> /* For VMALLOC_END */
+ #include <linux/kdebug.h>
+#ifdef CONFIG_PAX_PAGEEXEC
+void pax_report_insns(void *pc, void *sp)
@@ -1836,11 +1822,10 @@ index b78f7d9..ed674d8 100644
/*
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
-diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
-index 19f6cb1..6c78cf2 100644
---- a/arch/parisc/include/asm/elf.h
-+++ b/arch/parisc/include/asm/elf.h
-@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
+diff -urNp linux-2.6.37/arch/parisc/include/asm/elf.h linux-2.6.37/arch/parisc/include/asm/elf.h
+--- linux-2.6.37/arch/parisc/include/asm/elf.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/parisc/include/asm/elf.h 2011-01-17 02:41:00.000000000 -0500
+@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration..
#define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
@@ -1854,10 +1839,9 @@ index 19f6cb1..6c78cf2 100644
/* This yields a mask that user programs can use to figure out what
instruction set this CPU supports. This could be done in user space,
but it's not easy, and we've already done it here. */
-diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
-index 01c1503..4b0fcf4 100644
---- a/arch/parisc/include/asm/pgtable.h
-+++ b/arch/parisc/include/asm/pgtable.h
+diff -urNp linux-2.6.37/arch/parisc/include/asm/pgtable.h linux-2.6.37/arch/parisc/include/asm/pgtable.h
+--- linux-2.6.37/arch/parisc/include/asm/pgtable.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/parisc/include/asm/pgtable.h 2011-01-17 02:41:00.000000000 -0500
@@ -207,6 +207,17 @@
#define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
#define PAGE_COPY PAGE_EXECREAD
@@ -1876,10 +1860,9 @@ index 01c1503..4b0fcf4 100644
#define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
#define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
-diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
-index 159a2b8..196c0d8 100644
---- a/arch/parisc/kernel/module.c
-+++ b/arch/parisc/kernel/module.c
+diff -urNp linux-2.6.37/arch/parisc/kernel/module.c linux-2.6.37/arch/parisc/kernel/module.c
+--- linux-2.6.37/arch/parisc/kernel/module.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/parisc/kernel/module.c 2011-01-17 02:41:00.000000000 -0500
@@ -96,16 +96,38 @@
/* three functions to determine where in the module core
@@ -1923,27 +1906,28 @@ index 159a2b8..196c0d8 100644
}
static inline int in_local(struct module *me, void *loc)
-@@ -365,13 +387,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
+@@ -365,13 +387,13 @@ int module_frob_arch_sections(CONST Elf_
}
/* align things a bit */
- me->core_size = ALIGN(me->core_size, 16);
- me->arch.got_offset = me->core_size;
- me->core_size += gots * sizeof(struct got_entry);
-+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
-+ me->arch.got_offset = me->core_size_rw;
-+ me->core_size_rw += gots * sizeof(struct got_entry);
-
+-
- me->core_size = ALIGN(me->core_size, 16);
- me->arch.fdesc_offset = me->core_size;
- me->core_size += fdescs * sizeof(Elf_Fdesc);
+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
++ me->arch.got_offset = me->core_size_rw;
++ me->core_size_rw += gots * sizeof(struct got_entry);
++
++ me->core_size_rw = ALIGN(me->core_size_rw, 16);
+ me->arch.fdesc_offset = me->core_size_rw;
+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
me->arch.got_max = gots;
me->arch.fdesc_max = fdescs;
-@@ -389,7 +411,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
+@@ -389,7 +411,7 @@ static Elf64_Word get_got(struct module
BUG_ON(value == 0);
@@ -1952,7 +1936,7 @@ index 159a2b8..196c0d8 100644
for (i = 0; got[i].addr; i++)
if (got[i].addr == value)
goto out;
-@@ -407,7 +429,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
+@@ -407,7 +429,7 @@ static Elf64_Word get_got(struct module
#ifdef CONFIG_64BIT
static Elf_Addr get_fdesc(struct module *me, unsigned long value)
{
@@ -1961,7 +1945,7 @@ index 159a2b8..196c0d8 100644
if (!value) {
printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
-@@ -425,7 +447,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
+@@ -425,7 +447,7 @@ static Elf_Addr get_fdesc(struct module
/* Create new one */
fdesc->addr = value;
@@ -1979,11 +1963,10 @@ index 159a2b8..196c0d8 100644
DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
me->arch.unwind_section, table, end, gp);
-diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
-index c9b9322..02d8940 100644
---- a/arch/parisc/kernel/sys_parisc.c
-+++ b/arch/parisc/kernel/sys_parisc.c
-@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
+diff -urNp linux-2.6.37/arch/parisc/kernel/sys_parisc.c linux-2.6.37/arch/parisc/kernel/sys_parisc.c
+--- linux-2.6.37/arch/parisc/kernel/sys_parisc.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/parisc/kernel/sys_parisc.c 2011-01-17 02:41:00.000000000 -0500
+@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
/* At this point: (!vma || addr < vma->vm_end). */
if (TASK_SIZE - len < addr)
return -ENOMEM;
@@ -1992,7 +1975,7 @@ index c9b9322..02d8940 100644
return addr;
addr = vma->vm_end;
}
-@@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
+@@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
/* At this point: (!vma || addr < vma->vm_end). */
if (TASK_SIZE - len < addr)
return -ENOMEM;
@@ -2001,7 +1984,7 @@ index c9b9322..02d8940 100644
return addr;
addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
if (addr < vma->vm_end) /* handle wraparound */
-@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
if (flags & MAP_FIXED)
return addr;
if (!addr)
@@ -2010,11 +1993,10 @@ index c9b9322..02d8940 100644
if (filp) {
addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
-diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
-index 8b58bf0..7afff03 100644
---- a/arch/parisc/kernel/traps.c
-+++ b/arch/parisc/kernel/traps.c
-@@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
+diff -urNp linux-2.6.37/arch/parisc/kernel/traps.c linux-2.6.37/arch/parisc/kernel/traps.c
+--- linux-2.6.37/arch/parisc/kernel/traps.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/parisc/kernel/traps.c 2011-01-17 02:41:00.000000000 -0500
+@@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
down_read(&current->mm->mmap_sem);
vma = find_vma(current->mm,regs->iaoq[0]);
@@ -2025,10 +2007,9 @@ index 8b58bf0..7afff03 100644
fault_address = regs->iaoq[0];
fault_space = regs->iasq[0];
-diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
-index 18162ce..4d274ba 100644
---- a/arch/parisc/mm/fault.c
-+++ b/arch/parisc/mm/fault.c
+diff -urNp linux-2.6.37/arch/parisc/mm/fault.c linux-2.6.37/arch/parisc/mm/fault.c
+--- linux-2.6.37/arch/parisc/mm/fault.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/parisc/mm/fault.c 2011-01-17 02:41:00.000000000 -0500
@@ -15,6 +15,7 @@
#include <linux/sched.h>
#include <linux/interrupt.h>
@@ -2037,7 +2018,7 @@ index 18162ce..4d274ba 100644
#include <asm/uaccess.h>
#include <asm/traps.h>
-@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
+@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
static unsigned long
parisc_acctyp(unsigned long code, unsigned int inst)
{
@@ -2046,7 +2027,7 @@ index 18162ce..4d274ba 100644
return VM_EXEC;
switch (inst & 0xf0000000) {
-@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
+@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
}
#endif
@@ -2198,10 +2179,9 @@ index 18162ce..4d274ba 100644
/*
* If for any reason at all we couldn't handle the fault, make
-diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
-index a3954e4..5db6a83 100644
---- a/arch/powerpc/include/asm/device.h
-+++ b/arch/powerpc/include/asm/device.h
+diff -urNp linux-2.6.37/arch/powerpc/include/asm/device.h linux-2.6.37/arch/powerpc/include/asm/device.h
+--- linux-2.6.37/arch/powerpc/include/asm/device.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/powerpc/include/asm/device.h 2011-01-17 02:41:00.000000000 -0500
@@ -11,7 +11,7 @@ struct device_node;
struct dev_archdata {
@@ -2211,17 +2191,17 @@ index a3954e4..5db6a83 100644
/*
* When an iommu is in use, dma_data is used as a ptr to the base of the
-diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
-index c85ef23..01138ae 100644
---- a/arch/powerpc/include/asm/dma-mapping.h
-+++ b/arch/powerpc/include/asm/dma-mapping.h
-@@ -66,12 +66,13 @@ static inline unsigned long device_to_mask(struct device *dev)
+diff -urNp linux-2.6.37/arch/powerpc/include/asm/dma-mapping.h linux-2.6.37/arch/powerpc/include/asm/dma-mapping.h
+--- linux-2.6.37/arch/powerpc/include/asm/dma-mapping.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/powerpc/include/asm/dma-mapping.h 2011-01-17 02:41:00.000000000 -0500
+@@ -66,12 +66,13 @@ static inline unsigned long device_to_ma
/*
* Available generic sets of operations
*/
+/* cannot be const */
#ifdef CONFIG_PPC64
- extern struct dma_map_ops dma_iommu_ops;
+-extern struct dma_map_ops dma_iommu_ops;
++extern const struct dma_map_ops dma_iommu_ops;
#endif
-extern struct dma_map_ops dma_direct_ops;
+extern const struct dma_map_ops dma_direct_ops;
@@ -2231,7 +2211,7 @@ index c85ef23..01138ae 100644
{
/* We don't handle the NULL dev case for ISA for now. We could
* do it via an out of line call but it is not needed for now. The
-@@ -84,7 +85,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+@@ -84,7 +85,7 @@ static inline struct dma_map_ops *get_dm
return dev->archdata.dma_ops;
}
@@ -2240,7 +2220,7 @@ index c85ef23..01138ae 100644
{
dev->archdata.dma_ops = ops;
}
-@@ -118,7 +119,7 @@ static inline void set_dma_offset(struct device *dev, dma_addr_t off)
+@@ -118,7 +119,7 @@ static inline void set_dma_offset(struct
static inline int dma_supported(struct device *dev, u64 mask)
{
@@ -2249,16 +2229,7 @@ index c85ef23..01138ae 100644
if (unlikely(dma_ops == NULL))
return 0;
-@@ -129,7 +130,7 @@ static inline int dma_supported(struct device *dev, u64 mask)
-
- static inline int dma_set_mask(struct device *dev, u64 dma_mask)
- {
-- struct dma_map_ops *dma_ops = get_dma_ops(dev);
-+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
-
- if (unlikely(dma_ops == NULL))
- return -EIO;
-@@ -144,7 +145,7 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
+@@ -132,7 +133,7 @@ extern int dma_set_mask(struct device *d
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
{
@@ -2267,7 +2238,7 @@ index c85ef23..01138ae 100644
void *cpu_addr;
BUG_ON(!dma_ops);
-@@ -159,7 +160,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+@@ -147,7 +148,7 @@ static inline void *dma_alloc_coherent(s
static inline void dma_free_coherent(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_handle)
{
@@ -2276,7 +2247,7 @@ index c85ef23..01138ae 100644
BUG_ON(!dma_ops);
-@@ -170,7 +171,7 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
+@@ -158,7 +159,7 @@ static inline void dma_free_coherent(str
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
@@ -2285,11 +2256,10 @@ index c85ef23..01138ae 100644
if (dma_ops->mapping_error)
return dma_ops->mapping_error(dev, dma_addr);
-diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
-index c376eda..a8cd687 100644
---- a/arch/powerpc/include/asm/elf.h
-+++ b/arch/powerpc/include/asm/elf.h
-@@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
+diff -urNp linux-2.6.37/arch/powerpc/include/asm/elf.h linux-2.6.37/arch/powerpc/include/asm/elf.h
+--- linux-2.6.37/arch/powerpc/include/asm/elf.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/powerpc/include/asm/elf.h 2011-01-17 02:41:00.000000000 -0500
+@@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
the loader. We need to make sure that it is out of the way of the program
that it will "exec", and that there is sufficient room for the brk. */
@@ -2301,8 +2271,8 @@ index c376eda..a8cd687 100644
+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
+
+#ifdef __powerpc64__
-+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
-+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
++#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
++#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
+#else
+#define PAX_DELTA_MMAP_LEN 15
+#define PAX_DELTA_STACK_LEN 15
@@ -2311,7 +2281,7 @@ index c376eda..a8cd687 100644
/*
* Our registers are always unsigned longs, whether we're a 32 bit
-@@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
+@@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(s
(0x7ff >> (PAGE_SHIFT - 12)) : \
(0x3ffff >> (PAGE_SHIFT - 12)))
@@ -2321,11 +2291,10 @@ index c376eda..a8cd687 100644
#endif /* __KERNEL__ */
/*
-diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
-index edfc980..1766f59 100644
---- a/arch/powerpc/include/asm/iommu.h
-+++ b/arch/powerpc/include/asm/iommu.h
-@@ -116,6 +116,9 @@ extern void iommu_init_early_iSeries(void);
+diff -urNp linux-2.6.37/arch/powerpc/include/asm/iommu.h linux-2.6.37/arch/powerpc/include/asm/iommu.h
+--- linux-2.6.37/arch/powerpc/include/asm/iommu.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/powerpc/include/asm/iommu.h 2011-01-17 02:41:00.000000000 -0500
+@@ -116,6 +116,9 @@ extern void iommu_init_early_iSeries(voi
extern void iommu_init_early_dart(void);
extern void iommu_init_early_pasemi(void);
@@ -2335,10 +2304,9 @@ index edfc980..1766f59 100644
#ifdef CONFIG_PCI
extern void pci_iommu_init(void);
extern void pci_direct_iommu_init(void);
-diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
-index bca8fdc..61e9580 100644
---- a/arch/powerpc/include/asm/kmap_types.h
-+++ b/arch/powerpc/include/asm/kmap_types.h
+diff -urNp linux-2.6.37/arch/powerpc/include/asm/kmap_types.h linux-2.6.37/arch/powerpc/include/asm/kmap_types.h
+--- linux-2.6.37/arch/powerpc/include/asm/kmap_types.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/powerpc/include/asm/kmap_types.h 2011-01-17 02:41:00.000000000 -0500
@@ -27,6 +27,7 @@ enum km_type {
KM_PPC_SYNC_PAGE,
KM_PPC_SYNC_ICACHE,
@@ -2347,10 +2315,33 @@ index bca8fdc..61e9580 100644
KM_TYPE_NR
};
-diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
-index 53b64be..82be2e0 100644
---- a/arch/powerpc/include/asm/page.h
-+++ b/arch/powerpc/include/asm/page.h
+diff -urNp linux-2.6.37/arch/powerpc/include/asm/page_64.h linux-2.6.37/arch/powerpc/include/asm/page_64.h
+--- linux-2.6.37/arch/powerpc/include/asm/page_64.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/powerpc/include/asm/page_64.h 2011-01-17 02:41:00.000000000 -0500
+@@ -172,15 +172,18 @@ do { \
+ * stack by default, so in the absense of a PT_GNU_STACK program header
+ * we turn execute permission off.
+ */
+-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
+- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
++#define VM_STACK_DEFAULT_FLAGS32 \
++ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
++ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+ #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
++#ifndef CONFIG_PAX_PAGEEXEC
+ #define VM_STACK_DEFAULT_FLAGS \
+ (is_32bit_task() ? \
+ VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
++#endif
+
+ #include <asm-generic/getorder.h>
+
+diff -urNp linux-2.6.37/arch/powerpc/include/asm/page.h linux-2.6.37/arch/powerpc/include/asm/page.h
+--- linux-2.6.37/arch/powerpc/include/asm/page.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/powerpc/include/asm/page.h 2011-01-17 02:41:00.000000000 -0500
@@ -129,8 +129,9 @@ extern phys_addr_t kernstart_addr;
* and needs to be executable. This means the whole heap ends
* up being executable.
@@ -2373,36 +2364,10 @@ index 53b64be..82be2e0 100644
#ifndef __ASSEMBLY__
#undef STRICT_MM_TYPECHECKS
-diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
-index 358ff14..fbf4ef6 100644
---- a/arch/powerpc/include/asm/page_64.h
-+++ b/arch/powerpc/include/asm/page_64.h
-@@ -172,15 +172,18 @@ do { \
- * stack by default, so in the absense of a PT_GNU_STACK program header
- * we turn execute permission off.
- */
--#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
-- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-+#define VM_STACK_DEFAULT_FLAGS32 \
-+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
-+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
- #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
-+#ifndef CONFIG_PAX_PAGEEXEC
- #define VM_STACK_DEFAULT_FLAGS \
- (test_thread_flag(TIF_32BIT) ? \
- VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
-+#endif
-
- #include <asm-generic/getorder.h>
-
-diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
-index a20a9ad..158bb9a 100644
---- a/arch/powerpc/include/asm/pci.h
-+++ b/arch/powerpc/include/asm/pci.h
-@@ -65,8 +65,8 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
+diff -urNp linux-2.6.37/arch/powerpc/include/asm/pci.h linux-2.6.37/arch/powerpc/include/asm/pci.h
+--- linux-2.6.37/arch/powerpc/include/asm/pci.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/powerpc/include/asm/pci.h 2011-01-17 02:41:00.000000000 -0500
+@@ -65,8 +65,8 @@ static inline int pci_get_legacy_ide_irq
}
#ifdef CONFIG_PCI
@@ -2413,10 +2378,9 @@ index a20a9ad..158bb9a 100644
#else /* CONFIG_PCI */
#define set_pci_dma_ops(d)
#define get_pci_dma_ops() NULL
-diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
-index 4aad413..85d86bf 100644
---- a/arch/powerpc/include/asm/pte-hash32.h
-+++ b/arch/powerpc/include/asm/pte-hash32.h
+diff -urNp linux-2.6.37/arch/powerpc/include/asm/pte-hash32.h linux-2.6.37/arch/powerpc/include/asm/pte-hash32.h
+--- linux-2.6.37/arch/powerpc/include/asm/pte-hash32.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/powerpc/include/asm/pte-hash32.h 2011-01-17 02:41:00.000000000 -0500
@@ -21,6 +21,7 @@
#define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
#define _PAGE_USER 0x004 /* usermode access allowed */
@@ -2425,10 +2389,9 @@ index 4aad413..85d86bf 100644
#define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
#define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
#define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
-diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
-index d62fdf4..fb32167 100644
---- a/arch/powerpc/include/asm/reg.h
-+++ b/arch/powerpc/include/asm/reg.h
+diff -urNp linux-2.6.37/arch/powerpc/include/asm/reg.h linux-2.6.37/arch/powerpc/include/asm/reg.h
+--- linux-2.6.37/arch/powerpc/include/asm/reg.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/powerpc/include/asm/reg.h 2011-01-17 02:41:00.000000000 -0500
@@ -191,6 +191,7 @@
#define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
#define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
@@ -2437,10 +2400,9 @@ index d62fdf4..fb32167 100644
#define DSISR_PROTFAULT 0x08000000 /* protection fault */
#define DSISR_ISSTORE 0x02000000 /* access was a store */
#define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
-diff --git a/arch/powerpc/include/asm/swiotlb.h b/arch/powerpc/include/asm/swiotlb.h
-index 8979d4c..d2fd0d3 100644
---- a/arch/powerpc/include/asm/swiotlb.h
-+++ b/arch/powerpc/include/asm/swiotlb.h
+diff -urNp linux-2.6.37/arch/powerpc/include/asm/swiotlb.h linux-2.6.37/arch/powerpc/include/asm/swiotlb.h
+--- linux-2.6.37/arch/powerpc/include/asm/swiotlb.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/powerpc/include/asm/swiotlb.h 2011-01-17 02:41:00.000000000 -0500
@@ -13,7 +13,7 @@
#include <linux/swiotlb.h>
@@ -2450,10 +2412,21 @@ index 8979d4c..d2fd0d3 100644
static inline void dma_mark_clean(void *addr, size_t size) {}
-diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
-index bd0fb84..a42a14b 100644
---- a/arch/powerpc/include/asm/uaccess.h
-+++ b/arch/powerpc/include/asm/uaccess.h
+diff -urNp linux-2.6.37/arch/powerpc/include/asm/system.h linux-2.6.37/arch/powerpc/include/asm/system.h
+--- linux-2.6.37/arch/powerpc/include/asm/system.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/powerpc/include/asm/system.h 2011-01-17 02:41:00.000000000 -0500
+@@ -533,7 +533,7 @@ __cmpxchg_local(volatile void *ptr, unsi
+ #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+ #endif
+
+-extern unsigned long arch_align_stack(unsigned long sp);
++#define arch_align_stack(x) ((x) & ~0xfUL)
+
+ /* Used in very early kernel initialization. */
+ extern unsigned long reloc_offset(void);
+diff -urNp linux-2.6.37/arch/powerpc/include/asm/uaccess.h linux-2.6.37/arch/powerpc/include/asm/uaccess.h
+--- linux-2.6.37/arch/powerpc/include/asm/uaccess.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/powerpc/include/asm/uaccess.h 2011-01-17 02:41:00.000000000 -0500
@@ -13,6 +13,8 @@
#define VERIFY_READ 0
#define VERIFY_WRITE 1
@@ -2516,7 +2489,7 @@ index bd0fb84..a42a14b 100644
static inline unsigned long __copy_from_user_inatomic(void *to,
const void __user *from, unsigned long n)
{
-@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
+@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
if (ret == 0)
return 0;
}
@@ -2527,7 +2500,7 @@ index bd0fb84..a42a14b 100644
return __copy_tofrom_user((__force void __user *)to, from, n);
}
-@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
+@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
if (ret == 0)
return 0;
}
@@ -2538,7 +2511,7 @@ index bd0fb84..a42a14b 100644
return __copy_tofrom_user(to, (__force const void __user *)from, n);
}
-@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
+@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
return __copy_to_user_inatomic(to, from, size);
}
@@ -2631,11 +2604,31 @@ index bd0fb84..a42a14b 100644
extern unsigned long __clear_user(void __user *addr, unsigned long size);
static inline unsigned long clear_user(void __user *addr, unsigned long size)
-diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
-index 37771a5..648530c 100644
---- a/arch/powerpc/kernel/dma-iommu.c
-+++ b/arch/powerpc/kernel/dma-iommu.c
-@@ -70,7 +70,7 @@ static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
+diff -urNp linux-2.6.37/arch/powerpc/kernel/dma.c linux-2.6.37/arch/powerpc/kernel/dma.c
+--- linux-2.6.37/arch/powerpc/kernel/dma.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/powerpc/kernel/dma.c 2011-01-17 02:41:00.000000000 -0500
+@@ -136,7 +136,7 @@ static inline void dma_direct_sync_singl
+ }
+ #endif
+
+-struct dma_map_ops dma_direct_ops = {
++const struct dma_map_ops dma_direct_ops = {
+ .alloc_coherent = dma_direct_alloc_coherent,
+ .free_coherent = dma_direct_free_coherent,
+ .map_sg = dma_direct_map_sg,
+@@ -157,7 +157,7 @@ EXPORT_SYMBOL(dma_direct_ops);
+
+ int dma_set_mask(struct device *dev, u64 dma_mask)
+ {
+- struct dma_map_ops *dma_ops = get_dma_ops(dev);
++ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
+
+ if (ppc_md.dma_set_mask)
+ return ppc_md.dma_set_mask(dev, dma_mask);
+diff -urNp linux-2.6.37/arch/powerpc/kernel/dma-iommu.c linux-2.6.37/arch/powerpc/kernel/dma-iommu.c
+--- linux-2.6.37/arch/powerpc/kernel/dma-iommu.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/powerpc/kernel/dma-iommu.c 2011-01-17 02:41:00.000000000 -0500
+@@ -70,7 +70,7 @@ static void dma_iommu_unmap_sg(struct de
}
/* We support DMA to/from any memory page via the iommu */
@@ -2644,10 +2637,17 @@ index 37771a5..648530c 100644
{
struct iommu_table *tbl = get_iommu_table_base(dev);
-diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
-index 02f724f..065adf6 100644
---- a/arch/powerpc/kernel/dma-swiotlb.c
-+++ b/arch/powerpc/kernel/dma-swiotlb.c
+@@ -90,6 +90,7 @@ static int dma_iommu_dma_supported(struc
+ return 1;
+ }
+
++/* cannot be const, see arch/powerpc/platforms/cell/iommu.c */
+ struct dma_map_ops dma_iommu_ops = {
+ .alloc_coherent = dma_iommu_alloc_coherent,
+ .free_coherent = dma_iommu_free_coherent,
+diff -urNp linux-2.6.37/arch/powerpc/kernel/dma-swiotlb.c linux-2.6.37/arch/powerpc/kernel/dma-swiotlb.c
+--- linux-2.6.37/arch/powerpc/kernel/dma-swiotlb.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/powerpc/kernel/dma-swiotlb.c 2011-01-17 02:41:00.000000000 -0500
@@ -31,7 +31,7 @@ unsigned int ppc_swiotlb_enable;
* map_page, and unmap_page on highmem, use normal dma_ops
* for everything else.
@@ -2657,24 +2657,10 @@ index 02f724f..065adf6 100644
.alloc_coherent = dma_direct_alloc_coherent,
.free_coherent = dma_direct_free_coherent,
.map_sg = swiotlb_map_sg_attrs,
-diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
-index 84d6367..df07362 100644
---- a/arch/powerpc/kernel/dma.c
-+++ b/arch/powerpc/kernel/dma.c
-@@ -135,7 +135,7 @@ static inline void dma_direct_sync_single(struct device *dev,
- }
- #endif
-
--struct dma_map_ops dma_direct_ops = {
-+const struct dma_map_ops dma_direct_ops = {
- .alloc_coherent = dma_direct_alloc_coherent,
- .free_coherent = dma_direct_free_coherent,
- .map_sg = dma_direct_map_sg,
-diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
-index 24dcc0e..a300455 100644
---- a/arch/powerpc/kernel/exceptions-64e.S
-+++ b/arch/powerpc/kernel/exceptions-64e.S
-@@ -455,6 +455,7 @@ storage_fault_common:
+diff -urNp linux-2.6.37/arch/powerpc/kernel/exceptions-64e.S linux-2.6.37/arch/powerpc/kernel/exceptions-64e.S
+--- linux-2.6.37/arch/powerpc/kernel/exceptions-64e.S 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/powerpc/kernel/exceptions-64e.S 2011-01-17 02:41:00.000000000 -0500
+@@ -495,6 +495,7 @@ storage_fault_common:
std r14,_DAR(r1)
std r15,_DSISR(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
@@ -2682,7 +2668,7 @@ index 24dcc0e..a300455 100644
mr r4,r14
mr r5,r15
ld r14,PACA_EXGEN+EX_R14(r13)
-@@ -464,8 +465,7 @@ storage_fault_common:
+@@ -504,8 +505,7 @@ storage_fault_common:
cmpdi r3,0
bne- 1f
b .ret_from_except_lite
@@ -2692,11 +2678,10 @@ index 24dcc0e..a300455 100644
addi r3,r1,STACK_FRAME_OVERHEAD
ld r4,_DAR(r1)
bl .bad_page_fault
-diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
-index 3e423fb..34f47a0 100644
---- a/arch/powerpc/kernel/exceptions-64s.S
-+++ b/arch/powerpc/kernel/exceptions-64s.S
-@@ -840,10 +840,10 @@ handle_page_fault:
+diff -urNp linux-2.6.37/arch/powerpc/kernel/exceptions-64s.S linux-2.6.37/arch/powerpc/kernel/exceptions-64s.S
+--- linux-2.6.37/arch/powerpc/kernel/exceptions-64s.S 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/powerpc/kernel/exceptions-64s.S 2011-01-17 02:41:00.000000000 -0500
+@@ -847,10 +847,10 @@ handle_page_fault:
11: ld r4,_DAR(r1)
ld r5,_DSISR(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
@@ -2708,11 +2693,10 @@ index 3e423fb..34f47a0 100644
mr r5,r3
addi r3,r1,STACK_FRAME_OVERHEAD
lwz r4,_DAR(r1)
-diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
-index 21266ab..e27733b 100644
---- a/arch/powerpc/kernel/ibmebus.c
-+++ b/arch/powerpc/kernel/ibmebus.c
-@@ -128,7 +128,7 @@ static int ibmebus_dma_supported(struct device *dev, u64 mask)
+diff -urNp linux-2.6.37/arch/powerpc/kernel/ibmebus.c linux-2.6.37/arch/powerpc/kernel/ibmebus.c
+--- linux-2.6.37/arch/powerpc/kernel/ibmebus.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/powerpc/kernel/ibmebus.c 2011-01-17 02:41:00.000000000 -0500
+@@ -128,7 +128,7 @@ static int ibmebus_dma_supported(struct
return 1;
}
@@ -2721,20 +2705,10 @@ index 21266ab..e27733b 100644
.alloc_coherent = ibmebus_alloc_coherent,
.free_coherent = ibmebus_free_coherent,
.map_sg = ibmebus_map_sg,
-diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
-index 82a7b22..fe7180e 100644
---- a/arch/powerpc/kernel/kgdb.c
-+++ b/arch/powerpc/kernel/kgdb.c
-@@ -128,7 +128,7 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
- if (kgdb_handle_exception(1, SIGTRAP, 0, regs) != 0)
- return 0;
-
-- if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
-+ if (*(u32 *) (regs->nip) == *(const u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
- regs->nip += 4;
-
- return 1;
-@@ -360,7 +360,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
+diff -urNp linux-2.6.37/arch/powerpc/kernel/kgdb.c linux-2.6.37/arch/powerpc/kernel/kgdb.c
+--- linux-2.6.37/arch/powerpc/kernel/kgdb.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/powerpc/kernel/kgdb.c 2011-01-17 02:41:00.000000000 -0500
+@@ -422,7 +422,7 @@ int kgdb_arch_handle_exception(int vecto
/*
* Global data
*/
@@ -2743,10 +2717,41 @@ index 82a7b22..fe7180e 100644
.gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
};
-diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
-index 477c663..4f50234 100644
---- a/arch/powerpc/kernel/module.c
-+++ b/arch/powerpc/kernel/module.c
+diff -urNp linux-2.6.37/arch/powerpc/kernel/module_32.c linux-2.6.37/arch/powerpc/kernel/module_32.c
+--- linux-2.6.37/arch/powerpc/kernel/module_32.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/powerpc/kernel/module_32.c 2011-01-17 02:41:00.000000000 -0500
+@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
+ me->arch.core_plt_section = i;
+ }
+ if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
+- printk("Module doesn't contain .plt or .init.plt sections.\n");
++ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
+ return -ENOEXEC;
+ }
+
+@@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati
+
+ DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
+ /* Init, or core PLT? */
+- if (location >= mod->module_core
+- && location < mod->module_core + mod->core_size)
++ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
++ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
+ entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
+- else
++ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
++ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
+ entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
++ else {
++ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
++ return ~0UL;
++ }
+
+ /* Find this entry, or if that fails, the next avail. entry */
+ while (entry->jump[0]) {
+diff -urNp linux-2.6.37/arch/powerpc/kernel/module.c linux-2.6.37/arch/powerpc/kernel/module.c
+--- linux-2.6.37/arch/powerpc/kernel/module.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/powerpc/kernel/module.c 2011-01-17 02:41:00.000000000 -0500
@@ -31,11 +31,24 @@
LIST_HEAD(module_bug_list);
@@ -2772,7 +2777,7 @@ index 477c663..4f50234 100644
return vmalloc_exec(size);
}
-@@ -45,6 +58,13 @@ void module_free(struct module *mod, void *module_region)
+@@ -45,6 +58,13 @@ void module_free(struct module *mod, voi
vfree(module_region);
}
@@ -2786,44 +2791,10 @@ index 477c663..4f50234 100644
static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
const char *name)
-diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
-index f832773..0507238 100644
---- a/arch/powerpc/kernel/module_32.c
-+++ b/arch/powerpc/kernel/module_32.c
-@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
- me->arch.core_plt_section = i;
- }
- if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
-- printk("Module doesn't contain .plt or .init.plt sections.\n");
-+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
- return -ENOEXEC;
- }
-
-@@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *location,
-
- DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
- /* Init, or core PLT? */
-- if (location >= mod->module_core
-- && location < mod->module_core + mod->core_size)
-+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
-+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
- entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
-- else
-+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
-+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
- entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
-+ else {
-+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
-+ return ~0UL;
-+ }
-
- /* Find this entry, or if that fails, the next avail. entry */
- while (entry->jump[0]) {
-diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
-index 5b38f6a..8175940 100644
---- a/arch/powerpc/kernel/pci-common.c
-+++ b/arch/powerpc/kernel/pci-common.c
-@@ -51,14 +51,14 @@ resource_size_t isa_mem_base;
+diff -urNp linux-2.6.37/arch/powerpc/kernel/pci-common.c linux-2.6.37/arch/powerpc/kernel/pci-common.c
+--- linux-2.6.37/arch/powerpc/kernel/pci-common.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/powerpc/kernel/pci-common.c 2011-01-17 02:41:00.000000000 -0500
+@@ -52,14 +52,14 @@ resource_size_t isa_mem_base;
unsigned int ppc_pci_flags = 0;
@@ -2841,14 +2812,53 @@ index 5b38f6a..8175940 100644
{
return pci_dma_ops;
}
-diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
-index 773424d..1d2df74 100644
---- a/arch/powerpc/kernel/process.c
-+++ b/arch/powerpc/kernel/process.c
-@@ -1215,51 +1215,3 @@ unsigned long arch_align_stack(unsigned long sp)
- sp -= get_random_int() & ~PAGE_MASK;
- return sp & ~0xf;
+diff -urNp linux-2.6.37/arch/powerpc/kernel/process.c linux-2.6.37/arch/powerpc/kernel/process.c
+--- linux-2.6.37/arch/powerpc/kernel/process.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/powerpc/kernel/process.c 2011-01-17 02:41:00.000000000 -0500
+@@ -653,8 +653,8 @@ void show_regs(struct pt_regs * regs)
+ * Lookup NIP late so we have the best change of getting the
+ * above info out without failing
+ */
+- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
+- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
++ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
++ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
+ #endif
+ show_stack(current, (unsigned long *) regs->gpr[1]);
+ if (!user_mode(regs))
+@@ -1144,10 +1144,10 @@ void show_stack(struct task_struct *tsk,
+ newsp = stack[0];
+ ip = stack[STACK_FRAME_LR_SAVE];
+ if (!firstframe || ip != lr) {
+- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
++ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
+ #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ if ((ip == rth || ip == mrth) && curr_frame >= 0) {
+- printk(" (%pS)",
++ printk(" (%pA)",
+ (void *)current->ret_stack[curr_frame].ret);
+ curr_frame--;
+ }
+@@ -1167,7 +1167,7 @@ void show_stack(struct task_struct *tsk,
+ struct pt_regs *regs = (struct pt_regs *)
+ (sp + STACK_FRAME_OVERHEAD);
+ lr = regs->link;
+- printk("--- Exception: %lx at %pS\n LR = %pS\n",
++ printk("--- Exception: %lx at %pA\n LR = %pA\n",
+ regs->trap, (void *)regs->nip, (void *)lr);
+ firstframe = 1;
+ }
+@@ -1242,58 +1242,3 @@ void thread_info_cache_init(void)
}
+
+ #endif /* THREAD_SHIFT < PAGE_SHIFT */
+-
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+- sp -= get_random_int() & ~PAGE_MASK;
+- return sp & ~0xf;
+-}
-
-static inline unsigned long brk_rnd(void)
-{
@@ -2897,11 +2907,10 @@ index 773424d..1d2df74 100644
-
- return ret;
-}
-diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
-index 2666101..e908fba 100644
---- a/arch/powerpc/kernel/signal_32.c
-+++ b/arch/powerpc/kernel/signal_32.c
-@@ -857,7 +857,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
+diff -urNp linux-2.6.37/arch/powerpc/kernel/signal_32.c linux-2.6.37/arch/powerpc/kernel/signal_32.c
+--- linux-2.6.37/arch/powerpc/kernel/signal_32.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/powerpc/kernel/signal_32.c 2011-01-17 02:41:00.000000000 -0500
+@@ -858,7 +858,7 @@ int handle_rt_signal32(unsigned long sig
/* Save user registers on the stack */
frame = &rt_sf->uc.uc_mcontext;
addr = frame;
@@ -2910,11 +2919,10 @@ index 2666101..e908fba 100644
if (save_user_regs(regs, frame, 0, 1))
goto badframe;
regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
-diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
-index 2fe6fc6..ada0d96 100644
---- a/arch/powerpc/kernel/signal_64.c
-+++ b/arch/powerpc/kernel/signal_64.c
-@@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
+diff -urNp linux-2.6.37/arch/powerpc/kernel/signal_64.c linux-2.6.37/arch/powerpc/kernel/signal_64.c
+--- linux-2.6.37/arch/powerpc/kernel/signal_64.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/powerpc/kernel/signal_64.c 2011-01-17 02:41:00.000000000 -0500
+@@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct
current->thread.fpscr.val = 0;
/* Set up to return from userspace. */
@@ -2923,10 +2931,9 @@ index 2fe6fc6..ada0d96 100644
regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
} else {
err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
-diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
-index 13002fe..e25f4b1 100644
---- a/arch/powerpc/kernel/vdso.c
-+++ b/arch/powerpc/kernel/vdso.c
+diff -urNp linux-2.6.37/arch/powerpc/kernel/vdso.c linux-2.6.37/arch/powerpc/kernel/vdso.c
+--- linux-2.6.37/arch/powerpc/kernel/vdso.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/powerpc/kernel/vdso.c 2011-01-17 02:41:00.000000000 -0500
@@ -36,6 +36,7 @@
#include <asm/firmware.h>
#include <asm/vdso.h>
@@ -2935,7 +2942,7 @@ index 13002fe..e25f4b1 100644
#include "setup.h"
-@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
vdso_base = VDSO32_MBASE;
#endif
@@ -2944,7 +2951,7 @@ index 13002fe..e25f4b1 100644
/* vDSO has a problem and was disabled, just don't "enable" it for the
* process
-@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
vdso_base = get_unmapped_area(NULL, vdso_base,
(vdso_pages << PAGE_SHIFT) +
((VDSO_ALIGNMENT - 1) & PAGE_MASK),
@@ -2953,11 +2960,10 @@ index 13002fe..e25f4b1 100644
if (IS_ERR_VALUE(vdso_base)) {
rc = vdso_base;
goto fail_mmapsem;
-diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
-index 00b9436..699269a 100644
---- a/arch/powerpc/kernel/vio.c
-+++ b/arch/powerpc/kernel/vio.c
-@@ -602,11 +602,12 @@ static void vio_dma_iommu_unmap_sg(struct device *dev,
+diff -urNp linux-2.6.37/arch/powerpc/kernel/vio.c linux-2.6.37/arch/powerpc/kernel/vio.c
+--- linux-2.6.37/arch/powerpc/kernel/vio.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/powerpc/kernel/vio.c 2011-01-17 02:41:00.000000000 -0500
+@@ -600,11 +600,12 @@ static void vio_dma_iommu_unmap_sg(struc
vio_cmo_dealloc(viodev, alloc_size);
}
@@ -2971,7 +2977,7 @@ index 00b9436..699269a 100644
.map_page = vio_dma_iommu_map_page,
.unmap_page = vio_dma_iommu_unmap_page,
-@@ -860,7 +861,6 @@ static void vio_cmo_bus_remove(struct vio_dev *viodev)
+@@ -858,7 +859,6 @@ static void vio_cmo_bus_remove(struct vi
static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
{
@@ -2979,10 +2985,9 @@ index 00b9436..699269a 100644
viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
}
-diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
-index 5eea6f3..5d10396 100644
---- a/arch/powerpc/lib/usercopy_64.c
-+++ b/arch/powerpc/lib/usercopy_64.c
+diff -urNp linux-2.6.37/arch/powerpc/lib/usercopy_64.c linux-2.6.37/arch/powerpc/lib/usercopy_64.c
+--- linux-2.6.37/arch/powerpc/lib/usercopy_64.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/powerpc/lib/usercopy_64.c 2011-01-17 02:41:00.000000000 -0500
@@ -9,22 +9,6 @@
#include <linux/module.h>
#include <asm/uaccess.h>
@@ -3006,7 +3011,7 @@ index 5eea6f3..5d10396 100644
unsigned long copy_in_user(void __user *to, const void __user *from,
unsigned long n)
{
-@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
+@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
return n;
}
@@ -3014,14 +3019,13 @@ index 5eea6f3..5d10396 100644
-EXPORT_SYMBOL(copy_to_user);
EXPORT_SYMBOL(copy_in_user);
-diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
-index 1bd712c..4a0026d 100644
---- a/arch/powerpc/mm/fault.c
-+++ b/arch/powerpc/mm/fault.c
-@@ -30,6 +30,10 @@
- #include <linux/kprobes.h>
+diff -urNp linux-2.6.37/arch/powerpc/mm/fault.c linux-2.6.37/arch/powerpc/mm/fault.c
+--- linux-2.6.37/arch/powerpc/mm/fault.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/powerpc/mm/fault.c 2011-01-17 02:41:00.000000000 -0500
+@@ -31,6 +31,10 @@
#include <linux/kdebug.h>
#include <linux/perf_event.h>
+ #include <linux/magic.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/compiler.h>
@@ -3029,7 +3033,7 @@ index 1bd712c..4a0026d 100644
#include <asm/firmware.h>
#include <asm/page.h>
-@@ -41,6 +45,7 @@
+@@ -42,6 +46,7 @@
#include <asm/tlbflush.h>
#include <asm/siginfo.h>
#include <mm/mmu_decl.h>
@@ -3037,7 +3041,7 @@ index 1bd712c..4a0026d 100644
#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs)
-@@ -64,6 +69,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
+@@ -65,6 +70,33 @@ static inline int notify_page_fault(stru
}
#endif
@@ -3071,7 +3075,7 @@ index 1bd712c..4a0026d 100644
/*
* Check whether the instruction at regs->nip is a store using
* an update addressing form which will update r1.
-@@ -134,7 +166,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
+@@ -135,7 +167,7 @@ int __kprobes do_page_fault(struct pt_re
* indicate errors in DSISR but can validly be set in SRR1.
*/
if (trap == 0x400)
@@ -3080,7 +3084,7 @@ index 1bd712c..4a0026d 100644
else
is_write = error_code & DSISR_ISSTORE;
#else
-@@ -257,7 +289,7 @@ good_area:
+@@ -258,7 +290,7 @@ good_area:
* "undefined". Of those that can be set, this is the only
* one which seems bad.
*/
@@ -3089,7 +3093,7 @@ index 1bd712c..4a0026d 100644
/* Guarded storage error. */
goto bad_area;
#endif /* CONFIG_8xx */
-@@ -272,7 +304,7 @@ good_area:
+@@ -273,7 +305,7 @@ good_area:
* processors use the same I/D cache coherency mechanism
* as embedded.
*/
@@ -3098,7 +3102,7 @@ index 1bd712c..4a0026d 100644
goto bad_area;
#endif /* CONFIG_PPC_STD_MMU */
-@@ -341,6 +373,23 @@ bad_area:
+@@ -342,6 +374,23 @@ bad_area:
bad_area_nosemaphore:
/* User mode accesses cause a SIGSEGV */
if (user_mode(regs)) {
@@ -3122,11 +3126,10 @@ index 1bd712c..4a0026d 100644
_exception(SIGSEGV, regs, code, address);
return 0;
}
-diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
-index 5a783d8..c23e14b 100644
---- a/arch/powerpc/mm/mmap_64.c
-+++ b/arch/powerpc/mm/mmap_64.c
-@@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+diff -urNp linux-2.6.37/arch/powerpc/mm/mmap_64.c linux-2.6.37/arch/powerpc/mm/mmap_64.c
+--- linux-2.6.37/arch/powerpc/mm/mmap_64.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/powerpc/mm/mmap_64.c 2011-01-17 02:41:00.000000000 -0500
+@@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
*/
if (mmap_is_legacy()) {
mm->mmap_base = TASK_UNMAPPED_BASE;
@@ -3149,11 +3152,10 @@ index 5a783d8..c23e14b 100644
mm->get_unmapped_area = arch_get_unmapped_area_topdown;
mm->unmap_area = arch_unmap_area_topdown;
}
-diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
-index ba51948..165f6a1 100644
---- a/arch/powerpc/mm/slice.c
-+++ b/arch/powerpc/mm/slice.c
-@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
+diff -urNp linux-2.6.37/arch/powerpc/mm/slice.c linux-2.6.37/arch/powerpc/mm/slice.c
+--- linux-2.6.37/arch/powerpc/mm/slice.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/powerpc/mm/slice.c 2011-01-17 02:41:00.000000000 -0500
+@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_
if ((mm->task_size - len) < addr)
return 0;
vma = find_vma(mm, addr);
@@ -3171,7 +3173,7 @@ index ba51948..165f6a1 100644
/*
* Remember the place where we stopped the search:
*/
-@@ -336,7 +336,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
+@@ -336,7 +336,7 @@ static unsigned long slice_find_area_top
* return with success:
*/
vma = find_vma(mm, addr);
@@ -3180,7 +3182,7 @@ index ba51948..165f6a1 100644
/* remember the address as a hint for next time */
if (use_cache)
mm->free_area_cache = addr;
-@@ -426,6 +426,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
+@@ -426,6 +426,11 @@ unsigned long slice_get_unmapped_area(un
if (fixed && addr > (mm->task_size - len))
return -EINVAL;
@@ -3192,11 +3194,10 @@ index ba51948..165f6a1 100644
/* If hint, make sure it matches our alignment restrictions */
if (!fixed && addr) {
addr = _ALIGN_UP(addr, 1ul << pshift);
-diff --git a/arch/powerpc/platforms/52xx/lite5200_pm.c b/arch/powerpc/platforms/52xx/lite5200_pm.c
-index b5c753d..8f01abe 100644
---- a/arch/powerpc/platforms/52xx/lite5200_pm.c
-+++ b/arch/powerpc/platforms/52xx/lite5200_pm.c
-@@ -235,7 +235,7 @@ static void lite5200_pm_end(void)
+diff -urNp linux-2.6.37/arch/powerpc/platforms/52xx/lite5200_pm.c linux-2.6.37/arch/powerpc/platforms/52xx/lite5200_pm.c
+--- linux-2.6.37/arch/powerpc/platforms/52xx/lite5200_pm.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/powerpc/platforms/52xx/lite5200_pm.c 2011-01-17 02:41:00.000000000 -0500
+@@ -232,7 +232,7 @@ static void lite5200_pm_end(void)
lite5200_pm_target_state = PM_SUSPEND_ON;
}
@@ -3205,11 +3206,10 @@ index b5c753d..8f01abe 100644
.valid = lite5200_pm_valid,
.begin = lite5200_pm_begin,
.prepare = lite5200_pm_prepare,
-diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pm.c b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
-index 7672253..4dfe095 100644
---- a/arch/powerpc/platforms/52xx/mpc52xx_pm.c
-+++ b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
-@@ -189,7 +189,7 @@ void mpc52xx_pm_finish(void)
+diff -urNp linux-2.6.37/arch/powerpc/platforms/52xx/mpc52xx_pm.c linux-2.6.37/arch/powerpc/platforms/52xx/mpc52xx_pm.c
+--- linux-2.6.37/arch/powerpc/platforms/52xx/mpc52xx_pm.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/powerpc/platforms/52xx/mpc52xx_pm.c 2011-01-17 02:41:00.000000000 -0500
+@@ -186,7 +186,7 @@ void mpc52xx_pm_finish(void)
iounmap(mbar);
}
@@ -3218,10 +3218,9 @@ index 7672253..4dfe095 100644
.valid = mpc52xx_pm_valid,
.prepare = mpc52xx_pm_prepare,
.enter = mpc52xx_pm_enter,
-diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c
-index ebe6c35..8914e7b 100644
---- a/arch/powerpc/platforms/83xx/suspend.c
-+++ b/arch/powerpc/platforms/83xx/suspend.c
+diff -urNp linux-2.6.37/arch/powerpc/platforms/83xx/suspend.c linux-2.6.37/arch/powerpc/platforms/83xx/suspend.c
+--- linux-2.6.37/arch/powerpc/platforms/83xx/suspend.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/powerpc/platforms/83xx/suspend.c 2011-01-17 02:41:00.000000000 -0500
@@ -311,7 +311,7 @@ static int mpc83xx_is_pci_agent(void)
return ret;
}
@@ -3231,11 +3230,10 @@ index ebe6c35..8914e7b 100644
.valid = mpc83xx_suspend_valid,
.begin = mpc83xx_suspend_begin,
.enter = mpc83xx_suspend_enter,
-diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
-index 3712900..645f19f 100644
---- a/arch/powerpc/platforms/cell/iommu.c
-+++ b/arch/powerpc/platforms/cell/iommu.c
-@@ -642,7 +642,7 @@ static int dma_fixed_dma_supported(struct device *dev, u64 mask)
+diff -urNp linux-2.6.37/arch/powerpc/platforms/cell/iommu.c linux-2.6.37/arch/powerpc/platforms/cell/iommu.c
+--- linux-2.6.37/arch/powerpc/platforms/cell/iommu.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/powerpc/platforms/cell/iommu.c 2011-01-17 02:41:00.000000000 -0500
+@@ -642,7 +642,7 @@ static int dma_fixed_dma_supported(struc
static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
@@ -3244,11 +3242,10 @@ index 3712900..645f19f 100644
.alloc_coherent = dma_fixed_alloc_coherent,
.free_coherent = dma_fixed_free_coherent,
.map_sg = dma_fixed_map_sg,
-diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c
-index 23083c3..ec00e40 100644
---- a/arch/powerpc/platforms/ps3/system-bus.c
-+++ b/arch/powerpc/platforms/ps3/system-bus.c
-@@ -695,7 +695,7 @@ static int ps3_dma_supported(struct device *_dev, u64 mask)
+diff -urNp linux-2.6.37/arch/powerpc/platforms/ps3/system-bus.c linux-2.6.37/arch/powerpc/platforms/ps3/system-bus.c
+--- linux-2.6.37/arch/powerpc/platforms/ps3/system-bus.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/powerpc/platforms/ps3/system-bus.c 2011-01-17 02:41:00.000000000 -0500
+@@ -695,7 +695,7 @@ static int ps3_dma_supported(struct devi
return mask >= DMA_BIT_MASK(32);
}
@@ -3257,7 +3254,7 @@ index 23083c3..ec00e40 100644
.alloc_coherent = ps3_alloc_coherent,
.free_coherent = ps3_free_coherent,
.map_sg = ps3_sb_map_sg,
-@@ -705,7 +705,7 @@ static struct dma_map_ops ps3_sb_dma_ops = {
+@@ -705,7 +705,7 @@ static struct dma_map_ops ps3_sb_dma_ops
.unmap_page = ps3_unmap_page,
};
@@ -3266,11 +3263,22 @@ index 23083c3..ec00e40 100644
.alloc_coherent = ps3_alloc_coherent,
.free_coherent = ps3_free_coherent,
.map_sg = ps3_ioc0_map_sg,
-diff --git a/arch/powerpc/sysdev/fsl_pmc.c b/arch/powerpc/sysdev/fsl_pmc.c
-index 9082eb9..47d3ac2 100644
---- a/arch/powerpc/sysdev/fsl_pmc.c
-+++ b/arch/powerpc/sysdev/fsl_pmc.c
-@@ -53,7 +53,7 @@ static int pmc_suspend_valid(suspend_state_t state)
+diff -urNp linux-2.6.37/arch/powerpc/platforms/pseries/suspend.c linux-2.6.37/arch/powerpc/platforms/pseries/suspend.c
+--- linux-2.6.37/arch/powerpc/platforms/pseries/suspend.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/powerpc/platforms/pseries/suspend.c 2011-01-17 02:41:00.000000000 -0500
+@@ -153,7 +153,7 @@ static struct sysdev_class suspend_sysde
+ .name = "power",
+ };
+
+-static struct platform_suspend_ops pseries_suspend_ops = {
++static const struct platform_suspend_ops pseries_suspend_ops = {
+ .valid = suspend_valid_only_mem,
+ .begin = pseries_suspend_begin,
+ .prepare_late = pseries_prepare_late,
+diff -urNp linux-2.6.37/arch/powerpc/sysdev/fsl_pmc.c linux-2.6.37/arch/powerpc/sysdev/fsl_pmc.c
+--- linux-2.6.37/arch/powerpc/sysdev/fsl_pmc.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/powerpc/sysdev/fsl_pmc.c 2011-01-17 02:41:00.000000000 -0500
+@@ -53,7 +53,7 @@ static int pmc_suspend_valid(suspend_sta
return 1;
}
@@ -3279,32 +3287,9 @@ index 9082eb9..47d3ac2 100644
.valid = pmc_suspend_valid,
.enter = pmc_suspend_enter,
};
-diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
-index bee1c0f..cdd44f3 100644
---- a/arch/s390/Kconfig
-+++ b/arch/s390/Kconfig
-@@ -230,13 +230,12 @@ config AUDIT_ARCH
-
- config S390_EXEC_PROTECT
- bool "Data execute protection"
-+ default y
- help
- This option allows to enable a buffer overflow protection for user
-- space programs and it also selects the addressing mode option above.
-- The kernel parameter noexec=on will enable this feature and also
-- switch the addressing modes, default is disabled. Enabling this (via
-- kernel parameter) on machines earlier than IBM System z9-109 EC/BC
-- will reduce system performance.
-+ space programs.
-+ Enabling this on machines earlier than IBM System z9-109 EC/BC will
-+ reduce system performance.
-
- comment "Code generation options"
-
-diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
-index 354d426..883892c 100644
---- a/arch/s390/include/asm/elf.h
-+++ b/arch/s390/include/asm/elf.h
+diff -urNp linux-2.6.37/arch/s390/include/asm/elf.h linux-2.6.37/arch/s390/include/asm/elf.h
+--- linux-2.6.37/arch/s390/include/asm/elf.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/s390/include/asm/elf.h 2011-01-17 02:41:00.000000000 -0500
@@ -163,6 +163,13 @@ extern unsigned int vdso_enabled;
that it will "exec", and that there is sufficient room for the brk. */
#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
@@ -3319,10 +3304,9 @@ index 354d426..883892c 100644
/* This yields a mask that user programs can use to figure out what
instruction set this CPU supports. */
-diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
-index d6b1ed0..071ffbd 100644
---- a/arch/s390/include/asm/uaccess.h
-+++ b/arch/s390/include/asm/uaccess.h
+diff -urNp linux-2.6.37/arch/s390/include/asm/uaccess.h linux-2.6.37/arch/s390/include/asm/uaccess.h
+--- linux-2.6.37/arch/s390/include/asm/uaccess.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/s390/include/asm/uaccess.h 2011-01-17 02:41:00.000000000 -0500
@@ -234,6 +234,10 @@ static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
@@ -3334,7 +3318,7 @@ index d6b1ed0..071ffbd 100644
if (access_ok(VERIFY_WRITE, to, n))
n = __copy_to_user(to, from, n);
return n;
-@@ -259,6 +263,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
+@@ -259,6 +263,9 @@ copy_to_user(void __user *to, const void
static inline unsigned long __must_check
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
@@ -3344,7 +3328,7 @@ index d6b1ed0..071ffbd 100644
if (__builtin_constant_p(n) && (n <= 256))
return uaccess.copy_from_user_small(n, from, to);
else
-@@ -293,6 +300,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
+@@ -293,6 +300,10 @@ copy_from_user(void *to, const void __us
unsigned int sz = __compiletime_object_size(to);
might_fault();
@@ -3355,11 +3339,31 @@ index d6b1ed0..071ffbd 100644
if (unlikely(sz != -1 && sz < n)) {
copy_from_user_overflow();
return n;
-diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
-index 22cfd63..a70f1db 100644
---- a/arch/s390/kernel/module.c
-+++ b/arch/s390/kernel/module.c
-@@ -168,11 +168,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
+diff -urNp linux-2.6.37/arch/s390/Kconfig linux-2.6.37/arch/s390/Kconfig
+--- linux-2.6.37/arch/s390/Kconfig 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/s390/Kconfig 2011-01-17 02:41:00.000000000 -0500
+@@ -242,13 +242,12 @@ config AUDIT_ARCH
+
+ config S390_EXEC_PROTECT
+ bool "Data execute protection"
++ default y
+ help
+ This option allows to enable a buffer overflow protection for user
+- space programs and it also selects the addressing mode option above.
+- The kernel parameter noexec=on will enable this feature and also
+- switch the addressing modes, default is disabled. Enabling this (via
+- kernel parameter) on machines earlier than IBM System z9 this will
+- reduce system performance.
++ space programs.
++ Enabling this (via kernel parameter) on machines earlier than IBM
++ System z9 will reduce system performance.
+
+ comment "Code generation options"
+
+diff -urNp linux-2.6.37/arch/s390/kernel/module.c linux-2.6.37/arch/s390/kernel/module.c
+--- linux-2.6.37/arch/s390/kernel/module.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/s390/kernel/module.c 2011-01-17 02:41:00.000000000 -0500
+@@ -168,11 +168,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
/* Increase core size by size of got & plt and set start
offsets for got and plt. */
@@ -3376,7 +3380,7 @@ index 22cfd63..a70f1db 100644
return 0;
}
-@@ -258,7 +258,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+@@ -258,7 +258,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
if (info->got_initialized == 0) {
Elf_Addr *gotent;
@@ -3385,7 +3389,7 @@ index 22cfd63..a70f1db 100644
info->got_offset;
*gotent = val;
info->got_initialized = 1;
-@@ -282,7 +282,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+@@ -282,7 +282,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
else if (r_type == R_390_GOTENT ||
r_type == R_390_GOTPLTENT)
*(unsigned int *) loc =
@@ -3394,7 +3398,7 @@ index 22cfd63..a70f1db 100644
else if (r_type == R_390_GOT64 ||
r_type == R_390_GOTPLT64)
*(unsigned long *) loc = val;
-@@ -296,7 +296,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+@@ -296,7 +296,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
if (info->plt_initialized == 0) {
unsigned int *ip;
@@ -3403,7 +3407,7 @@ index 22cfd63..a70f1db 100644
info->plt_offset;
#ifndef CONFIG_64BIT
ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
-@@ -321,7 +321,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+@@ -321,7 +321,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
val - loc + 0xffffUL < 0x1ffffeUL) ||
(r_type == R_390_PLT32DBL &&
val - loc + 0xffffffffULL < 0x1fffffffeULL)))
@@ -3412,7 +3416,7 @@ index 22cfd63..a70f1db 100644
me->arch.plt_offset +
info->plt_offset;
val += rela->r_addend - loc;
-@@ -343,7 +343,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+@@ -343,7 +343,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
case R_390_GOTOFF32: /* 32 bit offset to GOT. */
case R_390_GOTOFF64: /* 64 bit offset to GOT. */
val = val + rela->r_addend -
@@ -3421,7 +3425,7 @@ index 22cfd63..a70f1db 100644
if (r_type == R_390_GOTOFF16)
*(unsigned short *) loc = val;
else if (r_type == R_390_GOTOFF32)
-@@ -353,7 +353,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+@@ -353,7 +353,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
break;
case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
@@ -3430,11 +3434,10 @@ index 22cfd63..a70f1db 100644
rela->r_addend - loc;
if (r_type == R_390_GOTPC)
*(unsigned int *) loc = val;
-diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
-index c8e8e13..73bca9f 100644
---- a/arch/s390/kernel/setup.c
-+++ b/arch/s390/kernel/setup.c
-@@ -281,7 +281,7 @@ static int __init early_parse_mem(char *p)
+diff -urNp linux-2.6.37/arch/s390/kernel/setup.c linux-2.6.37/arch/s390/kernel/setup.c
+--- linux-2.6.37/arch/s390/kernel/setup.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/s390/kernel/setup.c 2011-01-17 02:41:00.000000000 -0500
+@@ -281,7 +281,7 @@ static int __init early_parse_mem(char *
}
early_param("mem", early_parse_mem);
@@ -3443,7 +3446,7 @@ index c8e8e13..73bca9f 100644
EXPORT_SYMBOL_GPL(user_mode);
static int set_amode_and_uaccess(unsigned long user_amode,
-@@ -310,17 +310,6 @@ static int set_amode_and_uaccess(unsigned long user_amode,
+@@ -310,17 +310,6 @@ static int set_amode_and_uaccess(unsigne
}
}
@@ -3461,7 +3464,7 @@ index c8e8e13..73bca9f 100644
static int __init early_parse_user_mode(char *p)
{
if (p && strcmp(p, "primary") == 0)
-@@ -337,20 +326,6 @@ static int __init early_parse_user_mode(char *p)
+@@ -337,20 +326,6 @@ static int __init early_parse_user_mode(
}
early_param("user_mode", early_parse_user_mode);
@@ -3482,11 +3485,10 @@ index c8e8e13..73bca9f 100644
static void setup_addressing_mode(void)
{
if (user_mode == SECONDARY_SPACE_MODE) {
-diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
-index a8c2af8..93a9d95 100644
---- a/arch/s390/mm/maccess.c
-+++ b/arch/s390/mm/maccess.c
-@@ -45,7 +45,7 @@ static long probe_kernel_write_odd(void *dst, void *src, size_t size)
+diff -urNp linux-2.6.37/arch/s390/mm/maccess.c linux-2.6.37/arch/s390/mm/maccess.c
+--- linux-2.6.37/arch/s390/mm/maccess.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/s390/mm/maccess.c 2011-01-17 02:41:00.000000000 -0500
+@@ -45,7 +45,7 @@ static long probe_kernel_write_odd(void
return rc ? rc : count;
}
@@ -3495,11 +3497,10 @@ index a8c2af8..93a9d95 100644
{
long copied = 0;
-diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
-index 869efba..b9d76aa 100644
---- a/arch/s390/mm/mmap.c
-+++ b/arch/s390/mm/mmap.c
-@@ -78,10 +78,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+diff -urNp linux-2.6.37/arch/s390/mm/mmap.c linux-2.6.37/arch/s390/mm/mmap.c
+--- linux-2.6.37/arch/s390/mm/mmap.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/s390/mm/mmap.c 2011-01-17 02:41:00.000000000 -0500
+@@ -78,10 +78,22 @@ void arch_pick_mmap_layout(struct mm_str
*/
if (mmap_is_legacy()) {
mm->mmap_base = TASK_UNMAPPED_BASE;
@@ -3522,7 +3523,7 @@ index 869efba..b9d76aa 100644
mm->get_unmapped_area = arch_get_unmapped_area_topdown;
mm->unmap_area = arch_unmap_area_topdown;
}
-@@ -153,10 +165,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+@@ -153,10 +165,22 @@ void arch_pick_mmap_layout(struct mm_str
*/
if (mmap_is_legacy()) {
mm->mmap_base = TASK_UNMAPPED_BASE;
@@ -3545,11 +3546,34 @@ index 869efba..b9d76aa 100644
mm->get_unmapped_area = s390_get_unmapped_area_topdown;
mm->unmap_area = arch_unmap_area_topdown;
}
-diff --git a/arch/sh/boards/mach-hp6xx/pm.c b/arch/sh/boards/mach-hp6xx/pm.c
-index 4499a37..adc9b4b 100644
---- a/arch/sh/boards/mach-hp6xx/pm.c
-+++ b/arch/sh/boards/mach-hp6xx/pm.c
-@@ -143,7 +143,7 @@ static int hp6x0_pm_enter(suspend_state_t state)
+diff -urNp linux-2.6.37/arch/score/include/asm/system.h linux-2.6.37/arch/score/include/asm/system.h
+--- linux-2.6.37/arch/score/include/asm/system.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/score/include/asm/system.h 2011-01-17 02:41:00.000000000 -0500
+@@ -17,7 +17,7 @@ do { \
+ #define finish_arch_switch(prev) do {} while (0)
+
+ typedef void (*vi_handler_t)(void);
+-extern unsigned long arch_align_stack(unsigned long sp);
++#define arch_align_stack(x) (x)
+
+ #define mb() barrier()
+ #define rmb() barrier()
+diff -urNp linux-2.6.37/arch/score/kernel/process.c linux-2.6.37/arch/score/kernel/process.c
+--- linux-2.6.37/arch/score/kernel/process.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/score/kernel/process.c 2011-01-17 02:41:00.000000000 -0500
+@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_stru
+
+ return task_pt_regs(task)->cp0_epc;
+ }
+-
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+- return sp;
+-}
+diff -urNp linux-2.6.37/arch/sh/boards/mach-hp6xx/pm.c linux-2.6.37/arch/sh/boards/mach-hp6xx/pm.c
+--- linux-2.6.37/arch/sh/boards/mach-hp6xx/pm.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/sh/boards/mach-hp6xx/pm.c 2011-01-17 02:41:00.000000000 -0500
+@@ -143,7 +143,7 @@ static int hp6x0_pm_enter(suspend_state_
return 0;
}
@@ -3558,10 +3582,9 @@ index 4499a37..adc9b4b 100644
.enter = hp6x0_pm_enter,
.valid = suspend_valid_only_mem,
};
-diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h
-index bea3337..0c92252 100644
---- a/arch/sh/include/asm/dma-mapping.h
-+++ b/arch/sh/include/asm/dma-mapping.h
+diff -urNp linux-2.6.37/arch/sh/include/asm/dma-mapping.h linux-2.6.37/arch/sh/include/asm/dma-mapping.h
+--- linux-2.6.37/arch/sh/include/asm/dma-mapping.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/sh/include/asm/dma-mapping.h 2011-01-17 02:41:00.000000000 -0500
@@ -1,10 +1,10 @@
#ifndef __ASM_SH_DMA_MAPPING_H
#define __ASM_SH_DMA_MAPPING_H
@@ -3575,7 +3598,7 @@ index bea3337..0c92252 100644
{
return dma_ops;
}
-@@ -14,7 +14,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+@@ -14,7 +14,7 @@ static inline struct dma_map_ops *get_dm
static inline int dma_supported(struct device *dev, u64 mask)
{
@@ -3584,7 +3607,7 @@ index bea3337..0c92252 100644
if (ops->dma_supported)
return ops->dma_supported(dev, mask);
-@@ -24,7 +24,7 @@ static inline int dma_supported(struct device *dev, u64 mask)
+@@ -24,7 +24,7 @@ static inline int dma_supported(struct d
static inline int dma_set_mask(struct device *dev, u64 mask)
{
@@ -3593,7 +3616,7 @@ index bea3337..0c92252 100644
if (!dev->dma_mask || !dma_supported(dev, mask))
return -EIO;
-@@ -59,7 +59,7 @@ static inline int dma_get_cache_alignment(void)
+@@ -44,7 +44,7 @@ void dma_cache_sync(struct device *dev,
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
@@ -3602,7 +3625,7 @@ index bea3337..0c92252 100644
if (ops->mapping_error)
return ops->mapping_error(dev, dma_addr);
-@@ -70,7 +70,7 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+@@ -55,7 +55,7 @@ static inline int dma_mapping_error(stru
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp)
{
@@ -3611,7 +3634,7 @@ index bea3337..0c92252 100644
void *memory;
if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
-@@ -87,7 +87,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+@@ -72,7 +72,7 @@ static inline void *dma_alloc_coherent(s
static inline void dma_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
@@ -3620,11 +3643,10 @@ index bea3337..0c92252 100644
if (dma_release_from_coherent(dev, get_order(size), vaddr))
return;
-diff --git a/arch/sh/kernel/cpu/shmobile/pm.c b/arch/sh/kernel/cpu/shmobile/pm.c
-index e559687..a6f95ae 100644
---- a/arch/sh/kernel/cpu/shmobile/pm.c
-+++ b/arch/sh/kernel/cpu/shmobile/pm.c
-@@ -141,7 +141,7 @@ static int sh_pm_enter(suspend_state_t state)
+diff -urNp linux-2.6.37/arch/sh/kernel/cpu/shmobile/pm.c linux-2.6.37/arch/sh/kernel/cpu/shmobile/pm.c
+--- linux-2.6.37/arch/sh/kernel/cpu/shmobile/pm.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/sh/kernel/cpu/shmobile/pm.c 2011-01-17 02:41:00.000000000 -0500
+@@ -141,7 +141,7 @@ static int sh_pm_enter(suspend_state_t s
return 0;
}
@@ -3633,11 +3655,10 @@ index e559687..a6f95ae 100644
.enter = sh_pm_enter,
.valid = suspend_valid_only_mem,
};
-diff --git a/arch/sh/kernel/dma-nommu.c b/arch/sh/kernel/dma-nommu.c
-index 3c55b87..92ccab3 100644
---- a/arch/sh/kernel/dma-nommu.c
-+++ b/arch/sh/kernel/dma-nommu.c
-@@ -62,7 +62,7 @@ static void nommu_sync_sg(struct device *dev, struct scatterlist *sg,
+diff -urNp linux-2.6.37/arch/sh/kernel/dma-nommu.c linux-2.6.37/arch/sh/kernel/dma-nommu.c
+--- linux-2.6.37/arch/sh/kernel/dma-nommu.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/sh/kernel/dma-nommu.c 2011-01-17 02:41:00.000000000 -0500
+@@ -62,7 +62,7 @@ static void nommu_sync_sg(struct device
}
#endif
@@ -3646,10 +3667,9 @@ index 3c55b87..92ccab3 100644
.alloc_coherent = dma_generic_alloc_coherent,
.free_coherent = dma_generic_free_coherent,
.map_page = nommu_map_page,
-diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c
-index efb6d39..49e7304 100644
---- a/arch/sh/kernel/kgdb.c
-+++ b/arch/sh/kernel/kgdb.c
+diff -urNp linux-2.6.37/arch/sh/kernel/kgdb.c linux-2.6.37/arch/sh/kernel/kgdb.c
+--- linux-2.6.37/arch/sh/kernel/kgdb.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/sh/kernel/kgdb.c 2011-01-17 02:41:00.000000000 -0500
@@ -319,7 +319,7 @@ void kgdb_arch_exit(void)
unregister_die_notifier(&kgdb_notifier);
}
@@ -3659,10 +3679,9 @@ index efb6d39..49e7304 100644
/* Breakpoint instruction: trapa #0x3c */
#ifdef CONFIG_CPU_LITTLE_ENDIAN
.gdb_bpt_instr = { 0x3c, 0xc3 },
-diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
-index c86a085..819f30c 100644
---- a/arch/sh/mm/consistent.c
-+++ b/arch/sh/mm/consistent.c
+diff -urNp linux-2.6.37/arch/sh/mm/consistent.c linux-2.6.37/arch/sh/mm/consistent.c
+--- linux-2.6.37/arch/sh/mm/consistent.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/sh/mm/consistent.c 2011-01-17 02:41:00.000000000 -0500
@@ -22,7 +22,7 @@
#define PREALLOC_DMA_DEBUG_ENTRIES 4096
@@ -3672,11 +3691,10 @@ index c86a085..819f30c 100644
EXPORT_SYMBOL(dma_ops);
static int __init dma_init(void)
-diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
-index afeb710..fa68ac9 100644
---- a/arch/sh/mm/mmap.c
-+++ b/arch/sh/mm/mmap.c
-@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+diff -urNp linux-2.6.37/arch/sh/mm/mmap.c linux-2.6.37/arch/sh/mm/mmap.c
+--- linux-2.6.37/arch/sh/mm/mmap.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/sh/mm/mmap.c 2011-01-17 02:41:00.000000000 -0500
+@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
@@ -3695,7 +3713,7 @@ index afeb710..fa68ac9 100644
/*
* Remember the place where we stopped the search:
*/
-@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
@@ -3705,7 +3723,7 @@ index afeb710..fa68ac9 100644
return addr;
}
-@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
/* make sure it can fit in the remaining address space */
if (likely(addr > len)) {
vma = find_vma(mm, addr-len);
@@ -3714,7 +3732,7 @@ index afeb710..fa68ac9 100644
/* remember the address as a hint for next time */
return (mm->free_area_cache = addr-len);
}
-@@ -199,7 +197,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -199,7 +197,7 @@ arch_get_unmapped_area_topdown(struct fi
* return with success:
*/
vma = find_vma(mm, addr);
@@ -3723,23 +3741,9 @@ index afeb710..fa68ac9 100644
/* remember the address as a hint for next time */
return (mm->free_area_cache = addr);
}
-diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
-index 113225b..7fd04e7 100644
---- a/arch/sparc/Makefile
-+++ b/arch/sparc/Makefile
-@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
- # Export what is needed by arch/sparc/boot/Makefile
- export VMLINUX_INIT VMLINUX_MAIN
- VMLINUX_INIT := $(head-y) $(init-y)
--VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
-+VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
- VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
- VMLINUX_MAIN += $(drivers-y) $(net-y)
-
-diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
-index bdb2ff8..69e69f3 100644
---- a/arch/sparc/include/asm/atomic_64.h
-+++ b/arch/sparc/include/asm/atomic_64.h
+diff -urNp linux-2.6.37/arch/sparc/include/asm/atomic_64.h linux-2.6.37/arch/sparc/include/asm/atomic_64.h
+--- linux-2.6.37/arch/sparc/include/asm/atomic_64.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/sparc/include/asm/atomic_64.h 2011-01-17 02:41:00.000000000 -0500
@@ -14,18 +14,40 @@
#define ATOMIC64_INIT(i) { (i) }
@@ -3781,7 +3785,7 @@ index bdb2ff8..69e69f3 100644
extern int atomic_sub_ret(int, atomic_t *);
extern long atomic64_sub_ret(long, atomic64_t *);
-@@ -33,12 +55,24 @@ extern long atomic64_sub_ret(long, atomic64_t *);
+@@ -33,12 +55,24 @@ extern long atomic64_sub_ret(long, atomi
#define atomic64_dec_return(v) atomic64_sub_ret(1, v)
#define atomic_inc_return(v) atomic_add_ret(1, v)
@@ -3806,7 +3810,7 @@ index bdb2ff8..69e69f3 100644
#define atomic64_add_return(i, v) atomic64_add_ret(i, v)
/*
-@@ -59,10 +93,26 @@ extern long atomic64_sub_ret(long, atomic64_t *);
+@@ -59,10 +93,26 @@ extern long atomic64_sub_ret(long, atomi
#define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
#define atomic_inc(v) atomic_add(1, v)
@@ -3833,7 +3837,7 @@ index bdb2ff8..69e69f3 100644
#define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
#define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
-@@ -72,17 +122,28 @@ extern long atomic64_sub_ret(long, atomic64_t *);
+@@ -72,17 +122,28 @@ extern long atomic64_sub_ret(long, atomi
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
@@ -3866,7 +3870,7 @@ index bdb2ff8..69e69f3 100644
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
-@@ -93,17 +154,28 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
+@@ -93,17 +154,28 @@ static inline int atomic_add_unless(atom
static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
{
@@ -3899,13 +3903,12 @@ index bdb2ff8..69e69f3 100644
}
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
-index 4b4a0c0..29925cb 100644
---- a/arch/sparc/include/asm/dma-mapping.h
-+++ b/arch/sparc/include/asm/dma-mapping.h
-@@ -13,10 +13,10 @@ extern int dma_supported(struct device *dev, u64 mask);
+diff -urNp linux-2.6.37/arch/sparc/include/asm/dma-mapping.h linux-2.6.37/arch/sparc/include/asm/dma-mapping.h
+--- linux-2.6.37/arch/sparc/include/asm/dma-mapping.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/sparc/include/asm/dma-mapping.h 2011-01-17 02:41:00.000000000 -0500
+@@ -12,10 +12,10 @@ extern int dma_supported(struct device *
+ #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
- #define dma_is_consistent(d, h) (1)
-extern struct dma_map_ops *dma_ops, pci32_dma_ops;
+extern const struct dma_map_ops *dma_ops, pci32_dma_ops;
@@ -3916,7 +3919,7 @@ index 4b4a0c0..29925cb 100644
{
#if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
if (dev->bus == &pci_bus_type)
-@@ -30,7 +30,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+@@ -29,7 +29,7 @@ static inline struct dma_map_ops *get_dm
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
{
@@ -3925,7 +3928,7 @@ index 4b4a0c0..29925cb 100644
void *cpu_addr;
cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
-@@ -41,7 +41,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+@@ -40,7 +40,7 @@ static inline void *dma_alloc_coherent(s
static inline void dma_free_coherent(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_handle)
{
@@ -3934,10 +3937,9 @@ index 4b4a0c0..29925cb 100644
debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
ops->free_coherent(dev, size, cpu_addr, dma_handle);
-diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
-index 4269ca6..e3da77f 100644
---- a/arch/sparc/include/asm/elf_32.h
-+++ b/arch/sparc/include/asm/elf_32.h
+diff -urNp linux-2.6.37/arch/sparc/include/asm/elf_32.h linux-2.6.37/arch/sparc/include/asm/elf_32.h
+--- linux-2.6.37/arch/sparc/include/asm/elf_32.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/sparc/include/asm/elf_32.h 2011-01-17 02:41:00.000000000 -0500
@@ -114,6 +114,13 @@ typedef struct {
#define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
@@ -3952,10 +3954,9 @@ index 4269ca6..e3da77f 100644
/* This yields a mask that user programs can use to figure out what
instruction set this cpu supports. This can NOT be done in userspace
on Sparc. */
-diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
-index e678803..dfb3071 100644
---- a/arch/sparc/include/asm/elf_64.h
-+++ b/arch/sparc/include/asm/elf_64.h
+diff -urNp linux-2.6.37/arch/sparc/include/asm/elf_64.h linux-2.6.37/arch/sparc/include/asm/elf_64.h
+--- linux-2.6.37/arch/sparc/include/asm/elf_64.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/sparc/include/asm/elf_64.h 2011-01-17 02:41:00.000000000 -0500
@@ -162,6 +162,12 @@ typedef struct {
#define ELF_ET_DYN_BASE 0x0000010000000000UL
#define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
@@ -3969,10 +3970,9 @@ index e678803..dfb3071 100644
/* This yields a mask that user programs can use to figure out what
instruction set this cpu supports. */
-diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
-index 0ece77f..6242e98 100644
---- a/arch/sparc/include/asm/pgtable_32.h
-+++ b/arch/sparc/include/asm/pgtable_32.h
+diff -urNp linux-2.6.37/arch/sparc/include/asm/pgtable_32.h linux-2.6.37/arch/sparc/include/asm/pgtable_32.h
+--- linux-2.6.37/arch/sparc/include/asm/pgtable_32.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/sparc/include/asm/pgtable_32.h 2011-01-17 02:41:00.000000000 -0500
@@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
BTFIXUPDEF_INT(page_none)
BTFIXUPDEF_INT(page_copy)
@@ -4004,10 +4004,9 @@ index 0ece77f..6242e98 100644
extern unsigned long page_kernel;
#ifdef MODULE
-diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
-index 1407c07..7e10231 100644
---- a/arch/sparc/include/asm/pgtsrmmu.h
-+++ b/arch/sparc/include/asm/pgtsrmmu.h
+diff -urNp linux-2.6.37/arch/sparc/include/asm/pgtsrmmu.h linux-2.6.37/arch/sparc/include/asm/pgtsrmmu.h
+--- linux-2.6.37/arch/sparc/include/asm/pgtsrmmu.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/sparc/include/asm/pgtsrmmu.h 2011-01-17 02:41:00.000000000 -0500
@@ -115,6 +115,13 @@
SRMMU_EXEC | SRMMU_REF)
#define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
@@ -4022,11 +4021,10 @@ index 1407c07..7e10231 100644
#define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
SRMMU_DIRTY | SRMMU_REF)
-diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
-index 073936a..9bcd257 100644
---- a/arch/sparc/include/asm/spinlock_64.h
-+++ b/arch/sparc/include/asm/spinlock_64.h
-@@ -99,7 +99,12 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
+diff -urNp linux-2.6.37/arch/sparc/include/asm/spinlock_64.h linux-2.6.37/arch/sparc/include/asm/spinlock_64.h
+--- linux-2.6.37/arch/sparc/include/asm/spinlock_64.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/sparc/include/asm/spinlock_64.h 2011-01-17 02:41:00.000000000 -0500
+@@ -99,7 +99,12 @@ static void inline arch_read_lock(arch_r
__asm__ __volatile__ (
"1: ldsw [%2], %0\n"
" brlz,pn %0, 2f\n"
@@ -4040,7 +4038,7 @@ index 073936a..9bcd257 100644
" cas [%2], %0, %1\n"
" cmp %0, %1\n"
" bne,pn %%icc, 1b\n"
-@@ -112,7 +117,7 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
+@@ -112,7 +117,7 @@ static void inline arch_read_lock(arch_r
" .previous"
: "=&r" (tmp1), "=&r" (tmp2)
: "r" (lock)
@@ -4049,7 +4047,7 @@ index 073936a..9bcd257 100644
}
static int inline arch_read_trylock(arch_rwlock_t *lock)
-@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
+@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch
"1: ldsw [%2], %0\n"
" brlz,a,pn %0, 2f\n"
" mov 0, %0\n"
@@ -4063,7 +4061,7 @@ index 073936a..9bcd257 100644
" cas [%2], %0, %1\n"
" cmp %0, %1\n"
" bne,pn %%icc, 1b\n"
-@@ -142,7 +152,12 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
+@@ -142,7 +152,12 @@ static void inline arch_read_unlock(arch
__asm__ __volatile__(
"1: lduw [%2], %0\n"
@@ -4077,29 +4075,10 @@ index 073936a..9bcd257 100644
" cas [%2], %0, %1\n"
" cmp %0, %1\n"
" bne,pn %%xcc, 1b\n"
-diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
-index e88fbe5..96b0ce5 100644
---- a/arch/sparc/include/asm/uaccess.h
-+++ b/arch/sparc/include/asm/uaccess.h
-@@ -1,5 +1,13 @@
- #ifndef ___ASM_SPARC_UACCESS_H
- #define ___ASM_SPARC_UACCESS_H
-+
-+#ifdef __KERNEL__
-+#ifndef __ASSEMBLY__
-+#include <linux/types.h>
-+extern void check_object_size(const void *ptr, unsigned long n, bool to);
-+#endif
-+#endif
-+
- #if defined(__sparc__) && defined(__arch64__)
- #include <asm/uaccess_64.h>
- #else
-diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
-index 25f1d10..a78c5fe 100644
---- a/arch/sparc/include/asm/uaccess_32.h
-+++ b/arch/sparc/include/asm/uaccess_32.h
-@@ -249,14 +249,25 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
+diff -urNp linux-2.6.37/arch/sparc/include/asm/uaccess_32.h linux-2.6.37/arch/sparc/include/asm/uaccess_32.h
+--- linux-2.6.37/arch/sparc/include/asm/uaccess_32.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/sparc/include/asm/uaccess_32.h 2011-01-17 02:41:00.000000000 -0500
+@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __
static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
{
@@ -4127,19 +4106,12 @@ index 25f1d10..a78c5fe 100644
return __copy_user(to, (__force void __user *) from, n);
}
-@@ -272,19 +283,27 @@ static inline unsigned long copy_from_user(void *to, const void __user *from, un
+ static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
{
- int sz = __compiletime_object_size(to);
-
+- if (n && __access_ok((unsigned long) from, n))
+ if ((long)n < 0)
+ return n;
+
- if (unlikely(sz != -1 && sz < n)) {
- copy_from_user_overflow();
- return n;
- }
-
-- if (n && __access_ok((unsigned long) from, n))
+ if (n && __access_ok((unsigned long) from, n)) {
+ if (!__builtin_constant_p(n))
+ check_object_size(to, n, false);
@@ -4157,10 +4129,9 @@ index 25f1d10..a78c5fe 100644
return __copy_user((__force void __user *) to, from, n);
}
-diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
-index 2406788..02da64f 100644
---- a/arch/sparc/include/asm/uaccess_64.h
-+++ b/arch/sparc/include/asm/uaccess_64.h
+diff -urNp linux-2.6.37/arch/sparc/include/asm/uaccess_64.h linux-2.6.37/arch/sparc/include/asm/uaccess_64.h
+--- linux-2.6.37/arch/sparc/include/asm/uaccess_64.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/sparc/include/asm/uaccess_64.h 2011-01-17 02:41:00.000000000 -0500
@@ -10,6 +10,7 @@
#include <linux/compiler.h>
#include <linux/string.h>
@@ -4169,9 +4140,12 @@ index 2406788..02da64f 100644
#include <asm/asi.h>
#include <asm/system.h>
#include <asm/spitfire.h>
-@@ -224,6 +225,12 @@ copy_from_user(void *to, const void __user *from, unsigned long size)
- int sz = __compiletime_object_size(to);
- unsigned long ret = size;
+@@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixu
+ static inline unsigned long __must_check
+ copy_from_user(void *to, const void __user *from, unsigned long size)
+ {
+- unsigned long ret = ___copy_from_user(to, from, size);
++ unsigned long ret;
+ if ((long)size < 0 || size > INT_MAX)
+ return size;
@@ -4179,10 +4153,11 @@ index 2406788..02da64f 100644
+ if (!__builtin_constant_p(size))
+ check_object_size(to, size, false);
+
- if (likely(sz == -1 || sz >= size)) {
- ret = ___copy_from_user(to, from, size);
- if (unlikely(ret))
-@@ -243,8 +250,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
++ ret = ___copy_from_user(to, from, size);
+ if (unlikely(ret))
+ ret = copy_from_user_fixup(to, from, size);
+
+@@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(
static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long size)
{
@@ -4199,24 +4174,27 @@ index 2406788..02da64f 100644
if (unlikely(ret))
ret = copy_to_user_fixup(to, from, size);
return ret;
-diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
-index 0c2dc1f..7ec02c7 100644
---- a/arch/sparc/kernel/Makefile
-+++ b/arch/sparc/kernel/Makefile
-@@ -3,7 +3,7 @@
- #
-
- asflags-y := -ansi
--ccflags-y := -Werror
-+#ccflags-y := -Werror
-
- extra-y := head_$(BITS).o
- extra-y += init_task.o
-diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
-index 47977a7..639bf96 100644
---- a/arch/sparc/kernel/iommu.c
-+++ b/arch/sparc/kernel/iommu.c
-@@ -828,7 +828,7 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
+diff -urNp linux-2.6.37/arch/sparc/include/asm/uaccess.h linux-2.6.37/arch/sparc/include/asm/uaccess.h
+--- linux-2.6.37/arch/sparc/include/asm/uaccess.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/sparc/include/asm/uaccess.h 2011-01-17 02:41:00.000000000 -0500
+@@ -1,5 +1,13 @@
+ #ifndef ___ASM_SPARC_UACCESS_H
+ #define ___ASM_SPARC_UACCESS_H
++
++#ifdef __KERNEL__
++#ifndef __ASSEMBLY__
++#include <linux/types.h>
++extern void check_object_size(const void *ptr, unsigned long n, bool to);
++#endif
++#endif
++
+ #if defined(__sparc__) && defined(__arch64__)
+ #include <asm/uaccess_64.h>
+ #else
+diff -urNp linux-2.6.37/arch/sparc/kernel/iommu.c linux-2.6.37/arch/sparc/kernel/iommu.c
+--- linux-2.6.37/arch/sparc/kernel/iommu.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/sparc/kernel/iommu.c 2011-01-17 02:41:00.000000000 -0500
+@@ -828,7 +828,7 @@ static void dma_4u_sync_sg_for_cpu(struc
spin_unlock_irqrestore(&iommu->lock, flags);
}
@@ -4225,7 +4203,7 @@ index 47977a7..639bf96 100644
.alloc_coherent = dma_4u_alloc_coherent,
.free_coherent = dma_4u_free_coherent,
.map_page = dma_4u_map_page,
-@@ -839,7 +839,7 @@ static struct dma_map_ops sun4u_dma_ops = {
+@@ -839,7 +839,7 @@ static struct dma_map_ops sun4u_dma_ops
.sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
};
@@ -4234,11 +4212,10 @@ index 47977a7..639bf96 100644
EXPORT_SYMBOL(dma_ops);
extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
-diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
-index 703e4aa..043a5f2 100644
---- a/arch/sparc/kernel/ioport.c
-+++ b/arch/sparc/kernel/ioport.c
-@@ -397,7 +397,7 @@ static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+diff -urNp linux-2.6.37/arch/sparc/kernel/ioport.c linux-2.6.37/arch/sparc/kernel/ioport.c
+--- linux-2.6.37/arch/sparc/kernel/ioport.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/sparc/kernel/ioport.c 2011-01-17 02:41:00.000000000 -0500
+@@ -397,7 +397,7 @@ static void sbus_sync_sg_for_device(stru
BUG();
}
@@ -4256,7 +4233,7 @@ index 703e4aa..043a5f2 100644
EXPORT_SYMBOL(dma_ops);
static int __init sparc_register_ioport(void)
-@@ -645,7 +645,7 @@ static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *
+@@ -645,7 +645,7 @@ static void pci32_sync_sg_for_device(str
}
}
@@ -4265,11 +4242,10 @@ index 703e4aa..043a5f2 100644
.alloc_coherent = pci32_alloc_coherent,
.free_coherent = pci32_free_coherent,
.map_page = pci32_map_page,
-diff --git a/arch/sparc/kernel/kgdb_32.c b/arch/sparc/kernel/kgdb_32.c
-index 539243b..d61227d 100644
---- a/arch/sparc/kernel/kgdb_32.c
-+++ b/arch/sparc/kernel/kgdb_32.c
-@@ -164,7 +164,7 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+diff -urNp linux-2.6.37/arch/sparc/kernel/kgdb_32.c linux-2.6.37/arch/sparc/kernel/kgdb_32.c
+--- linux-2.6.37/arch/sparc/kernel/kgdb_32.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/sparc/kernel/kgdb_32.c 2011-01-17 02:41:00.000000000 -0500
+@@ -164,7 +164,7 @@ void kgdb_arch_set_pc(struct pt_regs *re
regs->npc = regs->pc + 4;
}
@@ -4278,11 +4254,10 @@ index 539243b..d61227d 100644
/* Breakpoint instruction: ta 0x7d */
.gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
};
-diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c
-index 768290a..d8a4286 100644
---- a/arch/sparc/kernel/kgdb_64.c
-+++ b/arch/sparc/kernel/kgdb_64.c
-@@ -187,7 +187,7 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+diff -urNp linux-2.6.37/arch/sparc/kernel/kgdb_64.c linux-2.6.37/arch/sparc/kernel/kgdb_64.c
+--- linux-2.6.37/arch/sparc/kernel/kgdb_64.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/sparc/kernel/kgdb_64.c 2011-01-17 02:41:00.000000000 -0500
+@@ -187,7 +187,7 @@ void kgdb_arch_set_pc(struct pt_regs *re
regs->tnpc = regs->tpc + 4;
}
@@ -4291,11 +4266,22 @@ index 768290a..d8a4286 100644
/* Breakpoint instruction: ta 0x72 */
.gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
};
-diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
-index a24af6f..bc0f1cf 100644
---- a/arch/sparc/kernel/pci_sun4v.c
-+++ b/arch/sparc/kernel/pci_sun4v.c
-@@ -525,7 +525,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
+diff -urNp linux-2.6.37/arch/sparc/kernel/Makefile linux-2.6.37/arch/sparc/kernel/Makefile
+--- linux-2.6.37/arch/sparc/kernel/Makefile 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/sparc/kernel/Makefile 2011-01-17 02:41:00.000000000 -0500
+@@ -3,7 +3,7 @@
+ #
+
+ asflags-y := -ansi
+-ccflags-y := -Werror
++#ccflags-y := -Werror
+
+ extra-y := head_$(BITS).o
+ extra-y += init_task.o
+diff -urNp linux-2.6.37/arch/sparc/kernel/pci_sun4v.c linux-2.6.37/arch/sparc/kernel/pci_sun4v.c
+--- linux-2.6.37/arch/sparc/kernel/pci_sun4v.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/sparc/kernel/pci_sun4v.c 2011-01-17 02:41:00.000000000 -0500
+@@ -525,7 +525,7 @@ static void dma_4v_unmap_sg(struct devic
spin_unlock_irqrestore(&iommu->lock, flags);
}
@@ -4304,11 +4290,86 @@ index a24af6f..bc0f1cf 100644
.alloc_coherent = dma_4v_alloc_coherent,
.free_coherent = dma_4v_free_coherent,
.map_page = dma_4v_map_page,
-diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
-index ee995b7..2393b36 100644
---- a/arch/sparc/kernel/sys_sparc_32.c
-+++ b/arch/sparc/kernel/sys_sparc_32.c
-@@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+diff -urNp linux-2.6.37/arch/sparc/kernel/process_32.c linux-2.6.37/arch/sparc/kernel/process_32.c
+--- linux-2.6.37/arch/sparc/kernel/process_32.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/sparc/kernel/process_32.c 2011-01-17 02:41:00.000000000 -0500
+@@ -196,7 +196,7 @@ void __show_backtrace(unsigned long fp)
+ rw->ins[4], rw->ins[5],
+ rw->ins[6],
+ rw->ins[7]);
+- printk("%pS\n", (void *) rw->ins[7]);
++ printk("%pA\n", (void *) rw->ins[7]);
+ rw = (struct reg_window32 *) rw->ins[6];
+ }
+ spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
+@@ -263,14 +263,14 @@ void show_regs(struct pt_regs *r)
+
+ printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
+ r->psr, r->pc, r->npc, r->y, print_tainted());
+- printk("PC: <%pS>\n", (void *) r->pc);
++ printk("PC: <%pA>\n", (void *) r->pc);
+ printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
+ r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
+ r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
+ printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
+ r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
+ r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
+- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
++ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
+
+ printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
+ rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
+@@ -305,7 +305,7 @@ void show_stack(struct task_struct *tsk,
+ rw = (struct reg_window32 *) fp;
+ pc = rw->ins[7];
+ printk("[%08lx : ", pc);
+- printk("%pS ] ", (void *) pc);
++ printk("%pA ] ", (void *) pc);
+ fp = rw->ins[6];
+ } while (++count < 16);
+ printk("\n");
+diff -urNp linux-2.6.37/arch/sparc/kernel/process_64.c linux-2.6.37/arch/sparc/kernel/process_64.c
+--- linux-2.6.37/arch/sparc/kernel/process_64.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/sparc/kernel/process_64.c 2011-01-17 02:41:00.000000000 -0500
+@@ -180,14 +180,14 @@ static void show_regwindow(struct pt_reg
+ printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
+ rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
+ if (regs->tstate & TSTATE_PRIV)
+- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
++ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
+ }
+
+ void show_regs(struct pt_regs *regs)
+ {
+ printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
+ regs->tpc, regs->tnpc, regs->y, print_tainted());
+- printk("TPC: <%pS>\n", (void *) regs->tpc);
++ printk("TPC: <%pA>\n", (void *) regs->tpc);
+ printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
+ regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
+ regs->u_regs[3]);
+@@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
+ printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
+ regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
+ regs->u_regs[15]);
+- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
++ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
+ show_regwindow(regs);
+ show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
+ }
+@@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void
+ ((tp && tp->task) ? tp->task->pid : -1));
+
+ if (gp->tstate & TSTATE_PRIV) {
+- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
++ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
+ (void *) gp->tpc,
+ (void *) gp->o7,
+ (void *) gp->i7,
+diff -urNp linux-2.6.37/arch/sparc/kernel/sys_sparc_32.c linux-2.6.37/arch/sparc/kernel/sys_sparc_32.c
+--- linux-2.6.37/arch/sparc/kernel/sys_sparc_32.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/sparc/kernel/sys_sparc_32.c 2011-01-17 02:41:00.000000000 -0500
+@@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(str
if (ARCH_SUN4C && len > 0x20000000)
return -ENOMEM;
if (!addr)
@@ -4317,7 +4378,7 @@ index ee995b7..2393b36 100644
if (flags & MAP_SHARED)
addr = COLOUR_ALIGN(addr);
-@@ -72,7 +72,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+@@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(str
}
if (TASK_SIZE - PAGE_SIZE - len < addr)
return -ENOMEM;
@@ -4326,11 +4387,10 @@ index ee995b7..2393b36 100644
return addr;
addr = vmm->vm_end;
if (flags & MAP_SHARED)
-diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
-index 3d435c4..14d838e 100644
---- a/arch/sparc/kernel/sys_sparc_64.c
-+++ b/arch/sparc/kernel/sys_sparc_64.c
-@@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+diff -urNp linux-2.6.37/arch/sparc/kernel/sys_sparc_64.c linux-2.6.37/arch/sparc/kernel/sys_sparc_64.c
+--- linux-2.6.37/arch/sparc/kernel/sys_sparc_64.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/sparc/kernel/sys_sparc_64.c 2011-01-17 02:41:00.000000000 -0500
+@@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(str
/* We do not accept a shared mapping if it would violate
* cache aliasing constraints.
*/
@@ -4339,7 +4399,7 @@ index 3d435c4..14d838e 100644
((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
return -EINVAL;
return addr;
-@@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+@@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(str
if (filp || (flags & MAP_SHARED))
do_color_align = 1;
@@ -4350,7 +4410,7 @@ index 3d435c4..14d838e 100644
if (addr) {
if (do_color_align)
addr = COLOUR_ALIGN(addr, pgoff);
-@@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+@@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(str
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
@@ -4387,7 +4447,7 @@ index 3d435c4..14d838e 100644
/*
* Remember the place where we stopped the search:
*/
-@@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct fi
/* We do not accept a shared mapping if it would violate
* cache aliasing constraints.
*/
@@ -4396,7 +4456,7 @@ index 3d435c4..14d838e 100644
((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
return -EINVAL;
return addr;
-@@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct fi
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
@@ -4406,7 +4466,7 @@ index 3d435c4..14d838e 100644
return addr;
}
-@@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct fi
/* make sure it can fit in the remaining address space */
if (likely(addr > len)) {
vma = find_vma(mm, addr-len);
@@ -4415,7 +4475,7 @@ index 3d435c4..14d838e 100644
/* remember the address as a hint for next time */
return (mm->free_area_cache = addr-len);
}
-@@ -278,7 +280,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -278,7 +280,7 @@ arch_get_unmapped_area_topdown(struct fi
* return with success:
*/
vma = find_vma(mm, addr);
@@ -4424,7 +4484,7 @@ index 3d435c4..14d838e 100644
/* remember the address as a hint for next time */
return (mm->free_area_cache = addr);
}
-@@ -385,6 +387,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+@@ -385,6 +387,12 @@ void arch_pick_mmap_layout(struct mm_str
gap == RLIM_INFINITY ||
sysctl_legacy_va_layout) {
mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
@@ -4437,7 +4497,7 @@ index 3d435c4..14d838e 100644
mm->get_unmapped_area = arch_get_unmapped_area;
mm->unmap_area = arch_unmap_area;
} else {
-@@ -397,6 +405,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+@@ -397,6 +405,12 @@ void arch_pick_mmap_layout(struct mm_str
gap = (task_size / 6 * 5);
mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
@@ -4450,11 +4510,31 @@ index 3d435c4..14d838e 100644
mm->get_unmapped_area = arch_get_unmapped_area_topdown;
mm->unmap_area = arch_unmap_area_topdown;
}
-diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
-index 42ad2ba..3aafd82 100644
---- a/arch/sparc/kernel/traps_64.c
-+++ b/arch/sparc/kernel/traps_64.c
-@@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
+diff -urNp linux-2.6.37/arch/sparc/kernel/traps_32.c linux-2.6.37/arch/sparc/kernel/traps_32.c
+--- linux-2.6.37/arch/sparc/kernel/traps_32.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/sparc/kernel/traps_32.c 2011-01-17 02:41:00.000000000 -0500
+@@ -76,7 +76,7 @@ void die_if_kernel(char *str, struct pt_
+ count++ < 30 &&
+ (((unsigned long) rw) >= PAGE_OFFSET) &&
+ !(((unsigned long) rw) & 0x7)) {
+- printk("Caller[%08lx]: %pS\n", rw->ins[7],
++ printk("Caller[%08lx]: %pA\n", rw->ins[7],
+ (void *) rw->ins[7]);
+ rw = (struct reg_window32 *)rw->ins[6];
+ }
+diff -urNp linux-2.6.37/arch/sparc/kernel/traps_64.c linux-2.6.37/arch/sparc/kernel/traps_64.c
+--- linux-2.6.37/arch/sparc/kernel/traps_64.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/sparc/kernel/traps_64.c 2011-01-17 02:41:00.000000000 -0500
+@@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_
+ i + 1,
+ p->trapstack[i].tstate, p->trapstack[i].tpc,
+ p->trapstack[i].tnpc, p->trapstack[i].tt);
+- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
++ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
+ }
+ }
+
+@@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long
lvl -= 0x100;
if (regs->tstate & TSTATE_PRIV) {
@@ -4467,7 +4547,7 @@ index 42ad2ba..3aafd82 100644
sprintf(buffer, "Kernel bad sw trap %lx", lvl);
die_if_kernel(buffer, regs);
}
-@@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
+@@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long
void bad_trap_tl1(struct pt_regs *regs, long lvl)
{
char buffer[32];
@@ -4485,10 +4565,97 @@ index 42ad2ba..3aafd82 100644
dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
sprintf (buffer, "Bad trap %lx at tl>0", lvl);
-diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
-index 0268210..f0291ca 100644
---- a/arch/sparc/lib/atomic_64.S
-+++ b/arch/sparc/lib/atomic_64.S
+@@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt
+ regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
+ printk("%s" "ERROR(%d): ",
+ (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
+- printk("TPC<%pS>\n", (void *) regs->tpc);
++ printk("TPC<%pA>\n", (void *) regs->tpc);
+ printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
+ (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
+ (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
+@@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type,
+ smp_processor_id(),
+ (type & 0x1) ? 'I' : 'D',
+ regs->tpc);
+- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
++ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
+ panic("Irrecoverable Cheetah+ parity error.");
+ }
+
+@@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type,
+ smp_processor_id(),
+ (type & 0x1) ? 'I' : 'D',
+ regs->tpc);
+- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
++ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
+ }
+
+ struct sun4v_error_entry {
+@@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_r
+
+ printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
+ regs->tpc, tl);
+- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
++ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
+ printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
+- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
++ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
+ (void *) regs->u_regs[UREG_I7]);
+ printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
+ "pte[%lx] error[%lx]\n",
+@@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_r
+
+ printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
+ regs->tpc, tl);
+- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
++ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
+ printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
+- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
++ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
+ (void *) regs->u_regs[UREG_I7]);
+ printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
+ "pte[%lx] error[%lx]\n",
+@@ -2196,13 +2207,13 @@ void show_stack(struct task_struct *tsk,
+ fp = (unsigned long)sf->fp + STACK_BIAS;
+ }
+
+- printk(" [%016lx] %pS\n", pc, (void *) pc);
++ printk(" [%016lx] %pA\n", pc, (void *) pc);
+ #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ if ((pc + 8UL) == (unsigned long) &return_to_handler) {
+ int index = tsk->curr_ret_stack;
+ if (tsk->ret_stack && index >= graph) {
+ pc = tsk->ret_stack[index - graph].ret;
+- printk(" [%016lx] %pS\n", pc, (void *) pc);
++ printk(" [%016lx] %pA\n", pc, (void *) pc);
+ graph++;
+ }
+ }
+@@ -2255,7 +2266,7 @@ void die_if_kernel(char *str, struct pt_
+ while (rw &&
+ count++ < 30 &&
+ kstack_valid(tp, (unsigned long) rw)) {
+- printk("Caller[%016lx]: %pS\n", rw->ins[7],
++ printk("Caller[%016lx]: %pA\n", rw->ins[7],
+ (void *) rw->ins[7]);
+
+ rw = kernel_stack_up(rw);
+diff -urNp linux-2.6.37/arch/sparc/kernel/unaligned_64.c linux-2.6.37/arch/sparc/kernel/unaligned_64.c
+--- linux-2.6.37/arch/sparc/kernel/unaligned_64.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/sparc/kernel/unaligned_64.c 2011-01-17 02:41:00.000000000 -0500
+@@ -278,7 +278,7 @@ static void log_unaligned(struct pt_regs
+ static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
+
+ if (__ratelimit(&ratelimit)) {
+- printk("Kernel unaligned access at TPC[%lx] %pS\n",
++ printk("Kernel unaligned access at TPC[%lx] %pA\n",
+ regs->tpc, (void *) regs->tpc);
+ }
+ }
+diff -urNp linux-2.6.37/arch/sparc/lib/atomic_64.S linux-2.6.37/arch/sparc/lib/atomic_64.S
+--- linux-2.6.37/arch/sparc/lib/atomic_64.S 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/sparc/lib/atomic_64.S 2011-01-17 02:41:00.000000000 -0500
@@ -18,7 +18,12 @@
atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
BACKOFF_SETUP(%o2)
@@ -4502,8 +4669,8 @@ index 0268210..f0291ca 100644
+
cas [%o1], %g1, %g7
cmp %g1, %g7
- bne,pn %icc, 2f
-@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
+ bne,pn %icc, BACKOFF_LABEL(2f, 1b)
+@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
2: BACKOFF_SPIN(%o2, %o3, 1b)
.size atomic_add, .-atomic_add
@@ -4536,8 +4703,8 @@ index 0268210..f0291ca 100644
+
cas [%o1], %g1, %g7
cmp %g1, %g7
- bne,pn %icc, 2f
-@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
+ bne,pn %icc, BACKOFF_LABEL(2f, 1b)
+@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
2: BACKOFF_SPIN(%o2, %o3, 1b)
.size atomic_sub, .-atomic_sub
@@ -4570,8 +4737,8 @@ index 0268210..f0291ca 100644
+
cas [%o1], %g1, %g7
cmp %g1, %g7
- bne,pn %icc, 2f
-@@ -59,12 +104,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
+ bne,pn %icc, BACKOFF_LABEL(2f, 1b)
+@@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1
2: BACKOFF_SPIN(%o2, %o3, 1b)
.size atomic_add_ret, .-atomic_add_ret
@@ -4605,8 +4772,8 @@ index 0268210..f0291ca 100644
+
cas [%o1], %g1, %g7
cmp %g1, %g7
- bne,pn %icc, 2f
-@@ -80,7 +146,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
+ bne,pn %icc, BACKOFF_LABEL(2f, 1b)
+@@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
BACKOFF_SETUP(%o2)
1: ldx [%o1], %g1
@@ -4619,8 +4786,8 @@ index 0268210..f0291ca 100644
+
casx [%o1], %g1, %g7
cmp %g1, %g7
- bne,pn %xcc, 2f
-@@ -90,12 +161,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
+ bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
+@@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 =
2: BACKOFF_SPIN(%o2, %o3, 1b)
.size atomic64_add, .-atomic64_add
@@ -4653,8 +4820,8 @@ index 0268210..f0291ca 100644
+
casx [%o1], %g1, %g7
cmp %g1, %g7
- bne,pn %xcc, 2f
-@@ -105,12 +196,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
+ bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
+@@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
2: BACKOFF_SPIN(%o2, %o3, 1b)
.size atomic64_sub, .-atomic64_sub
@@ -4687,8 +4854,8 @@ index 0268210..f0291ca 100644
+
casx [%o1], %g1, %g7
cmp %g1, %g7
- bne,pn %xcc, 2f
-@@ -121,12 +232,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
+ bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
+@@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o
2: BACKOFF_SPIN(%o2, %o3, 1b)
.size atomic64_add_ret, .-atomic64_add_ret
@@ -4722,11 +4889,10 @@ index 0268210..f0291ca 100644
+
casx [%o1], %g1, %g7
cmp %g1, %g7
- bne,pn %xcc, 2f
-diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
-index 1b30bb3..ab5bb67 100644
---- a/arch/sparc/lib/ksyms.c
-+++ b/arch/sparc/lib/ksyms.c
+ bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
+diff -urNp linux-2.6.37/arch/sparc/lib/ksyms.c linux-2.6.37/arch/sparc/lib/ksyms.c
+--- linux-2.6.37/arch/sparc/lib/ksyms.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/sparc/lib/ksyms.c 2011-01-17 02:41:00.000000000 -0500
@@ -142,12 +142,17 @@ EXPORT_SYMBOL(__downgrade_write);
/* Atomic counter implementation. */
@@ -4745,125 +4911,21 @@ index 1b30bb3..ab5bb67 100644
EXPORT_SYMBOL(atomic64_sub_ret);
/* Atomic bit operations. */
-diff --git a/arch/sparc/lib/rwsem_64.S b/arch/sparc/lib/rwsem_64.S
-index 91a7d29..ce75c29 100644
---- a/arch/sparc/lib/rwsem_64.S
-+++ b/arch/sparc/lib/rwsem_64.S
-@@ -11,7 +11,12 @@
- .globl __down_read
- __down_read:
- 1: lduw [%o0], %g1
-- add %g1, 1, %g7
-+ addcc %g1, 1, %g7
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ tvs %icc, 6
-+#endif
-+
- cas [%o0], %g1, %g7
- cmp %g1, %g7
- bne,pn %icc, 1b
-@@ -33,7 +38,12 @@ __down_read:
- .globl __down_read_trylock
- __down_read_trylock:
- 1: lduw [%o0], %g1
-- add %g1, 1, %g7
-+ addcc %g1, 1, %g7
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ tvs %icc, 6
-+#endif
-+
- cmp %g7, 0
- bl,pn %icc, 2f
- mov 0, %o1
-@@ -51,7 +61,12 @@ __down_write:
- or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
- 1:
- lduw [%o0], %g3
-- add %g3, %g1, %g7
-+ addcc %g3, %g1, %g7
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ tvs %icc, 6
-+#endif
-+
- cas [%o0], %g3, %g7
- cmp %g3, %g7
- bne,pn %icc, 1b
-@@ -77,7 +92,12 @@ __down_write_trylock:
- cmp %g3, 0
- bne,pn %icc, 2f
- mov 0, %o1
-- add %g3, %g1, %g7
-+ addcc %g3, %g1, %g7
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ tvs %icc, 6
-+#endif
-+
- cas [%o0], %g3, %g7
- cmp %g3, %g7
- bne,pn %icc, 1b
-@@ -90,7 +110,12 @@ __down_write_trylock:
- __up_read:
- 1:
- lduw [%o0], %g1
-- sub %g1, 1, %g7
-+ subcc %g1, 1, %g7
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ tvs %icc, 6
-+#endif
-+
- cas [%o0], %g1, %g7
- cmp %g1, %g7
- bne,pn %icc, 1b
-@@ -118,7 +143,12 @@ __up_write:
- or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
- 1:
- lduw [%o0], %g3
-- sub %g3, %g1, %g7
-+ subcc %g3, %g1, %g7
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ tvs %icc, 6
-+#endif
-+
- cas [%o0], %g3, %g7
- cmp %g3, %g7
- bne,pn %icc, 1b
-@@ -143,7 +173,12 @@ __downgrade_write:
- or %g1, %lo(RWSEM_WAITING_BIAS), %g1
- 1:
- lduw [%o0], %g3
-- sub %g3, %g1, %g7
-+ subcc %g3, %g1, %g7
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ tvs %icc, 6
-+#endif
-+
- cas [%o0], %g3, %g7
- cmp %g3, %g7
- bne,pn %icc, 1b
-diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
-index 79836a7..62f47a2 100644
---- a/arch/sparc/mm/Makefile
-+++ b/arch/sparc/mm/Makefile
-@@ -2,7 +2,7 @@
- #
-
- asflags-y := -ansi
--ccflags-y := -Werror
-+#ccflags-y := -Werror
+diff -urNp linux-2.6.37/arch/sparc/Makefile linux-2.6.37/arch/sparc/Makefile
+--- linux-2.6.37/arch/sparc/Makefile 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/sparc/Makefile 2011-01-17 02:41:00.000000000 -0500
+@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
+ # Export what is needed by arch/sparc/boot/Makefile
+ export VMLINUX_INIT VMLINUX_MAIN
+ VMLINUX_INIT := $(head-y) $(init-y)
+-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
++VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
+ VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
+ VMLINUX_MAIN += $(drivers-y) $(net-y)
- obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
- obj-y += fault_$(BITS).o
-diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
-index bd86016..1417ea6 100644
---- a/arch/sparc/mm/fault_32.c
-+++ b/arch/sparc/mm/fault_32.c
+diff -urNp linux-2.6.37/arch/sparc/mm/fault_32.c linux-2.6.37/arch/sparc/mm/fault_32.c
+--- linux-2.6.37/arch/sparc/mm/fault_32.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/sparc/mm/fault_32.c 2011-01-17 02:41:00.000000000 -0500
@@ -22,6 +22,9 @@
#include <linux/interrupt.h>
#include <linux/module.h>
@@ -4874,7 +4936,7 @@ index bd86016..1417ea6 100644
#include <asm/system.h>
#include <asm/page.h>
-@@ -209,6 +212,268 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
+@@ -209,6 +212,268 @@ static unsigned long compute_si_addr(str
return safe_compute_effective_address(regs, insn);
}
@@ -5168,10 +5230,9 @@ index bd86016..1417ea6 100644
/* Allow reads even for write-only mappings */
if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
goto bad_area;
-diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
-index f92ce56..359834d 100644
---- a/arch/sparc/mm/fault_64.c
-+++ b/arch/sparc/mm/fault_64.c
+diff -urNp linux-2.6.37/arch/sparc/mm/fault_64.c linux-2.6.37/arch/sparc/mm/fault_64.c
+--- linux-2.6.37/arch/sparc/mm/fault_64.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/sparc/mm/fault_64.c 2011-01-17 02:41:00.000000000 -0500
@@ -21,6 +21,9 @@
#include <linux/kprobes.h>
#include <linux/kdebug.h>
@@ -5182,7 +5243,16 @@ index f92ce56..359834d 100644
#include <asm/page.h>
#include <asm/pgtable.h>
-@@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
+@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(stru
+ printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
+ regs->tpc);
+ printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
+- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
++ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
+ printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
+ dump_stack();
+ unhandled_fault(regs->tpc, current, regs);
+@@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32b
show_regs(regs);
}
@@ -5640,7 +5710,7 @@ index f92ce56..359834d 100644
asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
{
struct mm_struct *mm = current->mm;
-@@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
+@@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fau
if (!vma)
goto bad_area;
@@ -5670,10 +5740,9 @@ index f92ce56..359834d 100644
/* Pure DTLB misses do not tell us whether the fault causing
* load/store/atomic was a write or not, it only says that there
* was no match. So in such a case we (carefully) read the
-diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
-index 5fdddf1..bfa67f0 100644
---- a/arch/sparc/mm/hugetlbpage.c
-+++ b/arch/sparc/mm/hugetlbpage.c
+diff -urNp linux-2.6.37/arch/sparc/mm/hugetlbpage.c linux-2.6.37/arch/sparc/mm/hugetlbpage.c
+--- linux-2.6.37/arch/sparc/mm/hugetlbpage.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/sparc/mm/hugetlbpage.c 2011-01-17 02:41:00.000000000 -0500
@@ -68,7 +68,7 @@ full_search:
}
return -ENOMEM;
@@ -5683,7 +5752,7 @@ index 5fdddf1..bfa67f0 100644
/*
* Remember the place where we stopped the search:
*/
-@@ -107,7 +107,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -107,7 +107,7 @@ hugetlb_get_unmapped_area_topdown(struct
/* make sure it can fit in the remaining address space */
if (likely(addr > len)) {
vma = find_vma(mm, addr-len);
@@ -5692,7 +5761,7 @@ index 5fdddf1..bfa67f0 100644
/* remember the address as a hint for next time */
return (mm->free_area_cache = addr-len);
}
-@@ -125,7 +125,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -125,7 +125,7 @@ hugetlb_get_unmapped_area_topdown(struct
* return with success:
*/
vma = find_vma(mm, addr);
@@ -5701,7 +5770,7 @@ index 5fdddf1..bfa67f0 100644
/* remember the address as a hint for next time */
return (mm->free_area_cache = addr);
}
-@@ -182,8 +182,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+@@ -182,8 +182,7 @@ hugetlb_get_unmapped_area(struct file *f
if (addr) {
addr = ALIGN(addr, HPAGE_SIZE);
vma = find_vma(mm, addr);
@@ -5711,10 +5780,9 @@ index 5fdddf1..bfa67f0 100644
return addr;
}
if (mm->get_unmapped_area == arch_get_unmapped_area)
-diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
-index 6d0e02c..c0b5fa7 100644
---- a/arch/sparc/mm/init_32.c
-+++ b/arch/sparc/mm/init_32.c
+diff -urNp linux-2.6.37/arch/sparc/mm/init_32.c linux-2.6.37/arch/sparc/mm/init_32.c
+--- linux-2.6.37/arch/sparc/mm/init_32.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/sparc/mm/init_32.c 2011-01-17 02:41:00.000000000 -0500
@@ -318,6 +318,9 @@ extern void device_scan(void);
pgprot_t PAGE_SHARED __read_mostly;
EXPORT_SYMBOL(PAGE_SHARED);
@@ -5749,11 +5817,22 @@ index 6d0e02c..c0b5fa7 100644
protection_map[12] = PAGE_READONLY;
protection_map[13] = PAGE_READONLY;
protection_map[14] = PAGE_SHARED;
-diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
-index b0b43aa..22982cd 100644
---- a/arch/sparc/mm/srmmu.c
-+++ b/arch/sparc/mm/srmmu.c
-@@ -2198,6 +2198,13 @@ void __init ld_mmu_srmmu(void)
+diff -urNp linux-2.6.37/arch/sparc/mm/Makefile linux-2.6.37/arch/sparc/mm/Makefile
+--- linux-2.6.37/arch/sparc/mm/Makefile 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/sparc/mm/Makefile 2011-01-17 02:41:00.000000000 -0500
+@@ -2,7 +2,7 @@
+ #
+
+ asflags-y := -ansi
+-ccflags-y := -Werror
++#ccflags-y := -Werror
+
+ obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
+ obj-y += fault_$(BITS).o
+diff -urNp linux-2.6.37/arch/sparc/mm/srmmu.c linux-2.6.37/arch/sparc/mm/srmmu.c
+--- linux-2.6.37/arch/sparc/mm/srmmu.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/sparc/mm/srmmu.c 2011-01-17 02:41:00.000000000 -0500
+@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
@@ -5767,10 +5846,9 @@ index b0b43aa..22982cd 100644
BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
-diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
-index 6c03acd..a5e0215 100644
---- a/arch/um/include/asm/kmap_types.h
-+++ b/arch/um/include/asm/kmap_types.h
+diff -urNp linux-2.6.37/arch/um/include/asm/kmap_types.h linux-2.6.37/arch/um/include/asm/kmap_types.h
+--- linux-2.6.37/arch/um/include/asm/kmap_types.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/um/include/asm/kmap_types.h 2011-01-17 02:41:00.000000000 -0500
@@ -23,6 +23,7 @@ enum km_type {
KM_IRQ1,
KM_SOFTIRQ0,
@@ -5779,10 +5857,9 @@ index 6c03acd..a5e0215 100644
KM_TYPE_NR
};
-diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
-index 4cc9b6c..02e5029 100644
---- a/arch/um/include/asm/page.h
-+++ b/arch/um/include/asm/page.h
+diff -urNp linux-2.6.37/arch/um/include/asm/page.h linux-2.6.37/arch/um/include/asm/page.h
+--- linux-2.6.37/arch/um/include/asm/page.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/um/include/asm/page.h 2011-01-17 02:41:00.000000000 -0500
@@ -14,6 +14,9 @@
#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
@@ -5793,10 +5870,35 @@ index 4cc9b6c..02e5029 100644
#ifndef __ASSEMBLY__
struct page;
-diff --git a/arch/um/sys-i386/syscalls.c b/arch/um/sys-i386/syscalls.c
-index 70ca357..728d1cc 100644
---- a/arch/um/sys-i386/syscalls.c
-+++ b/arch/um/sys-i386/syscalls.c
+diff -urNp linux-2.6.37/arch/um/kernel/process.c linux-2.6.37/arch/um/kernel/process.c
+--- linux-2.6.37/arch/um/kernel/process.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/um/kernel/process.c 2011-01-17 02:41:00.000000000 -0500
+@@ -404,22 +404,6 @@ int singlestepping(void * t)
+ return 2;
+ }
+
+-/*
+- * Only x86 and x86_64 have an arch_align_stack().
+- * All other arches have "#define arch_align_stack(x) (x)"
+- * in their asm/system.h
+- * As this is included in UML from asm-um/system-generic.h,
+- * we can use it to behave as the subarch does.
+- */
+-#ifndef arch_align_stack
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+- sp -= get_random_int() % 8192;
+- return sp & ~0xf;
+-}
+-#endif
+-
+ unsigned long get_wchan(struct task_struct *p)
+ {
+ unsigned long stack_page, sp, ip;
+diff -urNp linux-2.6.37/arch/um/sys-i386/syscalls.c linux-2.6.37/arch/um/sys-i386/syscalls.c
+--- linux-2.6.37/arch/um/sys-i386/syscalls.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/um/sys-i386/syscalls.c 2011-01-17 02:41:00.000000000 -0500
@@ -11,6 +11,21 @@
#include "asm/uaccess.h"
#include "asm/unistd.h"
@@ -5819,140 +5921,10 @@ index 70ca357..728d1cc 100644
/*
* The prototype on i386 is:
*
-diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
-index f942bb7..d2fc06f 100644
---- a/arch/x86/Kconfig
-+++ b/arch/x86/Kconfig
-@@ -1038,7 +1038,7 @@ choice
-
- config NOHIGHMEM
- bool "off"
-- depends on !X86_NUMAQ
-+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
- ---help---
- Linux can use up to 64 Gigabytes of physical memory on x86 systems.
- However, the address space of 32-bit x86 processors is only 4
-@@ -1075,7 +1075,7 @@ config NOHIGHMEM
-
- config HIGHMEM4G
- bool "4GB"
-- depends on !X86_NUMAQ
-+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
- ---help---
- Select this if you have a 32-bit processor and between 1 and 4
- gigabytes of physical RAM.
-@@ -1129,7 +1129,7 @@ config PAGE_OFFSET
- hex
- default 0xB0000000 if VMSPLIT_3G_OPT
- default 0x80000000 if VMSPLIT_2G
-- default 0x78000000 if VMSPLIT_2G_OPT
-+ default 0x70000000 if VMSPLIT_2G_OPT
- default 0x40000000 if VMSPLIT_1G
- default 0xC0000000
- depends on X86_32
-@@ -1461,7 +1461,7 @@ config ARCH_USES_PG_UNCACHED
-
- config EFI
- bool "EFI runtime service support"
-- depends on ACPI
-+ depends on ACPI && !PAX_KERNEXEC
- ---help---
- This enables the kernel to use EFI runtime services that are
- available (such as the EFI variable services).
-@@ -1548,6 +1548,7 @@ config KEXEC_JUMP
- config PHYSICAL_START
- hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
- default "0x1000000"
-+ range 0x400000 0x40000000
- ---help---
- This gives the physical address where the kernel is loaded.
-
-@@ -1611,6 +1612,7 @@ config X86_NEED_RELOCS
- config PHYSICAL_ALIGN
- hex "Alignment value to which kernel should be aligned" if X86_32
- default "0x1000000"
-+ range 0x400000 0x1000000 if PAX_KERNEXEC
- range 0x2000 0x1000000
- ---help---
- This value puts the alignment restrictions on physical address
-@@ -1642,9 +1644,10 @@ config HOTPLUG_CPU
- Say N if you want to disable CPU hotplug.
-
- config COMPAT_VDSO
-- def_bool y
-+ def_bool n
- prompt "Compat VDSO support"
- depends on X86_32 || IA32_EMULATION
-+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
- ---help---
- Map the 32-bit VDSO to the predictable old-style address too.
-
-diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
-index 2ac9069..483cbad 100644
---- a/arch/x86/Kconfig.cpu
-+++ b/arch/x86/Kconfig.cpu
-@@ -336,7 +336,7 @@ config X86_PPRO_FENCE
-
- config X86_F00F_BUG
- def_bool y
-- depends on M586MMX || M586TSC || M586 || M486 || M386
-+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
-
- config X86_INVD_BUG
- def_bool y
-@@ -360,7 +360,7 @@ config X86_POPAD_OK
-
- config X86_ALIGNMENT_16
- def_bool y
-- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
-+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
-
- config X86_INTEL_USERCOPY
- def_bool y
-@@ -406,7 +406,7 @@ config X86_CMPXCHG64
- # generates cmov.
- config X86_CMOV
- def_bool y
-- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
-+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
-
- config X86_MINIMUM_CPU_FAMILY
- int
-diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
-index 7508508..2c53b5e 100644
---- a/arch/x86/Kconfig.debug
-+++ b/arch/x86/Kconfig.debug
-@@ -97,7 +97,7 @@ config X86_PTDUMP
- config DEBUG_RODATA
- bool "Write protect kernel read-only data structures"
- default y
-- depends on DEBUG_KERNEL
-+ depends on DEBUG_KERNEL && BROKEN
- ---help---
- Mark the kernel read-only data as write-protected in the pagetables,
- in order to catch accidental (and incorrect) writes to such const
-diff --git a/arch/x86/Makefile b/arch/x86/Makefile
-index 8aa1b59..4094bef 100644
---- a/arch/x86/Makefile
-+++ b/arch/x86/Makefile
-@@ -191,3 +191,12 @@ define archhelp
- echo ' FDARGS="..." arguments for the booted kernel'
- echo ' FDINITRD=file initrd for the booted kernel'
- endef
-+
-+define OLD_LD
-+
-+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
-+*** Please upgrade your binutils to 2.18 or newer
-+endef
-+
-+archprepare:
-+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
-diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
-index 878e4b9..20537ab 100644
---- a/arch/x86/boot/bitops.h
-+++ b/arch/x86/boot/bitops.h
-@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
+diff -urNp linux-2.6.37/arch/x86/boot/bitops.h linux-2.6.37/arch/x86/boot/bitops.h
+--- linux-2.6.37/arch/x86/boot/bitops.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/boot/bitops.h 2011-01-17 02:41:00.000000000 -0500
+@@ -26,7 +26,7 @@ static inline int variable_test_bit(int
u8 v;
const u32 *p = (const u32 *)addr;
@@ -5961,7 +5933,7 @@ index 878e4b9..20537ab 100644
return v;
}
-@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
+@@ -37,7 +37,7 @@ static inline int variable_test_bit(int
static inline void set_bit(int nr, void *addr)
{
@@ -5970,11 +5942,10 @@ index 878e4b9..20537ab 100644
}
#endif /* BOOT_BITOPS_H */
-diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
-index 98239d2..f40214c 100644
---- a/arch/x86/boot/boot.h
-+++ b/arch/x86/boot/boot.h
-@@ -82,7 +82,7 @@ static inline void io_delay(void)
+diff -urNp linux-2.6.37/arch/x86/boot/boot.h linux-2.6.37/arch/x86/boot/boot.h
+--- linux-2.6.37/arch/x86/boot/boot.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/boot/boot.h 2011-01-17 02:41:00.000000000 -0500
+@@ -85,7 +85,7 @@ static inline void io_delay(void)
static inline u16 ds(void)
{
u16 seg;
@@ -5983,7 +5954,7 @@ index 98239d2..f40214c 100644
return seg;
}
-@@ -178,7 +178,7 @@ static inline void wrgs32(u32 v, addr_t addr)
+@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t
static inline int memcmp(const void *s1, const void *s2, size_t len)
{
u8 diff;
@@ -5992,10 +5963,9 @@ index 98239d2..f40214c 100644
: "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
return diff;
}
-diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
-index f543b70..b60fba8 100644
---- a/arch/x86/boot/compressed/head_32.S
-+++ b/arch/x86/boot/compressed/head_32.S
+diff -urNp linux-2.6.37/arch/x86/boot/compressed/head_32.S linux-2.6.37/arch/x86/boot/compressed/head_32.S
+--- linux-2.6.37/arch/x86/boot/compressed/head_32.S 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/boot/compressed/head_32.S 2011-01-17 02:41:00.000000000 -0500
@@ -76,7 +76,7 @@ ENTRY(startup_32)
notl %eax
andl %eax, %ebx
@@ -6005,7 +5975,7 @@ index f543b70..b60fba8 100644
#endif
/* Target address to relocate to for decompression */
-@@ -149,7 +149,7 @@ relocated:
+@@ -162,7 +162,7 @@ relocated:
* and where it was actually loaded.
*/
movl %ebp, %ebx
@@ -6014,7 +5984,7 @@ index f543b70..b60fba8 100644
jz 2f /* Nothing to be done if loaded at compiled addr. */
/*
* Process relocations.
-@@ -157,8 +157,7 @@ relocated:
+@@ -170,8 +170,7 @@ relocated:
1: subl $4, %edi
movl (%edi), %ecx
@@ -6024,10 +5994,9 @@ index f543b70..b60fba8 100644
addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
jmp 1b
2:
-diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
-index faff0dc..f859ead 100644
---- a/arch/x86/boot/compressed/head_64.S
-+++ b/arch/x86/boot/compressed/head_64.S
+diff -urNp linux-2.6.37/arch/x86/boot/compressed/head_64.S linux-2.6.37/arch/x86/boot/compressed/head_64.S
+--- linux-2.6.37/arch/x86/boot/compressed/head_64.S 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/boot/compressed/head_64.S 2011-01-17 02:41:00.000000000 -0500
@@ -91,7 +91,7 @@ ENTRY(startup_32)
notl %eax
andl %eax, %ebx
@@ -6046,11 +6015,10 @@ index faff0dc..f859ead 100644
#endif
/* Target address to relocate to for decompression */
-diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
-index 51e2407..c05c3f6 100644
---- a/arch/x86/boot/compressed/misc.c
-+++ b/arch/x86/boot/compressed/misc.c
-@@ -285,7 +285,7 @@ static void parse_elf(void *output)
+diff -urNp linux-2.6.37/arch/x86/boot/compressed/misc.c linux-2.6.37/arch/x86/boot/compressed/misc.c
+--- linux-2.6.37/arch/x86/boot/compressed/misc.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/boot/compressed/misc.c 2011-01-17 02:41:00.000000000 -0500
+@@ -306,7 +306,7 @@ static void parse_elf(void *output)
case PT_LOAD:
#ifdef CONFIG_RELOCATABLE
dest = output;
@@ -6059,7 +6027,7 @@ index 51e2407..c05c3f6 100644
#else
dest = (void *)(phdr->p_paddr);
#endif
-@@ -332,7 +332,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
+@@ -359,7 +359,7 @@ asmlinkage void decompress_kernel(void *
error("Destination address too large");
#endif
#ifndef CONFIG_RELOCATABLE
@@ -6068,10 +6036,9 @@ index 51e2407..c05c3f6 100644
error("Wrong destination address");
#endif
-diff --git a/arch/x86/boot/compressed/mkpiggy.c b/arch/x86/boot/compressed/mkpiggy.c
-index 5c22812..1138426 100644
---- a/arch/x86/boot/compressed/mkpiggy.c
-+++ b/arch/x86/boot/compressed/mkpiggy.c
+diff -urNp linux-2.6.37/arch/x86/boot/compressed/mkpiggy.c linux-2.6.37/arch/x86/boot/compressed/mkpiggy.c
+--- linux-2.6.37/arch/x86/boot/compressed/mkpiggy.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/boot/compressed/mkpiggy.c 2011-01-17 02:41:00.000000000 -0500
@@ -74,7 +74,7 @@ int main(int argc, char *argv[])
offs = (olen > ilen) ? olen - ilen : 0;
@@ -6081,10 +6048,9 @@ index 5c22812..1138426 100644
offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
printf(".section \".rodata..compressed\",\"a\",@progbits\n");
-diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
-index 89bbf4e..869908e 100644
---- a/arch/x86/boot/compressed/relocs.c
-+++ b/arch/x86/boot/compressed/relocs.c
+diff -urNp linux-2.6.37/arch/x86/boot/compressed/relocs.c linux-2.6.37/arch/x86/boot/compressed/relocs.c
+--- linux-2.6.37/arch/x86/boot/compressed/relocs.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/boot/compressed/relocs.c 2011-01-17 02:41:00.000000000 -0500
@@ -13,8 +13,11 @@
static void die(char *fmt, ...);
@@ -6237,7 +6203,7 @@ index 89bbf4e..869908e 100644
struct section *sec = &secs[i];
if (sec->shdr.sh_type != SHT_REL) {
-@@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
+@@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(El
!is_rel_reloc(sym_name(sym_strtab, sym))) {
continue;
}
@@ -6260,7 +6226,7 @@ index 89bbf4e..869908e 100644
switch (r_type) {
case R_386_NONE:
case R_386_PC32:
-@@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, const void *vb)
+@@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, co
static void emit_relocs(int as_text)
{
@@ -6277,10 +6243,9 @@ index 89bbf4e..869908e 100644
read_shdrs(fp);
read_strtabs(fp);
read_symtabs(fp);
-diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
-index 4d3ff03..e4972ff 100644
---- a/arch/x86/boot/cpucheck.c
-+++ b/arch/x86/boot/cpucheck.c
+diff -urNp linux-2.6.37/arch/x86/boot/cpucheck.c linux-2.6.37/arch/x86/boot/cpucheck.c
+--- linux-2.6.37/arch/x86/boot/cpucheck.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/boot/cpucheck.c 2011-01-17 02:41:00.000000000 -0500
@@ -74,7 +74,7 @@ static int has_fpu(void)
u16 fcw = -1, fsw = -1;
u32 cr0;
@@ -6335,7 +6300,7 @@ index 4d3ff03..e4972ff 100644
: "+a" (eax),
"=c" (cpu.flags[6]),
"=d" (cpu.flags[1])
-@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
+@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
u32 ecx = MSR_K7_HWCR;
u32 eax, edx;
@@ -6347,7 +6312,7 @@ index 4d3ff03..e4972ff 100644
get_flags(); /* Make sure it really did something */
err = check_flags();
-@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
+@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
u32 ecx = MSR_VIA_FCR;
u32 eax, edx;
@@ -6359,7 +6324,7 @@ index 4d3ff03..e4972ff 100644
set_bit(X86_FEATURE_CX8, cpu.flags);
err = check_flags();
-@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
+@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
u32 eax, edx;
u32 level = 1;
@@ -6376,11 +6341,10 @@ index 4d3ff03..e4972ff 100644
err = check_flags();
}
-diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
-index 93e689f..504ba09 100644
---- a/arch/x86/boot/header.S
-+++ b/arch/x86/boot/header.S
-@@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
+diff -urNp linux-2.6.37/arch/x86/boot/header.S linux-2.6.37/arch/x86/boot/header.S
+--- linux-2.6.37/arch/x86/boot/header.S 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/boot/header.S 2011-01-17 02:41:00.000000000 -0500
+@@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
# single linked list of
# struct setup_data
@@ -6389,10 +6353,9 @@ index 93e689f..504ba09 100644
#define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
#define VO_INIT_SIZE (VO__end - VO__text)
-diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
-index cae3feb..ff8ff2a 100644
---- a/arch/x86/boot/memory.c
-+++ b/arch/x86/boot/memory.c
+diff -urNp linux-2.6.37/arch/x86/boot/memory.c linux-2.6.37/arch/x86/boot/memory.c
+--- linux-2.6.37/arch/x86/boot/memory.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/boot/memory.c 2011-01-17 02:41:00.000000000 -0500
@@ -19,7 +19,7 @@
static int detect_memory_e820(void)
@@ -6402,22 +6365,9 @@ index cae3feb..ff8ff2a 100644
struct biosregs ireg, oreg;
struct e820entry *desc = boot_params.e820_map;
static struct e820entry buf; /* static so it is zeroed */
-diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
-index 11e8c6e..fdbb1ed 100644
---- a/arch/x86/boot/video-vesa.c
-+++ b/arch/x86/boot/video-vesa.c
-@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
-
- boot_params.screen_info.vesapm_seg = oreg.es;
- boot_params.screen_info.vesapm_off = oreg.di;
-+ boot_params.screen_info.vesapm_size = oreg.cx;
- }
-
- /*
-diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
-index 43eda28..5ab5fdb 100644
---- a/arch/x86/boot/video.c
-+++ b/arch/x86/boot/video.c
+diff -urNp linux-2.6.37/arch/x86/boot/video.c linux-2.6.37/arch/x86/boot/video.c
+--- linux-2.6.37/arch/x86/boot/video.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/boot/video.c 2011-01-17 02:41:00.000000000 -0500
@@ -96,7 +96,7 @@ static void store_mode_params(void)
static unsigned int get_entry(void)
{
@@ -6427,11 +6377,21 @@ index 43eda28..5ab5fdb 100644
int key;
unsigned int v;
-diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
-index 0350311..a2de328 100644
---- a/arch/x86/ia32/ia32_aout.c
-+++ b/arch/x86/ia32/ia32_aout.c
-@@ -168,6 +168,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
+diff -urNp linux-2.6.37/arch/x86/boot/video-vesa.c linux-2.6.37/arch/x86/boot/video-vesa.c
+--- linux-2.6.37/arch/x86/boot/video-vesa.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/boot/video-vesa.c 2011-01-17 02:41:00.000000000 -0500
+@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
+
+ boot_params.screen_info.vesapm_seg = oreg.es;
+ boot_params.screen_info.vesapm_off = oreg.di;
++ boot_params.screen_info.vesapm_size = oreg.cx;
+ }
+
+ /*
+diff -urNp linux-2.6.37/arch/x86/ia32/ia32_aout.c linux-2.6.37/arch/x86/ia32/ia32_aout.c
+--- linux-2.6.37/arch/x86/ia32/ia32_aout.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/ia32/ia32_aout.c 2011-01-17 02:41:00.000000000 -0500
+@@ -162,6 +162,8 @@ static int aout_core_dump(long signr, st
unsigned long dump_start, dump_size;
struct user32 dump;
@@ -6440,59 +6400,9 @@ index 0350311..a2de328 100644
fs = get_fs();
set_fs(KERNEL_DS);
has_dumped = 1;
-@@ -217,12 +219,6 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
- dump_size = dump.u_ssize << PAGE_SHIFT;
- DUMP_WRITE(dump_start, dump_size);
- }
-- /*
-- * Finally dump the task struct. Not be used by gdb, but
-- * could be useful
-- */
-- set_fs(KERNEL_DS);
-- DUMP_WRITE(current, sizeof(*current));
- end_coredump:
- set_fs(fs);
- return has_dumped;
-diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
-index 588a7aa..d49dacb 100644
---- a/arch/x86/ia32/ia32_signal.c
-+++ b/arch/x86/ia32/ia32_signal.c
-@@ -403,7 +403,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
- sp -= frame_size;
- /* Align the stack pointer according to the i386 ABI,
- * i.e. so that on function entry ((sp + 4) & 15) == 0. */
-- sp = ((sp + 4) & -16ul) - 4;
-+ sp = ((sp - 12) & -16ul) - 4;
- return (void __user *) sp;
- }
-
-@@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
- 0xb8,
- __NR_ia32_rt_sigreturn,
- 0x80cd,
-- 0,
-+ 0
- };
-
- frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
-@@ -533,9 +533,11 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
-
- if (ka->sa.sa_flags & SA_RESTORER)
- restorer = ka->sa.sa_restorer;
-+ else if (current->mm->context.vdso)
-+ /* Return stub is in 32bit vsyscall page */
-+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
- else
-- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
-- rt_sigreturn);
-+ restorer = &frame->retcode;
- put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
-
- /*
-diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
-index 4f5f71e..392161a 100644
---- a/arch/x86/ia32/ia32entry.S
-+++ b/arch/x86/ia32/ia32entry.S
+diff -urNp linux-2.6.37/arch/x86/ia32/ia32entry.S linux-2.6.37/arch/x86/ia32/ia32entry.S
+--- linux-2.6.37/arch/x86/ia32/ia32entry.S 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/ia32/ia32entry.S 2011-01-17 02:41:00.000000000 -0500
@@ -13,6 +13,7 @@
#include <asm/thread_info.h>
#include <asm/segment.h>
@@ -6587,12 +6497,63 @@ index 4f5f71e..392161a 100644
/*
* No need to follow this irqs on/off section: the syscall
* disabled irqs and here we enable it straight after entry:
-diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
-index 03b6bb5..ed0fb81 100644
---- a/arch/x86/include/asm/alternative.h
-+++ b/arch/x86/include/asm/alternative.h
-@@ -91,7 +91,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
- " .byte 664f-663f\n" /* replacementlen */ \
+diff -urNp linux-2.6.37/arch/x86/ia32/ia32_signal.c linux-2.6.37/arch/x86/ia32/ia32_signal.c
+--- linux-2.6.37/arch/x86/ia32/ia32_signal.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/ia32/ia32_signal.c 2011-01-17 02:41:00.000000000 -0500
+@@ -403,7 +403,7 @@ static void __user *get_sigframe(struct
+ sp -= frame_size;
+ /* Align the stack pointer according to the i386 ABI,
+ * i.e. so that on function entry ((sp + 4) & 15) == 0. */
+- sp = ((sp + 4) & -16ul) - 4;
++ sp = ((sp - 12) & -16ul) - 4;
+ return (void __user *) sp;
+ }
+
+@@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_s
+ * These are actually not used anymore, but left because some
+ * gdb versions depend on them as a marker.
+ */
+- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
++ put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
+ } put_user_catch(err);
+
+ if (err)
+@@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct
+ 0xb8,
+ __NR_ia32_rt_sigreturn,
+ 0x80cd,
+- 0,
++ 0
+ };
+
+ frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
+@@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct
+
+ if (ka->sa.sa_flags & SA_RESTORER)
+ restorer = ka->sa.sa_restorer;
++ else if (current->mm->context.vdso)
++ /* Return stub is in 32bit vsyscall page */
++ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
+ else
+- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
+- rt_sigreturn);
++ restorer = &frame->retcode;
+ put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
+
+ /*
+ * Not actually used anymore, but left because some gdb
+ * versions need it.
+ */
+- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
++ put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
+ } put_user_catch(err);
+
+ if (err)
+diff -urNp linux-2.6.37/arch/x86/include/asm/alternative.h linux-2.6.37/arch/x86/include/asm/alternative.h
+--- linux-2.6.37/arch/x86/include/asm/alternative.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/include/asm/alternative.h 2011-01-17 02:41:00.000000000 -0500
+@@ -93,7 +93,7 @@ static inline int alternatives_text_rese
+ ".section .discard,\"aw\",@progbits\n" \
" .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
".previous\n" \
- ".section .altinstr_replacement, \"ax\"\n" \
@@ -6600,11 +6561,10 @@ index 03b6bb5..ed0fb81 100644
"663:\n\t" newinstr "\n664:\n" /* replacement */ \
".previous"
-diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
-index 20370c6..a2eb9b0 100644
---- a/arch/x86/include/asm/apm.h
-+++ b/arch/x86/include/asm/apm.h
-@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
+diff -urNp linux-2.6.37/arch/x86/include/asm/apm.h linux-2.6.37/arch/x86/include/asm/apm.h
+--- linux-2.6.37/arch/x86/include/asm/apm.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/include/asm/apm.h 2011-01-17 02:41:00.000000000 -0500
+@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
__asm__ __volatile__(APM_DO_ZERO_SEGS
"pushl %%edi\n\t"
"pushl %%ebp\n\t"
@@ -6613,7 +6573,7 @@ index 20370c6..a2eb9b0 100644
"setc %%al\n\t"
"popl %%ebp\n\t"
"popl %%edi\n\t"
-@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
+@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
__asm__ __volatile__(APM_DO_ZERO_SEGS
"pushl %%edi\n\t"
"pushl %%ebp\n\t"
@@ -6622,340 +6582,327 @@ index 20370c6..a2eb9b0 100644
"setc %%bl\n\t"
"popl %%ebp\n\t"
"popl %%edi\n\t"
-diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
-index b3ed1e1..6b18beb 100644
---- a/arch/x86/include/asm/asm.h
-+++ b/arch/x86/include/asm/asm.h
-@@ -37,6 +37,12 @@
- #define _ASM_SI __ASM_REG(si)
- #define _ASM_DI __ASM_REG(di)
+diff -urNp linux-2.6.37/arch/x86/include/asm/atomic64_32.h linux-2.6.37/arch/x86/include/asm/atomic64_32.h
+--- linux-2.6.37/arch/x86/include/asm/atomic64_32.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/include/asm/atomic64_32.h 2011-01-17 02:41:00.000000000 -0500
+@@ -12,6 +12,14 @@ typedef struct {
+ u64 __aligned(8) counter;
+ } atomic64_t;
-+#ifdef CONFIG_X86_32
-+#define _ASM_INTO "into"
++#ifdef CONFIG_PAX_REFCOUNT
++typedef struct {
++ u64 __aligned(8) counter;
++} atomic64_unchecked_t;
+#else
-+#define _ASM_INTO "int $4"
++typedef atomic64_t atomic64_unchecked_t;
+#endif
+
- /* Exception table entry */
- #ifdef __ASSEMBLY__
- # define _ASM_EXTABLE(from,to) \
-diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
-index 952a826..c4830ea 100644
---- a/arch/x86/include/asm/atomic.h
-+++ b/arch/x86/include/asm/atomic.h
-@@ -26,6 +26,17 @@ static inline int atomic_read(const atomic_t *v)
- }
+ #define ATOMIC64_INIT(val) { (val) }
- /**
-+ * atomic_read_unchecked - read atomic variable
-+ * @v: pointer of type atomic_unchecked_t
+ #ifdef CONFIG_X86_CMPXCHG64
+diff -urNp linux-2.6.37/arch/x86/include/asm/atomic64_64.h linux-2.6.37/arch/x86/include/asm/atomic64_64.h
+--- linux-2.6.37/arch/x86/include/asm/atomic64_64.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/include/asm/atomic64_64.h 2011-01-17 02:41:00.000000000 -0500
+@@ -18,7 +18,19 @@
+ */
+ static inline long atomic64_read(const atomic64_t *v)
+ {
+- return (*(volatile long *)&(v)->counter);
++ return (*(volatile const long *)&(v)->counter);
++}
++
++/**
++ * atomic64_read_unchecked - read atomic64 variable
++ * @v: pointer of type atomic64_unchecked_t
+ *
+ * Atomically reads the value of @v.
++ * Doesn't imply a read memory barrier.
+ */
-+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
++static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
+{
-+ return v->counter;
-+}
-+
-+/**
- * atomic_set - set atomic variable
- * @v: pointer of type atomic_t
- * @i: required value
-@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
++ return (*(volatile const long *)&(v)->counter);
}
/**
-+ * atomic_set_unchecked - set atomic variable
-+ * @v: pointer of type atomic_unchecked_t
+@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64
+ }
+
+ /**
++ * atomic64_set_unchecked - set atomic64 variable
++ * @v: pointer to type atomic64_unchecked_t
+ * @i: required value
+ *
+ * Atomically sets the value of @v to @i.
+ */
-+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
+{
+ v->counter = i;
+}
+
+/**
- * atomic_add - add integer to atomic variable
+ * atomic64_add - add integer to atomic64 variable
* @i: integer value to add
- * @v: pointer of type atomic_t
-@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
+ * @v: pointer to type atomic64_t
+@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64
*/
- static inline void atomic_add(int i, atomic_t *v)
+ static inline void atomic64_add(long i, atomic64_t *v)
{
-- asm volatile(LOCK_PREFIX "addl %1,%0"
-+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
++ asm volatile(LOCK_PREFIX "addq %1,%0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+ "jno 0f\n"
-+ LOCK_PREFIX "subl %1,%0\n"
-+ _ASM_INTO "\n0:\n"
++ LOCK_PREFIX "subq %1,%0\n"
++ "int $4\n0:\n"
+ _ASM_EXTABLE(0b, 0b)
+#endif
+
-+ : "+m" (v->counter)
-+ : "ir" (i));
++ : "=m" (v->counter)
++ : "er" (i), "m" (v->counter));
+}
+
+/**
-+ * atomic_add_unchecked - add integer to atomic variable
++ * atomic64_add_unchecked - add integer to atomic64 variable
+ * @i: integer value to add
-+ * @v: pointer of type atomic_unchecked_t
++ * @v: pointer to type atomic64_unchecked_t
+ *
+ * Atomically adds @i to @v.
+ */
-+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
++static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
+{
-+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
- : "+m" (v->counter)
- : "ir" (i));
- }
-@@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
+ asm volatile(LOCK_PREFIX "addq %1,%0"
+ : "=m" (v->counter)
+ : "er" (i), "m" (v->counter));
+@@ -56,7 +102,29 @@ static inline void atomic64_add(long i,
*/
- static inline void atomic_sub(int i, atomic_t *v)
+ static inline void atomic64_sub(long i, atomic64_t *v)
{
-- asm volatile(LOCK_PREFIX "subl %1,%0"
-+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
+- asm volatile(LOCK_PREFIX "subq %1,%0"
++ asm volatile(LOCK_PREFIX "subq %1,%0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+ "jno 0f\n"
-+ LOCK_PREFIX "addl %1,%0\n"
-+ _ASM_INTO "\n0:\n"
++ LOCK_PREFIX "addq %1,%0\n"
++ "int $4\n0:\n"
+ _ASM_EXTABLE(0b, 0b)
+#endif
+
-+ : "+m" (v->counter)
-+ : "ir" (i));
++ : "=m" (v->counter)
++ : "er" (i), "m" (v->counter));
+}
+
+/**
-+ * atomic_sub_unchecked - subtract integer from atomic variable
++ * atomic64_sub_unchecked - subtract the atomic64 variable
+ * @i: integer value to subtract
-+ * @v: pointer of type atomic_t
++ * @v: pointer to type atomic64_unchecked_t
+ *
+ * Atomically subtracts @i from @v.
+ */
-+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
++static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
+{
-+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
- : "+m" (v->counter)
- : "ir" (i));
++ asm volatile(LOCK_PREFIX "subq %1,%0\n"
+ : "=m" (v->counter)
+ : "er" (i), "m" (v->counter));
}
-@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
+@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(
{
unsigned char c;
-- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
-+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
+- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
++ asm volatile(LOCK_PREFIX "subq %2,%0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+ "jno 0f\n"
-+ LOCK_PREFIX "addl %2,%0\n"
-+ _ASM_INTO "\n0:\n"
++ LOCK_PREFIX "addq %2,%0\n"
++ "int $4\n0:\n"
+ _ASM_EXTABLE(0b, 0b)
+#endif
+
+ "sete %1\n"
- : "+m" (v->counter), "=qm" (c)
- : "ir" (i) : "memory");
+ : "=m" (v->counter), "=qm" (c)
+ : "er" (i), "m" (v->counter) : "memory");
return c;
-@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
+@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(
*/
- static inline void atomic_inc(atomic_t *v)
+ static inline void atomic64_inc(atomic64_t *v)
{
-- asm volatile(LOCK_PREFIX "incl %0"
-+ asm volatile(LOCK_PREFIX "incl %0\n"
++ asm volatile(LOCK_PREFIX "incq %0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+ "jno 0f\n"
-+ LOCK_PREFIX "decl %0\n"
-+ _ASM_INTO "\n0:\n"
++ LOCK_PREFIX "decq %0\n"
++ "int $4\n0:\n"
+ _ASM_EXTABLE(0b, 0b)
+#endif
+
-+ : "+m" (v->counter));
++ : "=m" (v->counter)
++ : "m" (v->counter));
+}
+
+/**
-+ * atomic_inc_unchecked - increment atomic variable
-+ * @v: pointer of type atomic_unchecked_t
++ * atomic64_inc_unchecked - increment atomic64 variable
++ * @v: pointer to type atomic64_unchecked_t
+ *
+ * Atomically increments @v by 1.
+ */
-+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
++static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
+{
-+ asm volatile(LOCK_PREFIX "incl %0\n"
- : "+m" (v->counter));
- }
-
-@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
+ asm volatile(LOCK_PREFIX "incq %0"
+ : "=m" (v->counter)
+ : "m" (v->counter));
+@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64
*/
- static inline void atomic_dec(atomic_t *v)
+ static inline void atomic64_dec(atomic64_t *v)
{
-- asm volatile(LOCK_PREFIX "decl %0"
-+ asm volatile(LOCK_PREFIX "decl %0\n"
+- asm volatile(LOCK_PREFIX "decq %0"
++ asm volatile(LOCK_PREFIX "decq %0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+ "jno 0f\n"
-+ LOCK_PREFIX "incl %0\n"
-+ _ASM_INTO "\n0:\n"
++ LOCK_PREFIX "incq %0\n"
++ "int $4\n0:\n"
+ _ASM_EXTABLE(0b, 0b)
+#endif
+
-+ : "+m" (v->counter));
++ : "=m" (v->counter)
++ : "m" (v->counter));
+}
+
+/**
-+ * atomic_dec_unchecked - decrement atomic variable
-+ * @v: pointer of type atomic_t
++ * atomic64_dec_unchecked - decrement atomic64 variable
++ * @v: pointer to type atomic64_t
+ *
+ * Atomically decrements @v by 1.
+ */
-+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
++static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
+{
-+ asm volatile(LOCK_PREFIX "decl %0\n"
- : "+m" (v->counter));
++ asm volatile(LOCK_PREFIX "decq %0\n"
+ : "=m" (v->counter)
+ : "m" (v->counter));
}
-
-@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
+@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(
{
unsigned char c;
-- asm volatile(LOCK_PREFIX "decl %0; sete %1"
-+ asm volatile(LOCK_PREFIX "decl %0\n"
+- asm volatile(LOCK_PREFIX "decq %0; sete %1"
++ asm volatile(LOCK_PREFIX "decq %0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+ "jno 0f\n"
-+ LOCK_PREFIX "incl %0\n"
-+ _ASM_INTO "\n0:\n"
++ LOCK_PREFIX "incq %0\n"
++ "int $4\n0:\n"
+ _ASM_EXTABLE(0b, 0b)
+#endif
+
+ "sete %1\n"
- : "+m" (v->counter), "=qm" (c)
- : : "memory");
+ : "=m" (v->counter), "=qm" (c)
+ : "m" (v->counter) : "memory");
return c != 0;
-@@ -138,7 +263,16 @@ static inline int atomic_inc_and_test(atomic_t *v)
+@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(
{
unsigned char c;
-- asm volatile(LOCK_PREFIX "incl %0; sete %1"
-+ asm volatile(LOCK_PREFIX "incl %0\n"
+- asm volatile(LOCK_PREFIX "incq %0; sete %1"
++ asm volatile(LOCK_PREFIX "incq %0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+ "jno 0f\n"
-+ LOCK_PREFIX "decl %0\n"
-+ _ASM_INTO "\n0:\n"
++ LOCK_PREFIX "decq %0\n"
++ "int $4\n0:\n"
+ _ASM_EXTABLE(0b, 0b)
+#endif
+
+ "sete %1\n"
- : "+m" (v->counter), "=qm" (c)
- : : "memory");
+ : "=m" (v->counter), "=qm" (c)
+ : "m" (v->counter) : "memory");
return c != 0;
-@@ -157,7 +291,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
+@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(
{
unsigned char c;
-- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
-+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
+- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
++ asm volatile(LOCK_PREFIX "addq %2,%0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+ "jno 0f\n"
-+ LOCK_PREFIX "subl %2,%0\n"
-+ _ASM_INTO "\n0:\n"
++ LOCK_PREFIX "subq %2,%0\n"
++ "int $4\n0:\n"
+ _ASM_EXTABLE(0b, 0b)
+#endif
+
+ "sets %1\n"
- : "+m" (v->counter), "=qm" (c)
- : "ir" (i) : "memory");
+ : "=m" (v->counter), "=qm" (c)
+ : "er" (i), "m" (v->counter) : "memory");
return c;
-@@ -180,6 +323,46 @@ static inline int atomic_add_return(int i, atomic_t *v)
- #endif
- /* Modern 486+ processor */
- __i = i;
-+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
+@@ -171,7 +317,31 @@ static inline int atomic64_add_negative(
+ static inline long atomic64_add_return(long i, atomic64_t *v)
+ {
+ long __i = i;
+- asm volatile(LOCK_PREFIX "xaddq %0, %1;"
++ asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+ "jno 0f\n"
-+ "movl %0, %1\n"
-+ _ASM_INTO "\n0:\n"
++ "movq %0, %1\n"
++ "int $4\n0:\n"
+ _ASM_EXTABLE(0b, 0b)
+#endif
+
+ : "+r" (i), "+m" (v->counter)
+ : : "memory");
+ return i + __i;
-+
-+#ifdef CONFIG_M386
-+no_xadd: /* Legacy 386 processor */
-+ local_irq_save(flags);
-+ __i = atomic_read(v);
-+ atomic_set(v, i + __i);
-+ local_irq_restore(flags);
-+ return i + __i;
-+#endif
+}
+
+/**
-+ * atomic_add_return_unchecked - add integer and return
-+ * @v: pointer of type atomic_unchecked_t
++ * atomic64_add_return_unchecked - add and return
+ * @i: integer value to add
++ * @v: pointer to type atomic64_unchecked_t
+ *
+ * Atomically adds @i to @v and returns @i + @v
+ */
-+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
++static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
+{
-+ int __i;
-+#ifdef CONFIG_M386
-+ unsigned long flags;
-+ if (unlikely(boot_cpu_data.x86 <= 3))
-+ goto no_xadd;
-+#endif
-+ /* Modern 486+ processor */
-+ __i = i;
- asm volatile(LOCK_PREFIX "xaddl %0, %1"
++ long __i = i;
++ asm volatile(LOCK_PREFIX "xaddq %0, %1"
: "+r" (i), "+m" (v->counter)
: : "memory");
-@@ -208,6 +391,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
+ return i + __i;
+@@ -183,6 +353,10 @@ static inline long atomic64_sub_return(l
}
- #define atomic_inc_return(v) (atomic_add_return(1, v))
-+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
+ #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
++static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
+{
-+ return atomic_add_return_unchecked(1, v);
++ return atomic64_add_return_unchecked(1, v);
+}
- #define atomic_dec_return(v) (atomic_sub_return(1, v))
+ #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
- static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
-@@ -231,17 +418,30 @@ static inline int atomic_xchg(atomic_t *v, int new)
+ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
+@@ -206,17 +380,30 @@ static inline long atomic64_xchg(atomic6
*/
- static inline int atomic_add_unless(atomic_t *v, int a, int u)
+ static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
{
-- int c, old;
-+ int c, old, new;
- c = atomic_read(v);
+- long c, old;
++ long c, old, new;
+ c = atomic64_read(v);
for (;;) {
- if (unlikely(c == (u)))
+ if (unlikely(c == u))
break;
-- old = atomic_cmpxchg((v), c, c + (a));
+- old = atomic64_cmpxchg((v), c, c + (a));
+
-+ asm volatile("addl %2,%0\n"
++ asm volatile("add %2,%0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+ "jno 0f\n"
-+ "subl %2,%0\n"
-+ _ASM_INTO "\n0:\n"
++ "sub %2,%0\n"
++ "int $4\n0:\n"
+ _ASM_EXTABLE(0b, 0b)
+#endif
+
+ : "=r" (new)
+ : "0" (c), "ir" (a));
+
-+ old = atomic_cmpxchg(v, c, new);
++ old = atomic64_cmpxchg(v, c, new);
if (likely(old == c))
break;
c = old;
@@ -6964,254 +6911,250 @@ index 952a826..c4830ea 100644
+ return c != u;
}
- #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
-diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
-index 2a934aa..5c1a942 100644
---- a/arch/x86/include/asm/atomic64_32.h
-+++ b/arch/x86/include/asm/atomic64_32.h
-@@ -12,6 +12,14 @@ typedef struct {
- u64 __aligned(8) counter;
- } atomic64_t;
-
-+#ifdef CONFIG_PAX_REFCOUNT
-+typedef struct {
-+ u64 __aligned(8) counter;
-+} atomic64_unchecked_t;
-+#else
-+typedef atomic64_t atomic64_unchecked_t;
-+#endif
+ #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+diff -urNp linux-2.6.37/arch/x86/include/asm/atomic.h linux-2.6.37/arch/x86/include/asm/atomic.h
+--- linux-2.6.37/arch/x86/include/asm/atomic.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/include/asm/atomic.h 2011-01-17 02:41:00.000000000 -0500
+@@ -22,7 +22,18 @@
+ */
+ static inline int atomic_read(const atomic_t *v)
+ {
+- return (*(volatile int *)&(v)->counter);
++ return (*(volatile const int *)&(v)->counter);
++}
+
- #define ATOMIC64_INIT(val) { (val) }
-
- #ifdef CONFIG_X86_CMPXCHG64
-diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
-index 49fd1ea..6b79575 100644
---- a/arch/x86/include/asm/atomic64_64.h
-+++ b/arch/x86/include/asm/atomic64_64.h
-@@ -22,6 +22,18 @@ static inline long atomic64_read(const atomic64_t *v)
- }
-
- /**
-+ * atomic64_read_unchecked - read atomic64 variable
-+ * @v: pointer of type atomic64_unchecked_t
++/**
++ * atomic_read_unchecked - read atomic variable
++ * @v: pointer of type atomic_unchecked_t
+ *
+ * Atomically reads the value of @v.
-+ * Doesn't imply a read memory barrier.
+ */
-+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
++static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
+{
-+ return v->counter;
-+}
-+
-+/**
- * atomic64_set - set atomic64 variable
- * @v: pointer to type atomic64_t
- * @i: required value
-@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
++ return (*(volatile const int *)&(v)->counter);
}
/**
-+ * atomic64_set_unchecked - set atomic64 variable
-+ * @v: pointer to type atomic64_unchecked_t
+@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *
+ }
+
+ /**
++ * atomic_set_unchecked - set atomic variable
++ * @v: pointer of type atomic_unchecked_t
+ * @i: required value
+ *
+ * Atomically sets the value of @v to @i.
+ */
-+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
++static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
+{
+ v->counter = i;
+}
+
+/**
- * atomic64_add - add integer to atomic64 variable
+ * atomic_add - add integer to atomic variable
* @i: integer value to add
- * @v: pointer to type atomic64_t
-@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
+ * @v: pointer of type atomic_t
+@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *
*/
- static inline void atomic64_add(long i, atomic64_t *v)
+ static inline void atomic_add(int i, atomic_t *v)
{
-+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
+- asm volatile(LOCK_PREFIX "addl %1,%0"
++ asm volatile(LOCK_PREFIX "addl %1,%0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+ "jno 0f\n"
-+ LOCK_PREFIX "subq %1,%0\n"
++ LOCK_PREFIX "subl %1,%0\n"
+ "int $4\n0:\n"
+ _ASM_EXTABLE(0b, 0b)
+#endif
+
-+ : "=m" (v->counter)
-+ : "er" (i), "m" (v->counter));
++ : "+m" (v->counter)
++ : "ir" (i));
+}
+
+/**
-+ * atomic64_add_unchecked - add integer to atomic64 variable
++ * atomic_add_unchecked - add integer to atomic variable
+ * @i: integer value to add
-+ * @v: pointer to type atomic64_unchecked_t
++ * @v: pointer of type atomic_unchecked_t
+ *
+ * Atomically adds @i to @v.
+ */
-+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
++static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
+{
- asm volatile(LOCK_PREFIX "addq %1,%0"
- : "=m" (v->counter)
- : "er" (i), "m" (v->counter));
-@@ -56,7 +102,15 @@ static inline void atomic64_add(long i, atomic64_t *v)
++ asm volatile(LOCK_PREFIX "addl %1,%0\n"
+ : "+m" (v->counter)
+ : "ir" (i));
+ }
+@@ -60,7 +105,29 @@ static inline void atomic_add(int i, ato
*/
- static inline void atomic64_sub(long i, atomic64_t *v)
+ static inline void atomic_sub(int i, atomic_t *v)
{
-- asm volatile(LOCK_PREFIX "subq %1,%0"
-+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
+- asm volatile(LOCK_PREFIX "subl %1,%0"
++ asm volatile(LOCK_PREFIX "subl %1,%0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+ "jno 0f\n"
-+ LOCK_PREFIX "addq %1,%0\n"
++ LOCK_PREFIX "addl %1,%0\n"
+ "int $4\n0:\n"
+ _ASM_EXTABLE(0b, 0b)
+#endif
+
- : "=m" (v->counter)
- : "er" (i), "m" (v->counter));
++ : "+m" (v->counter)
++ : "ir" (i));
++}
++
++/**
++ * atomic_sub_unchecked - subtract integer from atomic variable
++ * @i: integer value to subtract
++ * @v: pointer of type atomic_t
++ *
++ * Atomically subtracts @i from @v.
++ */
++static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "subl %1,%0\n"
+ : "+m" (v->counter)
+ : "ir" (i));
}
-@@ -74,7 +128,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
+@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(in
{
unsigned char c;
-- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
-+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
+- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
++ asm volatile(LOCK_PREFIX "subl %2,%0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+ "jno 0f\n"
-+ LOCK_PREFIX "addq %2,%0\n"
++ LOCK_PREFIX "addl %2,%0\n"
+ "int $4\n0:\n"
+ _ASM_EXTABLE(0b, 0b)
+#endif
+
+ "sete %1\n"
- : "=m" (v->counter), "=qm" (c)
- : "er" (i), "m" (v->counter) : "memory");
+ : "+m" (v->counter), "=qm" (c)
+ : "ir" (i) : "memory");
return c;
-@@ -88,6 +151,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
+@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(in
*/
- static inline void atomic64_inc(atomic64_t *v)
+ static inline void atomic_inc(atomic_t *v)
{
-+ asm volatile(LOCK_PREFIX "incq %0\n"
+- asm volatile(LOCK_PREFIX "incl %0"
++ asm volatile(LOCK_PREFIX "incl %0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+ "jno 0f\n"
-+ LOCK_PREFIX "decq %0\n"
++ LOCK_PREFIX "decl %0\n"
+ "int $4\n0:\n"
+ _ASM_EXTABLE(0b, 0b)
+#endif
+
-+ : "=m" (v->counter)
-+ : "m" (v->counter));
++ : "+m" (v->counter));
+}
+
+/**
-+ * atomic64_inc_unchecked - increment atomic64 variable
-+ * @v: pointer to type atomic64_unchecked_t
++ * atomic_inc_unchecked - increment atomic variable
++ * @v: pointer of type atomic_unchecked_t
+ *
+ * Atomically increments @v by 1.
+ */
-+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
++static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
+{
- asm volatile(LOCK_PREFIX "incq %0"
- : "=m" (v->counter)
- : "m" (v->counter));
-@@ -101,7 +185,28 @@ static inline void atomic64_inc(atomic64_t *v)
++ asm volatile(LOCK_PREFIX "incl %0\n"
+ : "+m" (v->counter));
+ }
+
+@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *
*/
- static inline void atomic64_dec(atomic64_t *v)
+ static inline void atomic_dec(atomic_t *v)
{
-- asm volatile(LOCK_PREFIX "decq %0"
-+ asm volatile(LOCK_PREFIX "decq %0\n"
+- asm volatile(LOCK_PREFIX "decl %0"
++ asm volatile(LOCK_PREFIX "decl %0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+ "jno 0f\n"
-+ LOCK_PREFIX "incq %0\n"
++ LOCK_PREFIX "incl %0\n"
+ "int $4\n0:\n"
+ _ASM_EXTABLE(0b, 0b)
+#endif
+
-+ : "=m" (v->counter)
-+ : "m" (v->counter));
++ : "+m" (v->counter));
+}
+
+/**
-+ * atomic64_dec_unchecked - decrement atomic64 variable
-+ * @v: pointer to type atomic64_t
++ * atomic_dec_unchecked - decrement atomic variable
++ * @v: pointer of type atomic_t
+ *
+ * Atomically decrements @v by 1.
+ */
-+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
++static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
+{
-+ asm volatile(LOCK_PREFIX "decq %0\n"
- : "=m" (v->counter)
- : "m" (v->counter));
++ asm volatile(LOCK_PREFIX "decl %0\n"
+ : "+m" (v->counter));
}
-@@ -118,7 +223,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
+
+@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(at
{
unsigned char c;
-- asm volatile(LOCK_PREFIX "decq %0; sete %1"
-+ asm volatile(LOCK_PREFIX "decq %0\n"
+- asm volatile(LOCK_PREFIX "decl %0; sete %1"
++ asm volatile(LOCK_PREFIX "decl %0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+ "jno 0f\n"
-+ LOCK_PREFIX "incq %0\n"
++ LOCK_PREFIX "incl %0\n"
+ "int $4\n0:\n"
+ _ASM_EXTABLE(0b, 0b)
+#endif
+
+ "sete %1\n"
- : "=m" (v->counter), "=qm" (c)
- : "m" (v->counter) : "memory");
+ : "+m" (v->counter), "=qm" (c)
+ : : "memory");
return c != 0;
-@@ -136,7 +250,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
+@@ -138,7 +263,16 @@ static inline int atomic_inc_and_test(at
{
unsigned char c;
-- asm volatile(LOCK_PREFIX "incq %0; sete %1"
-+ asm volatile(LOCK_PREFIX "incq %0\n"
+- asm volatile(LOCK_PREFIX "incl %0; sete %1"
++ asm volatile(LOCK_PREFIX "incl %0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+ "jno 0f\n"
-+ LOCK_PREFIX "decq %0\n"
++ LOCK_PREFIX "decl %0\n"
+ "int $4\n0:\n"
+ _ASM_EXTABLE(0b, 0b)
+#endif
+
+ "sete %1\n"
- : "=m" (v->counter), "=qm" (c)
- : "m" (v->counter) : "memory");
+ : "+m" (v->counter), "=qm" (c)
+ : : "memory");
return c != 0;
-@@ -155,7 +278,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
+@@ -157,7 +291,16 @@ static inline int atomic_add_negative(in
{
unsigned char c;
-- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
-+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
+- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
++ asm volatile(LOCK_PREFIX "addl %2,%0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+ "jno 0f\n"
-+ LOCK_PREFIX "subq %2,%0\n"
++ LOCK_PREFIX "subl %2,%0\n"
+ "int $4\n0:\n"
+ _ASM_EXTABLE(0b, 0b)
+#endif
+
+ "sets %1\n"
- : "=m" (v->counter), "=qm" (c)
- : "er" (i), "m" (v->counter) : "memory");
+ : "+m" (v->counter), "=qm" (c)
+ : "ir" (i) : "memory");
return c;
-@@ -171,7 +303,31 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
- static inline long atomic64_add_return(long i, atomic64_t *v)
- {
- long __i = i;
-- asm volatile(LOCK_PREFIX "xaddq %0, %1;"
-+ asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
+@@ -180,6 +323,46 @@ static inline int atomic_add_return(int
+ #endif
+ /* Modern 486+ processor */
+ __i = i;
++ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+ "jno 0f\n"
-+ "movq %0, %1\n"
++ "movl %0, %1\n"
+ "int $4\n0:\n"
+ _ASM_EXTABLE(0b, 0b)
+#endif
@@ -7219,51 +7162,66 @@ index 49fd1ea..6b79575 100644
+ : "+r" (i), "+m" (v->counter)
+ : : "memory");
+ return i + __i;
++
++#ifdef CONFIG_M386
++no_xadd: /* Legacy 386 processor */
++ local_irq_save(flags);
++ __i = atomic_read(v);
++ atomic_set(v, i + __i);
++ local_irq_restore(flags);
++ return i + __i;
++#endif
+}
+
+/**
-+ * atomic64_add_return_unchecked - add and return
++ * atomic_add_return_unchecked - add integer and return
++ * @v: pointer of type atomic_unchecked_t
+ * @i: integer value to add
-+ * @v: pointer to type atomic64_unchecked_t
+ *
+ * Atomically adds @i to @v and returns @i + @v
+ */
-+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
++static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
+{
-+ long __i = i;
-+ asm volatile(LOCK_PREFIX "xaddq %0, %1"
++ int __i;
++#ifdef CONFIG_M386
++ unsigned long flags;
++ if (unlikely(boot_cpu_data.x86 <= 3))
++ goto no_xadd;
++#endif
++ /* Modern 486+ processor */
++ __i = i;
+ asm volatile(LOCK_PREFIX "xaddl %0, %1"
: "+r" (i), "+m" (v->counter)
: : "memory");
- return i + __i;
-@@ -183,6 +339,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
+@@ -208,6 +391,10 @@ static inline int atomic_sub_return(int
}
- #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
-+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
+ #define atomic_inc_return(v) (atomic_add_return(1, v))
++static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
+{
-+ return atomic64_add_return_unchecked(1, v);
++ return atomic_add_return_unchecked(1, v);
+}
- #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
+ #define atomic_dec_return(v) (atomic_sub_return(1, v))
- static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
-@@ -206,17 +366,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
+ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+@@ -231,17 +418,30 @@ static inline int atomic_xchg(atomic_t *
*/
- static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
+ static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
-- long c, old;
-+ long c, old, new;
- c = atomic64_read(v);
+- int c, old;
++ int c, old, new;
+ c = atomic_read(v);
for (;;) {
- if (unlikely(c == (u)))
+ if (unlikely(c == u))
break;
-- old = atomic64_cmpxchg((v), c, c + (a));
+- old = atomic_cmpxchg((v), c, c + (a));
+
-+ asm volatile("add %2,%0\n"
++ asm volatile("addl %2,%0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+ "jno 0f\n"
-+ "sub %2,%0\n"
++ "subl %2,%0\n"
+ "int $4\n0:\n"
+ _ASM_EXTABLE(0b, 0b)
+#endif
@@ -7271,7 +7229,7 @@ index 49fd1ea..6b79575 100644
+ : "=r" (new)
+ : "0" (c), "ir" (a));
+
-+ old = atomic64_cmpxchg(v, c, new);
++ old = atomic_cmpxchg(v, c, new);
if (likely(old == c))
break;
c = old;
@@ -7280,11 +7238,22 @@ index 49fd1ea..6b79575 100644
+ return c != u;
}
- #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
-index 3b62ab5..ea86950 100644
---- a/arch/x86/include/asm/boot.h
-+++ b/arch/x86/include/asm/boot.h
+ #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+diff -urNp linux-2.6.37/arch/x86/include/asm/bitops.h linux-2.6.37/arch/x86/include/asm/bitops.h
+--- linux-2.6.37/arch/x86/include/asm/bitops.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/include/asm/bitops.h 2011-01-17 02:41:00.000000000 -0500
+@@ -38,7 +38,7 @@
+ * a mask operation on a byte.
+ */
+ #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
+-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
++#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
+ #define CONST_MASK(nr) (1 << ((nr) & 7))
+
+ /**
+diff -urNp linux-2.6.37/arch/x86/include/asm/boot.h linux-2.6.37/arch/x86/include/asm/boot.h
+--- linux-2.6.37/arch/x86/include/asm/boot.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/include/asm/boot.h 2011-01-17 02:41:00.000000000 -0500
@@ -11,10 +11,15 @@
#include <asm/pgtable_types.h>
@@ -7302,23 +7271,10 @@ index 3b62ab5..ea86950 100644
/* Minimum kernel alignment, as a power of two */
#ifdef CONFIG_X86_64
#define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
-diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
-index 48f99f1..982b21c 100644
---- a/arch/x86/include/asm/cache.h
-+++ b/arch/x86/include/asm/cache.h
-@@ -8,6 +8,7 @@
- #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-
- #define __read_mostly __attribute__((__section__(".data..read_mostly")))
-+#define __read_only __attribute__((__section__(".data..read_only")))
-
- #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
- #define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
-diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
-index 63e35ec..718f045 100644
---- a/arch/x86/include/asm/cacheflush.h
-+++ b/arch/x86/include/asm/cacheflush.h
-@@ -66,7 +66,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
+diff -urNp linux-2.6.37/arch/x86/include/asm/cacheflush.h linux-2.6.37/arch/x86/include/asm/cacheflush.h
+--- linux-2.6.37/arch/x86/include/asm/cacheflush.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/include/asm/cacheflush.h 2011-01-17 02:41:00.000000000 -0500
+@@ -66,7 +66,7 @@ static inline unsigned long get_page_mem
unsigned long pg_flags = pg->flags & _PGMT_MASK;
if (pg_flags == _PGMT_DEFAULT)
@@ -7327,11 +7283,21 @@ index 63e35ec..718f045 100644
else if (pg_flags == _PGMT_WC)
return _PAGE_CACHE_WC;
else if (pg_flags == _PGMT_UC_MINUS)
-diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
-index 46fc474..b02b0f9 100644
---- a/arch/x86/include/asm/checksum_32.h
-+++ b/arch/x86/include/asm/checksum_32.h
-@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
+diff -urNp linux-2.6.37/arch/x86/include/asm/cache.h linux-2.6.37/arch/x86/include/asm/cache.h
+--- linux-2.6.37/arch/x86/include/asm/cache.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/include/asm/cache.h 2011-01-17 02:41:00.000000000 -0500
+@@ -8,6 +8,7 @@
+ #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+
+ #define __read_mostly __attribute__((__section__(".data..read_mostly")))
++#define __read_only __attribute__((__section__(".data..read_only")))
+
+ #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
+ #define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
+diff -urNp linux-2.6.37/arch/x86/include/asm/checksum_32.h linux-2.6.37/arch/x86/include/asm/checksum_32.h
+--- linux-2.6.37/arch/x86/include/asm/checksum_32.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/include/asm/checksum_32.h 2011-01-17 02:41:00.000000000 -0500
+@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
int len, __wsum sum,
int *src_err_ptr, int *dst_err_ptr);
@@ -7346,7 +7312,7 @@ index 46fc474..b02b0f9 100644
/*
* Note: when you get a NULL pointer exception here this means someone
* passed in an incorrect kernel address to one of these functions.
-@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
+@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
int *err_ptr)
{
might_sleep();
@@ -7355,7 +7321,7 @@ index 46fc474..b02b0f9 100644
len, sum, err_ptr, NULL);
}
-@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
+@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
{
might_sleep();
if (access_ok(VERIFY_WRITE, dst, len))
@@ -7364,23 +7330,21 @@ index 46fc474..b02b0f9 100644
len, sum, NULL, err_ptr);
if (len)
-diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
-index 14e0ee1..c5ff697 100644
---- a/arch/x86/include/asm/cpufeature.h
-+++ b/arch/x86/include/asm/cpufeature.h
-@@ -323,7 +323,7 @@ static __always_inline __pure bool __static_cpu_has(u8 bit)
- " .byte 4f - 3f\n" /* replacement len */
- " .byte 0xff + (4f-3f) - (2b-1b)\n" /* padding */
+diff -urNp linux-2.6.37/arch/x86/include/asm/cpufeature.h linux-2.6.37/arch/x86/include/asm/cpufeature.h
+--- linux-2.6.37/arch/x86/include/asm/cpufeature.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/include/asm/cpufeature.h 2011-01-17 02:41:00.000000000 -0500
+@@ -349,7 +349,7 @@ static __always_inline __pure bool __sta
+ ".section .discard,\"aw\",@progbits\n"
+ " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
".previous\n"
- ".section .altinstr_replacement,\"ax\"\n"
+ ".section .altinstr_replacement,\"a\"\n"
"3: movb $1,%0\n"
"4:\n"
".previous\n"
-diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
-index 617bd56..8d4356d 100644
---- a/arch/x86/include/asm/desc.h
-+++ b/arch/x86/include/asm/desc.h
+diff -urNp linux-2.6.37/arch/x86/include/asm/desc.h linux-2.6.37/arch/x86/include/asm/desc.h
+--- linux-2.6.37/arch/x86/include/asm/desc.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/include/asm/desc.h 2011-01-17 02:41:00.000000000 -0500
@@ -4,6 +4,7 @@
#include <asm/desc_defs.h>
#include <asm/ldt.h>
@@ -7389,7 +7353,7 @@ index 617bd56..8d4356d 100644
#include <linux/smp.h>
static inline void fill_ldt(struct desc_struct *desc,
-@@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_struct *desc,
+@@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_
desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
desc->type = (info->read_exec_only ^ 1) << 1;
desc->type |= info->contents << 2;
@@ -7397,7 +7361,7 @@ index 617bd56..8d4356d 100644
desc->s = 1;
desc->dpl = 0x3;
desc->p = info->seg_not_present ^ 1;
-@@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_struct *desc,
+@@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_
}
extern struct desc_ptr idt_descr;
@@ -7417,7 +7381,7 @@ index 617bd56..8d4356d 100644
}
#ifdef CONFIG_X86_64
-@@ -115,19 +113,24 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
+@@ -115,19 +113,24 @@ static inline void paravirt_free_ldt(str
static inline void native_write_idt_entry(gate_desc *idt, int entry,
const gate_desc *gate)
{
@@ -7442,7 +7406,7 @@ index 617bd56..8d4356d 100644
switch (type) {
case DESC_TSS:
size = sizeof(tss_desc);
-@@ -139,7 +142,10 @@ static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
+@@ -139,7 +142,10 @@ static inline void native_write_gdt_entr
size = sizeof(struct desc_struct);
break;
}
@@ -7453,7 +7417,7 @@ index 617bd56..8d4356d 100644
}
static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
-@@ -211,7 +217,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
+@@ -211,7 +217,9 @@ static inline void native_set_ldt(const
static inline void native_load_tr_desc(void)
{
@@ -7463,7 +7427,7 @@ index 617bd56..8d4356d 100644
}
static inline void native_load_gdt(const struct desc_ptr *dtr)
-@@ -246,8 +254,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
+@@ -246,8 +254,10 @@ static inline void native_load_tls(struc
unsigned int i;
struct desc_struct *gdt = get_cpu_gdt_table(cpu);
@@ -7474,7 +7438,7 @@ index 617bd56..8d4356d 100644
}
#define _LDT_empty(info) \
-@@ -309,7 +319,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
+@@ -309,7 +319,7 @@ static inline void set_desc_limit(struct
desc->limit = (limit >> 16) & 0xf;
}
@@ -7483,7 +7447,7 @@ index 617bd56..8d4356d 100644
unsigned dpl, unsigned ist, unsigned seg)
{
gate_desc s;
-@@ -327,7 +337,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
+@@ -327,7 +337,7 @@ static inline void _set_gate(int gate, u
* Pentium F0 0F bugfix can have resulted in the mapped
* IDT being write-protected.
*/
@@ -7492,7 +7456,7 @@ index 617bd56..8d4356d 100644
{
BUG_ON((unsigned)n > 0xFF);
_set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
-@@ -356,19 +366,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
+@@ -356,19 +366,19 @@ static inline void alloc_intr_gate(unsig
/*
* This routine sets up an interrupt gate at directory privilege level 3.
*/
@@ -7515,7 +7479,7 @@ index 617bd56..8d4356d 100644
{
BUG_ON((unsigned)n > 0xFF);
_set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
-@@ -377,19 +387,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
+@@ -377,19 +387,31 @@ static inline void set_trap_gate(unsigne
static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
{
BUG_ON((unsigned)n > 0xFF);
@@ -7550,10 +7514,9 @@ index 617bd56..8d4356d 100644
+#endif
+
#endif /* _ASM_X86_DESC_H */
-diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h
-index 029f230..0b9b1b3 100644
---- a/arch/x86/include/asm/device.h
-+++ b/arch/x86/include/asm/device.h
+diff -urNp linux-2.6.37/arch/x86/include/asm/device.h linux-2.6.37/arch/x86/include/asm/device.h
+--- linux-2.6.37/arch/x86/include/asm/device.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/include/asm/device.h 2011-01-17 02:41:00.000000000 -0500
@@ -6,7 +6,7 @@ struct dev_archdata {
void *acpi_handle;
#endif
@@ -7563,10 +7526,9 @@ index 029f230..0b9b1b3 100644
#endif
#if defined(CONFIG_DMAR) || defined(CONFIG_AMD_IOMMU)
void *iommu; /* hook for IOMMU specific extension */
-diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
-index ac91eed..a3eae19 100644
---- a/arch/x86/include/asm/dma-mapping.h
-+++ b/arch/x86/include/asm/dma-mapping.h
+diff -urNp linux-2.6.37/arch/x86/include/asm/dma-mapping.h linux-2.6.37/arch/x86/include/asm/dma-mapping.h
+--- linux-2.6.37/arch/x86/include/asm/dma-mapping.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/include/asm/dma-mapping.h 2011-01-17 02:41:00.000000000 -0500
@@ -26,9 +26,9 @@ extern int iommu_merge;
extern struct device x86_dma_fallback_dev;
extern int panic_on_overflow;
@@ -7579,7 +7541,7 @@ index ac91eed..a3eae19 100644
{
#ifdef CONFIG_X86_32
return dma_ops;
-@@ -45,7 +45,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+@@ -45,7 +45,7 @@ static inline struct dma_map_ops *get_dm
/* Make sure we keep the same behaviour */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
@@ -7588,7 +7550,7 @@ index ac91eed..a3eae19 100644
if (ops->mapping_error)
return ops->mapping_error(dev, dma_addr);
-@@ -123,7 +123,7 @@ static inline void *
+@@ -115,7 +115,7 @@ static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t gfp)
{
@@ -7597,7 +7559,7 @@ index ac91eed..a3eae19 100644
void *memory;
gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
-@@ -150,7 +150,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
+@@ -142,7 +142,7 @@ dma_alloc_coherent(struct device *dev, s
static inline void dma_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t bus)
{
@@ -7606,10 +7568,9 @@ index ac91eed..a3eae19 100644
WARN_ON(irqs_disabled()); /* for portability */
-diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
-index ec8a52d..7bbff69 100644
---- a/arch/x86/include/asm/e820.h
-+++ b/arch/x86/include/asm/e820.h
+diff -urNp linux-2.6.37/arch/x86/include/asm/e820.h linux-2.6.37/arch/x86/include/asm/e820.h
+--- linux-2.6.37/arch/x86/include/asm/e820.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/include/asm/e820.h 2011-01-17 02:41:00.000000000 -0500
@@ -69,7 +69,7 @@ struct e820map {
#define ISA_START_ADDRESS 0xa0000
#define ISA_END_ADDRESS 0x100000
@@ -7618,11 +7579,10 @@ index ec8a52d..7bbff69 100644
+#define BIOS_BEGIN 0x000c0000
#define BIOS_END 0x00100000
- #ifdef __KERNEL__
-diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
-index f2ad216..569e1f7 100644
---- a/arch/x86/include/asm/elf.h
-+++ b/arch/x86/include/asm/elf.h
+ #define BIOS_ROM_BASE 0xffe00000
+diff -urNp linux-2.6.37/arch/x86/include/asm/elf.h linux-2.6.37/arch/x86/include/asm/elf.h
+--- linux-2.6.37/arch/x86/include/asm/elf.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/include/asm/elf.h 2011-01-17 02:41:00.000000000 -0500
@@ -237,7 +237,25 @@ extern int force_personality32;
the loader. We need to make sure that it is out of the way of the program
that it will "exec", and that there is sufficient room for the brk. */
@@ -7668,7 +7628,7 @@ index f2ad216..569e1f7 100644
#define VDSO_ENTRY \
((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
-@@ -317,7 +334,4 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
+@@ -317,7 +334,4 @@ extern int arch_setup_additional_pages(s
extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
#define compat_arch_setup_additional_pages syscall32_setup_pages
@@ -7676,49 +7636,13 @@ index f2ad216..569e1f7 100644
-#define arch_randomize_brk arch_randomize_brk
-
#endif /* _ASM_X86_ELF_H */
-diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
-index 1f11ce4..c8cfd20 100644
---- a/arch/x86/include/asm/futex.h
-+++ b/arch/x86/include/asm/futex.h
-@@ -11,17 +11,54 @@
- #include <asm/processor.h>
+diff -urNp linux-2.6.37/arch/x86/include/asm/futex.h linux-2.6.37/arch/x86/include/asm/futex.h
+--- linux-2.6.37/arch/x86/include/asm/futex.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/include/asm/futex.h 2011-01-25 20:24:56.000000000 -0500
+@@ -12,16 +12,18 @@
#include <asm/system.h>
-+#ifdef CONFIG_X86_32
#define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
-+ asm volatile( \
-+ "movw\t%w6, %%ds\n" \
-+ "1:\t" insn "\n" \
-+ "2:\tpushl\t%%ss\n" \
-+ "\tpopl\t%%ds\n" \
-+ "\t.section .fixup,\"ax\"\n" \
-+ "3:\tmov\t%3, %1\n" \
-+ "\tjmp\t2b\n" \
-+ "\t.previous\n" \
-+ _ASM_EXTABLE(1b, 3b) \
-+ : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
-+ : "i" (-EFAULT), "0" (oparg), "1" (0), "r" (__USER_DS))
-+
-+#define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
-+ asm volatile("movw\t%w7, %%es\n" \
-+ "1:\tmovl\t%%es:%2, %0\n" \
-+ "\tmovl\t%0, %3\n" \
-+ "\t" insn "\n" \
-+ "2:\t" LOCK_PREFIX "cmpxchgl %3, %%es:%2\n"\
-+ "\tjnz\t1b\n" \
-+ "3:\tpushl\t%%ss\n" \
-+ "\tpopl\t%%es\n" \
-+ "\t.section .fixup,\"ax\"\n" \
-+ "4:\tmov\t%5, %1\n" \
-+ "\tjmp\t3b\n" \
-+ "\t.previous\n" \
-+ _ASM_EXTABLE(1b, 4b) \
-+ _ASM_EXTABLE(2b, 4b) \
-+ : "=&a" (oldval), "=&r" (ret), \
-+ "+m" (*uaddr), "=&r" (tem) \
-+ : "r" (oparg), "i" (-EFAULT), "1" (0), "r" (__USER_DS))
-+#else
-+#define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
+ typecheck(u32 *, uaddr); \
asm volatile("1:\t" insn "\n" \
"2:\t.section .fixup,\"ax\"\n" \
@@ -7727,8 +7651,7 @@ index 1f11ce4..c8cfd20 100644
"\t.previous\n" \
_ASM_EXTABLE(1b, 3b) \
- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
-+ : "=r" (oldval), "=r" (ret), \
-+ "+m" (*(uaddr + PAX_USER_SHADOW_BASE / 4))\
++ : "=r" (oldval), "=r" (ret), "+m" (*(u32 *)____m(uaddr))\
: "i" (-EFAULT), "0" (oparg), "1" (0))
#define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
@@ -7736,43 +7659,33 @@ index 1f11ce4..c8cfd20 100644
asm volatile("1:\tmovl %2, %0\n" \
"\tmovl\t%0, %3\n" \
"\t" insn "\n" \
-@@ -34,10 +71,12 @@
+@@ -34,10 +36,10 @@
_ASM_EXTABLE(1b, 4b) \
_ASM_EXTABLE(2b, 4b) \
: "=&a" (oldval), "=&r" (ret), \
- "+m" (*uaddr), "=&r" (tem) \
-+ "+m" (*(uaddr + PAX_USER_SHADOW_BASE / 4)),\
-+ "=&r" (tem) \
++ "+m" (*(u32 *)____m(uaddr)), "=&r" (tem) \
: "r" (oparg), "i" (-EFAULT), "1" (0))
-+#endif
-static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
+static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
{
int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15;
-@@ -61,11 +100,20 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
+@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser
switch (op) {
case FUTEX_OP_SET:
-+#ifdef CONFIG_X86_32
-+ __futex_atomic_op1("xchgl %0, %%ds:%2", ret, oldval, uaddr, oparg);
-+#else
- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
-+#endif
+- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
++ __futex_atomic_op1("xchgl %0, "__copyuser_seg"%2", ret, oldval, uaddr, oparg);
break;
case FUTEX_OP_ADD:
-+#ifdef CONFIG_X86_32
-+ __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %%ds:%2", ret, oldval,
-+ uaddr, oparg);
-+#else
- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
+- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
++ __futex_atomic_op1(LOCK_PREFIX "xaddl %0, "__copyuser_seg"%2", ret, oldval,
uaddr, oparg);
-+#endif
break;
case FUTEX_OP_OR:
- __futex_atomic_op2("orl %4, %3", ret, oldval, uaddr, oparg);
-@@ -109,7 +157,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
+@@ -109,7 +111,7 @@ static inline int futex_atomic_op_inuser
return ret;
}
@@ -7781,7 +7694,7 @@ index 1f11ce4..c8cfd20 100644
int newval)
{
-@@ -119,17 +167,31 @@ static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
+@@ -119,16 +121,16 @@ static inline int futex_atomic_cmpxchg_i
return -ENOSYS;
#endif
@@ -7790,37 +7703,21 @@ index 1f11ce4..c8cfd20 100644
return -EFAULT;
- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
-- "2:\t.section .fixup, \"ax\"\n"
-+ asm volatile(
-+#ifdef CONFIG_X86_32
-+ "\tmovw %w5, %%ds\n"
-+ "1:\t" LOCK_PREFIX "cmpxchgl %3, %%ds:%1\n"
-+ "2:\tpushl %%ss\n"
-+ "\tpopl %%ds\n"
-+#else
-+ "1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
-+ "2:\n"
-+#endif
-+ "\t.section .fixup, \"ax\"\n"
++ asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, "__copyuser_seg"%1\n"
+ "2:\t.section .fixup, \"ax\"\n"
"3:\tmov %2, %0\n"
"\tjmp 2b\n"
"\t.previous\n"
_ASM_EXTABLE(1b, 3b)
-+#ifdef CONFIG_X86_32
- : "=a" (oldval), "+m" (*uaddr)
-+ : "i" (-EFAULT), "r" (newval), "0" (oldval), "r" (__USER_DS)
-+#else
-+ : "=a" (oldval), "+m" (*(uaddr + PAX_USER_SHADOW_BASE / 4))
+- : "=a" (oldval), "+m" (*uaddr)
++ : "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
: "i" (-EFAULT), "r" (newval), "0" (oldval)
-+#endif
: "memory"
);
-
-diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
-index c991b3a..e47dda2 100644
---- a/arch/x86/include/asm/i387.h
-+++ b/arch/x86/include/asm/i387.h
-@@ -77,6 +77,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
+diff -urNp linux-2.6.37/arch/x86/include/asm/i387.h linux-2.6.37/arch/x86/include/asm/i387.h
+--- linux-2.6.37/arch/x86/include/asm/i387.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/include/asm/i387.h 2011-01-17 02:41:00.000000000 -0500
+@@ -92,6 +92,11 @@ static inline int fxrstor_checking(struc
{
int err;
@@ -7829,10 +7726,10 @@ index c991b3a..e47dda2 100644
+ fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
+#endif
+
+ /* See comment in fxsave() below. */
asm volatile("1: rex64/fxrstor (%[fx])\n\t"
"2:\n"
- ".section .fixup,\"ax\"\n"
-@@ -127,6 +132,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
+@@ -109,6 +114,11 @@ static inline int fxsave_user(struct i38
{
int err;
@@ -7841,11 +7738,11 @@ index c991b3a..e47dda2 100644
+ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
+#endif
+
- asm volatile("1: rex64/fxsave (%[fx])\n\t"
- "2:\n"
- ".section .fixup,\"ax\"\n"
-@@ -220,13 +230,8 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
- }
+ /*
+ * Clear the bytes not touched by the fxsave and reserved
+ * for the SW usage.
+@@ -189,13 +199,8 @@ static inline void fpu_fxsave(struct fpu
+ #endif /* CONFIG_X86_64 */
/* We need a safe address that is cheap to find and that is already
- in L1 during context switch. The best choices are unfortunately
@@ -7860,32 +7757,30 @@ index c991b3a..e47dda2 100644
/*
* These must be called with preempt disabled
-diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
-index 6a45ec4..2104674 100644
---- a/arch/x86/include/asm/io.h
-+++ b/arch/x86/include/asm/io.h
-@@ -214,6 +214,17 @@ extern void set_iounmap_nonlazy(void);
+diff -urNp linux-2.6.37/arch/x86/include/asm/io.h linux-2.6.37/arch/x86/include/asm/io.h
+--- linux-2.6.37/arch/x86/include/asm/io.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/include/asm/io.h 2011-01-27 22:37:21.000000000 -0500
+@@ -216,6 +216,17 @@ extern void set_iounmap_nonlazy(void);
#include <linux/vmalloc.h>
+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
+{
-+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1 << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
++ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
+}
+
+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
+{
-+ return (pfn + (count >> PAGE_SHIFT)) < (1 << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
++ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
+}
+
/*
* Convert a virtual cached pointer to an uncached pointer
*/
-diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
-index 345c99c..7938698 100644
---- a/arch/x86/include/asm/iommu.h
-+++ b/arch/x86/include/asm/iommu.h
+diff -urNp linux-2.6.37/arch/x86/include/asm/iommu.h linux-2.6.37/arch/x86/include/asm/iommu.h
+--- linux-2.6.37/arch/x86/include/asm/iommu.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/include/asm/iommu.h 2011-01-17 02:41:00.000000000 -0500
@@ -1,7 +1,7 @@
#ifndef _ASM_X86_IOMMU_H
#define _ASM_X86_IOMMU_H
@@ -7895,11 +7790,10 @@ index 345c99c..7938698 100644
extern int force_iommu, no_iommu;
extern int iommu_detected;
extern int iommu_pass_through;
-diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
-index 9e2b952..557206e 100644
---- a/arch/x86/include/asm/irqflags.h
-+++ b/arch/x86/include/asm/irqflags.h
-@@ -142,6 +142,11 @@ static inline unsigned long __raw_local_irq_save(void)
+diff -urNp linux-2.6.37/arch/x86/include/asm/irqflags.h linux-2.6.37/arch/x86/include/asm/irqflags.h
+--- linux-2.6.37/arch/x86/include/asm/irqflags.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/include/asm/irqflags.h 2011-01-17 02:41:00.000000000 -0500
+@@ -140,6 +140,11 @@ static inline unsigned long arch_local_i
sti; \
sysexit
@@ -7911,11 +7805,10 @@ index 9e2b952..557206e 100644
#else
#define INTERRUPT_RETURN iret
#define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
-diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
-index 76f5483..ae8e300 100644
---- a/arch/x86/include/asm/kvm_host.h
-+++ b/arch/x86/include/asm/kvm_host.h
-@@ -536,7 +536,7 @@ struct kvm_x86_ops {
+diff -urNp linux-2.6.37/arch/x86/include/asm/kvm_host.h linux-2.6.37/arch/x86/include/asm/kvm_host.h
+--- linux-2.6.37/arch/x86/include/asm/kvm_host.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/include/asm/kvm_host.h 2011-01-17 02:41:00.000000000 -0500
+@@ -585,7 +585,7 @@ struct kvm_x86_ops {
const struct trace_print_flags *exit_reasons_str;
};
@@ -7924,11 +7817,10 @@ index 76f5483..ae8e300 100644
int kvm_mmu_module_init(void);
void kvm_mmu_module_exit(void);
-diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
-index 2e99724..1fcf079 100644
---- a/arch/x86/include/asm/local.h
-+++ b/arch/x86/include/asm/local.h
-@@ -18,26 +18,90 @@ typedef struct {
+diff -urNp linux-2.6.37/arch/x86/include/asm/local.h linux-2.6.37/arch/x86/include/asm/local.h
+--- linux-2.6.37/arch/x86/include/asm/local.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/include/asm/local.h 2011-01-17 02:41:00.000000000 -0500
+@@ -18,26 +18,58 @@ typedef struct {
static inline void local_inc(local_t *l)
{
@@ -7936,18 +7828,10 @@ index 2e99724..1fcf079 100644
+ asm volatile(_ASM_INC "%0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
-+#ifdef CONFIG_X86_32
-+ "into\n0:\n"
-+#else
+ "jno 0f\n"
-+ "int $4\n0:\n"
-+#endif
-+ ".pushsection .fixup,\"ax\"\n"
-+ "1:\n"
+ _ASM_DEC "%0\n"
-+ "jmp 0b\n"
-+ ".popsection\n"
-+ _ASM_EXTABLE(0b, 1b)
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
+#endif
+
: "+m" (l->a.counter));
@@ -7959,18 +7843,10 @@ index 2e99724..1fcf079 100644
+ asm volatile(_ASM_DEC "%0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
-+#ifdef CONFIG_X86_32
-+ "into\n0:\n"
-+#else
+ "jno 0f\n"
-+ "int $4\n0:\n"
-+#endif
-+ ".pushsection .fixup,\"ax\"\n"
-+ "1:\n"
+ _ASM_INC "%0\n"
-+ "jmp 0b\n"
-+ ".popsection\n"
-+ _ASM_EXTABLE(0b, 1b)
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
+#endif
+
: "+m" (l->a.counter));
@@ -7982,18 +7858,10 @@ index 2e99724..1fcf079 100644
+ asm volatile(_ASM_ADD "%1,%0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
-+#ifdef CONFIG_X86_32
-+ "into\n0:\n"
-+#else
+ "jno 0f\n"
-+ "int $4\n0:\n"
-+#endif
-+ ".pushsection .fixup,\"ax\"\n"
-+ "1:\n"
+ _ASM_SUB "%1,%0\n"
-+ "jmp 0b\n"
-+ ".popsection\n"
-+ _ASM_EXTABLE(0b, 1b)
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
+#endif
+
: "+m" (l->a.counter)
@@ -8006,24 +7874,16 @@ index 2e99724..1fcf079 100644
+ asm volatile(_ASM_SUB "%1,%0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
-+#ifdef CONFIG_X86_32
-+ "into\n0:\n"
-+#else
+ "jno 0f\n"
-+ "int $4\n0:\n"
-+#endif
-+ ".pushsection .fixup,\"ax\"\n"
-+ "1:\n"
+ _ASM_ADD "%1,%0\n"
-+ "jmp 0b\n"
-+ ".popsection\n"
-+ _ASM_EXTABLE(0b, 1b)
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
+#endif
+
: "+m" (l->a.counter)
: "ir" (i));
}
-@@ -55,7 +119,24 @@ static inline int local_sub_and_test(long i, local_t *l)
+@@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon
{
unsigned char c;
@@ -8031,25 +7891,17 @@ index 2e99724..1fcf079 100644
+ asm volatile(_ASM_SUB "%2,%0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
-+#ifdef CONFIG_X86_32
-+ "into\n0:\n"
-+#else
+ "jno 0f\n"
-+ "int $4\n0:\n"
-+#endif
-+ ".pushsection .fixup,\"ax\"\n"
-+ "1:\n"
+ _ASM_ADD "%2,%0\n"
-+ "jmp 0b\n"
-+ ".popsection\n"
-+ _ASM_EXTABLE(0b, 1b)
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
+#endif
+
+ "sete %1\n"
: "+m" (l->a.counter), "=qm" (c)
: "ir" (i) : "memory");
return c;
-@@ -73,7 +154,24 @@ static inline int local_dec_and_test(local_t *l)
+@@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc
{
unsigned char c;
@@ -8057,25 +7909,17 @@ index 2e99724..1fcf079 100644
+ asm volatile(_ASM_DEC "%0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
-+#ifdef CONFIG_X86_32
-+ "into\n0:\n"
-+#else
+ "jno 0f\n"
-+ "int $4\n0:\n"
-+#endif
-+ ".pushsection .fixup,\"ax\"\n"
-+ "1:\n"
+ _ASM_INC "%0\n"
-+ "jmp 0b\n"
-+ ".popsection\n"
-+ _ASM_EXTABLE(0b, 1b)
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
+#endif
+
+ "sete %1\n"
: "+m" (l->a.counter), "=qm" (c)
: : "memory");
return c != 0;
-@@ -91,7 +189,24 @@ static inline int local_inc_and_test(local_t *l)
+@@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc
{
unsigned char c;
@@ -8083,25 +7927,17 @@ index 2e99724..1fcf079 100644
+ asm volatile(_ASM_INC "%0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
-+#ifdef CONFIG_X86_32
-+ "into\n0:\n"
-+#else
+ "jno 0f\n"
-+ "int $4\n0:\n"
-+#endif
-+ ".pushsection .fixup,\"ax\"\n"
-+ "1:\n"
+ _ASM_DEC "%0\n"
-+ "jmp 0b\n"
-+ ".popsection\n"
-+ _ASM_EXTABLE(0b, 1b)
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
+#endif
+
+ "sete %1\n"
: "+m" (l->a.counter), "=qm" (c)
: : "memory");
return c != 0;
-@@ -110,7 +225,24 @@ static inline int local_add_negative(long i, local_t *l)
+@@ -110,7 +169,16 @@ static inline int local_add_negative(lon
{
unsigned char c;
@@ -8109,25 +7945,17 @@ index 2e99724..1fcf079 100644
+ asm volatile(_ASM_ADD "%2,%0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
-+#ifdef CONFIG_X86_32
-+ "into\n0:\n"
-+#else
+ "jno 0f\n"
-+ "int $4\n0:\n"
-+#endif
-+ ".pushsection .fixup,\"ax\"\n"
-+ "1:\n"
+ _ASM_SUB "%2,%0\n"
-+ "jmp 0b\n"
-+ ".popsection\n"
-+ _ASM_EXTABLE(0b, 1b)
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
+#endif
+
+ "sets %1\n"
: "+m" (l->a.counter), "=qm" (c)
: "ir" (i) : "memory");
return c;
-@@ -133,7 +265,23 @@ static inline long local_add_return(long i, local_t *l)
+@@ -133,7 +201,15 @@ static inline long local_add_return(long
#endif
/* Modern 486+ processor */
__i = i;
@@ -8135,28 +7963,19 @@ index 2e99724..1fcf079 100644
+ asm volatile(_ASM_XADD "%0, %1\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
-+#ifdef CONFIG_X86_32
-+ "into\n0:\n"
-+#else
+ "jno 0f\n"
-+ "int $4\n0:\n"
-+#endif
-+ ".pushsection .fixup,\"ax\"\n"
-+ "1:\n"
+ _ASM_MOV "%0,%1\n"
-+ "jmp 0b\n"
-+ ".popsection\n"
-+ _ASM_EXTABLE(0b, 1b)
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
+#endif
+
: "+r" (i), "+m" (l->a.counter)
: : "memory");
return i + __i;
-diff --git a/arch/x86/include/asm/mc146818rtc.h b/arch/x86/include/asm/mc146818rtc.h
-index 01fdf56..3bb1b14 100644
---- a/arch/x86/include/asm/mc146818rtc.h
-+++ b/arch/x86/include/asm/mc146818rtc.h
-@@ -81,8 +81,8 @@ static inline unsigned char current_lock_cmos_reg(void)
+diff -urNp linux-2.6.37/arch/x86/include/asm/mc146818rtc.h linux-2.6.37/arch/x86/include/asm/mc146818rtc.h
+--- linux-2.6.37/arch/x86/include/asm/mc146818rtc.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/include/asm/mc146818rtc.h 2011-01-17 02:41:00.000000000 -0500
+@@ -81,8 +81,8 @@ static inline unsigned char current_lock
#else
#define lock_cmos_prefix(reg) do {} while (0)
#define lock_cmos_suffix(reg) do {} while (0)
@@ -8167,10 +7986,9 @@ index 01fdf56..3bb1b14 100644
#define do_i_have_lock_cmos() 0
#define current_lock_cmos_reg() 0
#endif
-diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
-index ef51b50..514ba37 100644
---- a/arch/x86/include/asm/microcode.h
-+++ b/arch/x86/include/asm/microcode.h
+diff -urNp linux-2.6.37/arch/x86/include/asm/microcode.h linux-2.6.37/arch/x86/include/asm/microcode.h
+--- linux-2.6.37/arch/x86/include/asm/microcode.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/include/asm/microcode.h 2011-01-17 02:41:00.000000000 -0500
@@ -12,13 +12,13 @@ struct device;
enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
@@ -8211,10 +8029,9 @@ index ef51b50..514ba37 100644
{
return NULL;
}
-diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
-index 593e51d..fa69c9a 100644
---- a/arch/x86/include/asm/mman.h
-+++ b/arch/x86/include/asm/mman.h
+diff -urNp linux-2.6.37/arch/x86/include/asm/mman.h linux-2.6.37/arch/x86/include/asm/mman.h
+--- linux-2.6.37/arch/x86/include/asm/mman.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/include/asm/mman.h 2011-01-17 02:41:00.000000000 -0500
@@ -5,4 +5,14 @@
#include <asm-generic/mman.h>
@@ -8230,41 +8047,10 @@ index 593e51d..fa69c9a 100644
+#endif
+
#endif /* _ASM_X86_MMAN_H */
-diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
-index 80a1dee..239c67d 100644
---- a/arch/x86/include/asm/mmu.h
-+++ b/arch/x86/include/asm/mmu.h
-@@ -9,10 +9,23 @@
- * we put the segment information here.
- */
- typedef struct {
-- void *ldt;
-+ struct desc_struct *ldt;
- int size;
- struct mutex lock;
-- void *vdso;
-+ unsigned long vdso;
-+
-+#ifdef CONFIG_X86_32
-+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
-+ unsigned long user_cs_base;
-+ unsigned long user_cs_limit;
-+
-+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
-+ cpumask_t cpu_user_cs_mask;
-+#endif
-+
-+#endif
-+#endif
-+
- } mm_context_t;
-
- #ifdef CONFIG_SMP
-diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
-index 4a2d4e0..a896757 100644
---- a/arch/x86/include/asm/mmu_context.h
-+++ b/arch/x86/include/asm/mmu_context.h
-@@ -24,6 +24,21 @@ void destroy_context(struct mm_struct *mm);
+diff -urNp linux-2.6.37/arch/x86/include/asm/mmu_context.h linux-2.6.37/arch/x86/include/asm/mmu_context.h
+--- linux-2.6.37/arch/x86/include/asm/mmu_context.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/include/asm/mmu_context.h 2011-02-12 11:04:35.000000000 -0500
+@@ -24,6 +24,21 @@ void destroy_context(struct mm_struct *m
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
@@ -8286,7 +8072,7 @@ index 4a2d4e0..a896757 100644
#ifdef CONFIG_SMP
if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
-@@ -34,27 +49,70 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+@@ -34,27 +49,70 @@ static inline void switch_mm(struct mm_s
struct task_struct *tsk)
{
unsigned cpu = smp_processor_id();
@@ -8295,8 +8081,8 @@ index 4a2d4e0..a896757 100644
+#endif
if (likely(prev != next)) {
- /* stop flush ipis for the previous mm */
- cpumask_clear_cpu(cpu, mm_cpumask(prev));
+- /* stop flush ipis for the previous mm */
+- cpumask_clear_cpu(cpu, mm_cpumask(prev));
#ifdef CONFIG_SMP
+#ifdef CONFIG_X86_32
+ tlbstate = percpu_read(cpu_tlbstate.state);
@@ -8316,6 +8102,8 @@ index 4a2d4e0..a896757 100644
+#else
load_cr3(next->pgd);
+#endif
++ /* stop flush ipis for the previous mm */
++ cpumask_clear_cpu(cpu, mm_cpumask(prev));
/*
* load the LDT, if the LDT is different:
@@ -8358,7 +8146,7 @@ index 4a2d4e0..a896757 100644
percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
-@@ -63,11 +121,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+@@ -63,11 +121,28 @@ static inline void switch_mm(struct mm_s
* tlb flush IPI delivery. We must reload CR3
* to make sure to use no freed page tables.
*/
@@ -8388,11 +8176,39 @@ index 4a2d4e0..a896757 100644
}
#define activate_mm(prev, next) \
-diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
-index 3e2ce58..51d8f7e 100644
---- a/arch/x86/include/asm/module.h
-+++ b/arch/x86/include/asm/module.h
-@@ -59,13 +59,31 @@
+diff -urNp linux-2.6.37/arch/x86/include/asm/mmu.h linux-2.6.37/arch/x86/include/asm/mmu.h
+--- linux-2.6.37/arch/x86/include/asm/mmu.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/include/asm/mmu.h 2011-01-17 02:41:00.000000000 -0500
+@@ -9,10 +9,23 @@
+ * we put the segment information here.
+ */
+ typedef struct {
+- void *ldt;
++ struct desc_struct *ldt;
+ int size;
+ struct mutex lock;
+- void *vdso;
++ unsigned long vdso;
++
++#ifdef CONFIG_X86_32
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++ unsigned long user_cs_base;
++ unsigned long user_cs_limit;
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
++ cpumask_t cpu_user_cs_mask;
++#endif
++
++#endif
++#endif
++
+ } mm_context_t;
+
+ #ifdef CONFIG_SMP
+diff -urNp linux-2.6.37/arch/x86/include/asm/module.h linux-2.6.37/arch/x86/include/asm/module.h
+--- linux-2.6.37/arch/x86/include/asm/module.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/include/asm/module.h 2011-01-17 02:41:00.000000000 -0500
+@@ -59,8 +59,26 @@
#error unknown processor family
#endif
@@ -8403,12 +8219,7 @@ index 3e2ce58..51d8f7e 100644
+#endif
+
#ifdef CONFIG_X86_32
- # ifdef CONFIG_4KSTACKS
- # define MODULE_STACKSIZE "4KSTACKS "
- # else
- # define MODULE_STACKSIZE ""
- # endif
--# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
+-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
+# ifdef CONFIG_PAX_KERNEXEC
+# define MODULE_PAX_KERNEXEC "KERNEXEC "
+# else
@@ -8419,32 +8230,28 @@ index 3e2ce58..51d8f7e 100644
+# else
+# define MODULE_GRSEC ""
+# endif
-+# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_GRSEC MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
++# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_GRSEC MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
+#else
+# define MODULE_ARCH_VERMAGIC MODULE_PAX_UDEREF
#endif
#endif /* _ASM_X86_MODULE_H */
-diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h
-index 6f1b733..fe54285 100644
---- a/arch/x86/include/asm/page_32_types.h
-+++ b/arch/x86/include/asm/page_32_types.h
-@@ -15,6 +15,10 @@
- */
- #define __PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
-
-+#ifdef CONFIG_PAX_PAGEEXEC
-+#define CONFIG_ARCH_TRACK_EXEC_LIMIT 1
-+#endif
-+
- #ifdef CONFIG_4KSTACKS
- #define THREAD_ORDER 0
- #else
-diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
-index 5653f43..122e562 100644
---- a/arch/x86/include/asm/paravirt.h
-+++ b/arch/x86/include/asm/paravirt.h
-@@ -720,6 +720,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
+diff -urNp linux-2.6.37/arch/x86/include/asm/page_64_types.h linux-2.6.37/arch/x86/include/asm/page_64_types.h
+--- linux-2.6.37/arch/x86/include/asm/page_64_types.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/include/asm/page_64_types.h 2011-01-17 02:41:00.000000000 -0500
+@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
+
+ /* duplicated to the one in bootmem.h */
+ extern unsigned long max_pfn;
+-extern unsigned long phys_base;
++extern const unsigned long phys_base;
+
+ extern unsigned long __phys_addr(unsigned long);
+ #define __phys_reloc_hide(x) (x)
+diff -urNp linux-2.6.37/arch/x86/include/asm/paravirt.h linux-2.6.37/arch/x86/include/asm/paravirt.h
+--- linux-2.6.37/arch/x86/include/asm/paravirt.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/include/asm/paravirt.h 2011-01-17 02:41:00.000000000 -0500
+@@ -715,6 +715,21 @@ static inline void __set_fixmap(unsigned
pv_mmu_ops.set_fixmap(idx, phys, flags);
}
@@ -8466,7 +8273,7 @@ index 5653f43..122e562 100644
#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
static inline int arch_spin_is_locked(struct arch_spinlock *lock)
-@@ -936,7 +951,7 @@ extern void default_banner(void);
+@@ -931,7 +946,7 @@ extern void default_banner(void);
#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
@@ -8475,7 +8282,7 @@ index 5653f43..122e562 100644
#endif
#define INTERRUPT_RETURN \
-@@ -1013,6 +1028,21 @@ extern void default_banner(void);
+@@ -1008,6 +1023,21 @@ extern void default_banner(void);
PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
CLBR_NONE, \
jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
@@ -8497,11 +8304,10 @@ index 5653f43..122e562 100644
#endif /* CONFIG_X86_32 */
#endif /* __ASSEMBLY__ */
-diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
-index db9ef55..7578058 100644
---- a/arch/x86/include/asm/paravirt_types.h
-+++ b/arch/x86/include/asm/paravirt_types.h
-@@ -312,6 +312,12 @@ struct pv_mmu_ops {
+diff -urNp linux-2.6.37/arch/x86/include/asm/paravirt_types.h linux-2.6.37/arch/x86/include/asm/paravirt_types.h
+--- linux-2.6.37/arch/x86/include/asm/paravirt_types.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/include/asm/paravirt_types.h 2011-01-17 02:41:00.000000000 -0500
+@@ -311,6 +311,12 @@ struct pv_mmu_ops {
an mfn. We can tell which is which from the index. */
void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
phys_addr_t phys, pgprot_t flags);
@@ -8514,11 +8320,10 @@ index db9ef55..7578058 100644
};
struct arch_spinlock;
-diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
-index cd2a31d..f199fa7 100644
---- a/arch/x86/include/asm/pci_x86.h
-+++ b/arch/x86/include/asm/pci_x86.h
-@@ -91,16 +91,16 @@ extern int (*pcibios_enable_irq)(struct pci_dev *dev);
+diff -urNp linux-2.6.37/arch/x86/include/asm/pci_x86.h linux-2.6.37/arch/x86/include/asm/pci_x86.h
+--- linux-2.6.37/arch/x86/include/asm/pci_x86.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/include/asm/pci_x86.h 2011-01-17 02:41:01.000000000 -0500
+@@ -93,16 +93,16 @@ extern int (*pcibios_enable_irq)(struct
extern void (*pcibios_disable_irq)(struct pci_dev *dev);
struct pci_raw_ops {
@@ -8540,11 +8345,10 @@ index cd2a31d..f199fa7 100644
extern bool port_cf9_safe;
/* arch_initcall level */
-diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
-index 271de94..ef944d6 100644
---- a/arch/x86/include/asm/pgalloc.h
-+++ b/arch/x86/include/asm/pgalloc.h
-@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
+diff -urNp linux-2.6.37/arch/x86/include/asm/pgalloc.h linux-2.6.37/arch/x86/include/asm/pgalloc.h
+--- linux-2.6.37/arch/x86/include/asm/pgalloc.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/include/asm/pgalloc.h 2011-01-17 02:41:01.000000000 -0500
+@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
pmd_t *pmd, pte_t *pte)
{
paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
@@ -8558,11 +8362,10 @@ index 271de94..ef944d6 100644
set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
}
-diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
-index 2334982..70bc412 100644
---- a/arch/x86/include/asm/pgtable-2level.h
-+++ b/arch/x86/include/asm/pgtable-2level.h
-@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
+diff -urNp linux-2.6.37/arch/x86/include/asm/pgtable-2level.h linux-2.6.37/arch/x86/include/asm/pgtable-2level.h
+--- linux-2.6.37/arch/x86/include/asm/pgtable-2level.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/include/asm/pgtable-2level.h 2011-01-17 02:41:01.000000000 -0500
+@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
@@ -8572,11 +8375,88 @@ index 2334982..70bc412 100644
}
static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
-diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
-index 177b016..a526e3c 100644
---- a/arch/x86/include/asm/pgtable-3level.h
-+++ b/arch/x86/include/asm/pgtable-3level.h
-@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
+diff -urNp linux-2.6.37/arch/x86/include/asm/pgtable_32.h linux-2.6.37/arch/x86/include/asm/pgtable_32.h
+--- linux-2.6.37/arch/x86/include/asm/pgtable_32.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/include/asm/pgtable_32.h 2011-01-17 02:41:01.000000000 -0500
+@@ -25,9 +25,6 @@
+ struct mm_struct;
+ struct vm_area_struct;
+
+-extern pgd_t swapper_pg_dir[1024];
+-extern pgd_t initial_page_table[1024];
+-
+ static inline void pgtable_cache_init(void) { }
+ static inline void check_pgt_cache(void) { }
+ void paging_init(void);
+@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, u
+ # include <asm/pgtable-2level.h>
+ #endif
+
++extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
++extern pgd_t initial_page_table[PTRS_PER_PGD];
++#ifdef CONFIG_X86_PAE
++extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
++#endif
++
+ #if defined(CONFIG_HIGHPTE)
+ #define pte_offset_map(dir, address) \
+ ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
+@@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, u
+ /* Clear a kernel PTE and flush it from the TLB */
+ #define kpte_clear_flush(ptep, vaddr) \
+ do { \
++ pax_open_kernel(); \
+ pte_clear(&init_mm, (vaddr), (ptep)); \
++ pax_close_kernel(); \
+ __flush_tlb_one((vaddr)); \
+ } while (0)
+
+@@ -74,6 +79,9 @@ do { \
+
+ #endif /* !__ASSEMBLY__ */
+
++#define HAVE_ARCH_UNMAPPED_AREA
++#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
++
+ /*
+ * kern_addr_valid() is (1) for FLATMEM and (0) for
+ * SPARSEMEM and DISCONTIGMEM
+diff -urNp linux-2.6.37/arch/x86/include/asm/pgtable_32_types.h linux-2.6.37/arch/x86/include/asm/pgtable_32_types.h
+--- linux-2.6.37/arch/x86/include/asm/pgtable_32_types.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/include/asm/pgtable_32_types.h 2011-01-17 02:41:01.000000000 -0500
+@@ -8,7 +8,7 @@
+ */
+ #ifdef CONFIG_X86_PAE
+ # include <asm/pgtable-3level_types.h>
+-# define PMD_SIZE (1UL << PMD_SHIFT)
++# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
+ # define PMD_MASK (~(PMD_SIZE - 1))
+ #else
+ # include <asm/pgtable-2level_types.h>
+@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
+ # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
+ #endif
+
++#ifdef CONFIG_PAX_KERNEXEC
++#ifndef __ASSEMBLY__
++extern unsigned char MODULES_EXEC_VADDR[];
++extern unsigned char MODULES_EXEC_END[];
++#endif
++#include <asm/boot.h>
++#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
++#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
++#else
++#define ktla_ktva(addr) (addr)
++#define ktva_ktla(addr) (addr)
++#endif
++
+ #define MODULES_VADDR VMALLOC_START
+ #define MODULES_END VMALLOC_END
+ #define MODULES_LEN (MODULES_VADDR - MODULES_END)
+diff -urNp linux-2.6.37/arch/x86/include/asm/pgtable-3level.h linux-2.6.37/arch/x86/include/asm/pgtable-3level.h
+--- linux-2.6.37/arch/x86/include/asm/pgtable-3level.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/include/asm/pgtable-3level.h 2011-01-17 02:41:01.000000000 -0500
+@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
@@ -8593,11 +8473,63 @@ index 177b016..a526e3c 100644
}
/*
-diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
-index a34c785..5df98f8 100644
---- a/arch/x86/include/asm/pgtable.h
-+++ b/arch/x86/include/asm/pgtable.h
-@@ -76,12 +76,51 @@ extern struct list_head pgd_list;
+diff -urNp linux-2.6.37/arch/x86/include/asm/pgtable_64.h linux-2.6.37/arch/x86/include/asm/pgtable_64.h
+--- linux-2.6.37/arch/x86/include/asm/pgtable_64.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/include/asm/pgtable_64.h 2011-01-17 02:41:01.000000000 -0500
+@@ -16,10 +16,13 @@
+
+ extern pud_t level3_kernel_pgt[512];
+ extern pud_t level3_ident_pgt[512];
++extern pud_t level3_vmalloc_pgt[512];
++extern pud_t level3_vmemmap_pgt[512];
++extern pud_t level2_vmemmap_pgt[512];
+ extern pmd_t level2_kernel_pgt[512];
+ extern pmd_t level2_fixmap_pgt[512];
+-extern pmd_t level2_ident_pgt[512];
+-extern pgd_t init_level4_pgt[];
++extern pmd_t level2_ident_pgt[512*2];
++extern pgd_t init_level4_pgt[512];
+
+ #define swapper_pg_dir init_level4_pgt
+
+@@ -74,7 +77,9 @@ static inline pte_t native_ptep_get_and_
+
+ static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
+ {
++ pax_open_kernel();
+ *pmdp = pmd;
++ pax_close_kernel();
+ }
+
+ static inline void native_pmd_clear(pmd_t *pmd)
+@@ -94,7 +99,9 @@ static inline void native_pud_clear(pud_
+
+ static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
+ {
++ pax_open_kernel();
+ *pgdp = pgd;
++ pax_close_kernel();
+ }
+
+ static inline void native_pgd_clear(pgd_t *pgd)
+diff -urNp linux-2.6.37/arch/x86/include/asm/pgtable_64_types.h linux-2.6.37/arch/x86/include/asm/pgtable_64_types.h
+--- linux-2.6.37/arch/x86/include/asm/pgtable_64_types.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/include/asm/pgtable_64_types.h 2011-01-17 02:41:01.000000000 -0500
+@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
+ #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
+ #define MODULES_END _AC(0xffffffffff000000, UL)
+ #define MODULES_LEN (MODULES_END - MODULES_VADDR)
++#define MODULES_EXEC_VADDR MODULES_VADDR
++#define MODULES_EXEC_END MODULES_END
++
++#define ktla_ktva(addr) (addr)
++#define ktva_ktla(addr) (addr)
+
+ #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
+diff -urNp linux-2.6.37/arch/x86/include/asm/pgtable.h linux-2.6.37/arch/x86/include/asm/pgtable.h
+--- linux-2.6.37/arch/x86/include/asm/pgtable.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/include/asm/pgtable.h 2011-01-17 02:41:01.000000000 -0500
+@@ -78,12 +78,51 @@ extern struct mm_struct *pgd_page_get_mm
#define arch_end_context_switch(prev) do {} while(0)
@@ -8649,7 +8581,7 @@ index a34c785..5df98f8 100644
static inline int pte_dirty(pte_t pte)
{
return pte_flags(pte) & _PAGE_DIRTY;
-@@ -169,9 +208,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
+@@ -171,9 +210,29 @@ static inline pte_t pte_wrprotect(pte_t
return pte_clear_flags(pte, _PAGE_RW);
}
@@ -8680,7 +8612,7 @@ index a34c785..5df98f8 100644
}
static inline pte_t pte_mkdirty(pte_t pte)
-@@ -304,6 +363,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
+@@ -306,6 +365,15 @@ pte_t *populate_extra_pte(unsigned long
#endif
#ifndef __ASSEMBLY__
@@ -8696,7 +8628,7 @@ index a34c785..5df98f8 100644
#include <linux/mm_types.h>
static inline int pte_none(pte_t pte)
-@@ -474,7 +542,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
+@@ -476,7 +544,7 @@ static inline pud_t *pud_offset(pgd_t *p
static inline int pgd_bad(pgd_t pgd)
{
@@ -8705,7 +8637,7 @@ index a34c785..5df98f8 100644
}
static inline int pgd_none(pgd_t pgd)
-@@ -497,7 +565,12 @@ static inline int pgd_none(pgd_t pgd)
+@@ -499,7 +567,12 @@ static inline int pgd_none(pgd_t pgd)
* pgd_offset() returns a (pgd_t *)
* pgd_index() is used get the offset into the pgd page's array of pgd_t's;
*/
@@ -8719,7 +8651,7 @@ index a34c785..5df98f8 100644
/*
* a shortcut which implies the use of the kernel's pgd, instead
* of a process's
-@@ -508,6 +581,20 @@ static inline int pgd_none(pgd_t pgd)
+@@ -510,6 +583,20 @@ static inline int pgd_none(pgd_t pgd)
#define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
@@ -8740,7 +8672,7 @@ index a34c785..5df98f8 100644
#ifndef __ASSEMBLY__
extern int direct_gbpages;
-@@ -613,11 +700,23 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm,
+@@ -617,11 +704,23 @@ static inline void ptep_set_wrprotect(st
* dst and src can be on the same page, but the range must not overlap,
* and must not cross a page boundary.
*/
@@ -8766,145 +8698,9 @@ index a34c785..5df98f8 100644
#include <asm-generic/pgtable.h>
#endif /* __ASSEMBLY__ */
-diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
-index f686f49..e16ed74 100644
---- a/arch/x86/include/asm/pgtable_32.h
-+++ b/arch/x86/include/asm/pgtable_32.h
-@@ -25,9 +25,6 @@
- struct mm_struct;
- struct vm_area_struct;
-
--extern pgd_t swapper_pg_dir[1024];
--extern pgd_t trampoline_pg_dir[1024];
--
- static inline void pgtable_cache_init(void) { }
- static inline void check_pgt_cache(void) { }
- void paging_init(void);
-@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
- # include <asm/pgtable-2level.h>
- #endif
-
-+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
-+extern pgd_t trampoline_pg_dir[PTRS_PER_PGD];
-+#ifdef CONFIG_X86_PAE
-+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
-+#endif
-+
- #if defined(CONFIG_HIGHPTE)
- #define __KM_PTE \
- (in_nmi() ? KM_NMI_PTE : \
-@@ -72,7 +75,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
- /* Clear a kernel PTE and flush it from the TLB */
- #define kpte_clear_flush(ptep, vaddr) \
- do { \
-+ pax_open_kernel(); \
- pte_clear(&init_mm, (vaddr), (ptep)); \
-+ pax_close_kernel(); \
- __flush_tlb_one((vaddr)); \
- } while (0)
-
-@@ -84,6 +89,9 @@ do { \
-
- #endif /* !__ASSEMBLY__ */
-
-+#define HAVE_ARCH_UNMAPPED_AREA
-+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
-+
- /*
- * kern_addr_valid() is (1) for FLATMEM and (0) for
- * SPARSEMEM and DISCONTIGMEM
-diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
-index ed5903b..c7fe163 100644
---- a/arch/x86/include/asm/pgtable_32_types.h
-+++ b/arch/x86/include/asm/pgtable_32_types.h
-@@ -8,7 +8,7 @@
- */
- #ifdef CONFIG_X86_PAE
- # include <asm/pgtable-3level_types.h>
--# define PMD_SIZE (1UL << PMD_SHIFT)
-+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
- # define PMD_MASK (~(PMD_SIZE - 1))
- #else
- # include <asm/pgtable-2level_types.h>
-@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
- # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
- #endif
-
-+#ifdef CONFIG_PAX_KERNEXEC
-+#ifndef __ASSEMBLY__
-+extern unsigned char MODULES_EXEC_VADDR[];
-+extern unsigned char MODULES_EXEC_END[];
-+#endif
-+#include <asm/boot.h>
-+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
-+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
-+#else
-+#define ktla_ktva(addr) (addr)
-+#define ktva_ktla(addr) (addr)
-+#endif
-+
- #define MODULES_VADDR VMALLOC_START
- #define MODULES_END VMALLOC_END
- #define MODULES_LEN (MODULES_VADDR - MODULES_END)
-diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
-index 181be52..7ab9c31 100644
---- a/arch/x86/include/asm/pgtable_64.h
-+++ b/arch/x86/include/asm/pgtable_64.h
-@@ -16,10 +16,13 @@
-
- extern pud_t level3_kernel_pgt[512];
- extern pud_t level3_ident_pgt[512];
-+extern pud_t level3_vmalloc_pgt[512];
-+extern pud_t level3_vmemmap_pgt[512];
-+extern pud_t level2_vmemmap_pgt[512];
- extern pmd_t level2_kernel_pgt[512];
- extern pmd_t level2_fixmap_pgt[512];
--extern pmd_t level2_ident_pgt[512];
--extern pgd_t init_level4_pgt[];
-+extern pmd_t level2_ident_pgt[512*2];
-+extern pgd_t init_level4_pgt[512];
-
- #define swapper_pg_dir init_level4_pgt
-
-@@ -74,7 +77,9 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
-
- static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
- {
-+ pax_open_kernel();
- *pmdp = pmd;
-+ pax_close_kernel();
- }
-
- static inline void native_pmd_clear(pmd_t *pmd)
-@@ -94,7 +99,9 @@ static inline void native_pud_clear(pud_t *pud)
-
- static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
- {
-+ pax_open_kernel();
- *pgdp = pgd;
-+ pax_close_kernel();
- }
-
- static inline void native_pgd_clear(pgd_t *pgd)
-diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
-index 766ea16..5b96cb3 100644
---- a/arch/x86/include/asm/pgtable_64_types.h
-+++ b/arch/x86/include/asm/pgtable_64_types.h
-@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
- #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
- #define MODULES_END _AC(0xffffffffff000000, UL)
- #define MODULES_LEN (MODULES_END - MODULES_VADDR)
-+#define MODULES_EXEC_VADDR MODULES_VADDR
-+#define MODULES_EXEC_END MODULES_END
-+
-+#define ktla_ktva(addr) (addr)
-+#define ktva_ktla(addr) (addr)
-
- #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
-diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
-index d1f4a76..e1f9b51 100644
---- a/arch/x86/include/asm/pgtable_types.h
-+++ b/arch/x86/include/asm/pgtable_types.h
+diff -urNp linux-2.6.37/arch/x86/include/asm/pgtable_types.h linux-2.6.37/arch/x86/include/asm/pgtable_types.h
+--- linux-2.6.37/arch/x86/include/asm/pgtable_types.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/include/asm/pgtable_types.h 2011-01-17 02:41:01.000000000 -0500
@@ -16,12 +16,11 @@
#define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
#define _PAGE_BIT_PAT 7 /* on 4KB pages */
@@ -8972,7 +8768,7 @@ index d1f4a76..e1f9b51 100644
#define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
#endif
-@@ -202,7 +205,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
+@@ -202,7 +205,17 @@ static inline pgdval_t pgd_flags(pgd_t p
{
return native_pgd_val(pgd) & PTE_FLAGS_MASK;
}
@@ -8990,7 +8786,7 @@ index d1f4a76..e1f9b51 100644
#if PAGETABLE_LEVELS > 3
typedef struct { pudval_t pud; } pud_t;
-@@ -216,8 +229,6 @@ static inline pudval_t native_pud_val(pud_t pud)
+@@ -216,8 +229,6 @@ static inline pudval_t native_pud_val(pu
return pud.pud;
}
#else
@@ -8999,7 +8795,7 @@ index d1f4a76..e1f9b51 100644
static inline pudval_t native_pud_val(pud_t pud)
{
return native_pgd_val(pud.pgd);
-@@ -237,8 +248,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
+@@ -237,8 +248,6 @@ static inline pmdval_t native_pmd_val(pm
return pmd.pmd;
}
#else
@@ -9016,11 +8812,10 @@ index d1f4a76..e1f9b51 100644
#define pgprot_writecombine pgprot_writecombine
extern pgprot_t pgprot_writecombine(pgprot_t prot);
-diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
-index 7e5c6a6..7093da3 100644
---- a/arch/x86/include/asm/processor.h
-+++ b/arch/x86/include/asm/processor.h
-@@ -269,7 +269,7 @@ struct tss_struct {
+diff -urNp linux-2.6.37/arch/x86/include/asm/processor.h linux-2.6.37/arch/x86/include/asm/processor.h
+--- linux-2.6.37/arch/x86/include/asm/processor.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/include/asm/processor.h 2011-01-17 02:41:01.000000000 -0500
+@@ -271,7 +271,7 @@ struct tss_struct {
} ____cacheline_aligned;
@@ -9029,7 +8824,7 @@ index 7e5c6a6..7093da3 100644
/*
* Save the original ist values for checking stack pointers during debugging
-@@ -884,8 +884,15 @@ static inline void spin_lock_prefetch(const void *x)
+@@ -864,8 +864,15 @@ static inline void spin_lock_prefetch(co
*/
#define TASK_SIZE PAGE_OFFSET
#define TASK_SIZE_MAX TASK_SIZE
@@ -9046,7 +8841,7 @@ index 7e5c6a6..7093da3 100644
#define INIT_THREAD { \
.sp0 = sizeof(init_stack) + (long)&init_stack, \
-@@ -902,7 +909,7 @@ static inline void spin_lock_prefetch(const void *x)
+@@ -882,7 +889,7 @@ static inline void spin_lock_prefetch(co
*/
#define INIT_TSS { \
.x86_tss = { \
@@ -9055,7 +8850,7 @@ index 7e5c6a6..7093da3 100644
.ss0 = __KERNEL_DS, \
.ss1 = __KERNEL_CS, \
.io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
-@@ -913,11 +920,7 @@ static inline void spin_lock_prefetch(const void *x)
+@@ -893,11 +900,7 @@ static inline void spin_lock_prefetch(co
extern unsigned long thread_saved_pc(struct task_struct *tsk);
#define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
@@ -9068,7 +8863,7 @@ index 7e5c6a6..7093da3 100644
/*
* The below -8 is to reserve 8 bytes on top of the ring0 stack.
-@@ -932,7 +935,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
+@@ -912,7 +915,7 @@ extern unsigned long thread_saved_pc(str
#define task_pt_regs(task) \
({ \
struct pt_regs *__regs__; \
@@ -9077,7 +8872,7 @@ index 7e5c6a6..7093da3 100644
__regs__ - 1; \
})
-@@ -942,13 +945,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
+@@ -922,13 +925,13 @@ extern unsigned long thread_saved_pc(str
/*
* User space process size. 47bits minus one guard page.
*/
@@ -9093,7 +8888,7 @@ index 7e5c6a6..7093da3 100644
#define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
IA32_PAGE_OFFSET : TASK_SIZE_MAX)
-@@ -985,6 +988,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
+@@ -965,6 +968,10 @@ extern void start_thread(struct pt_regs
*/
#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
@@ -9104,11 +8899,10 @@ index 7e5c6a6..7093da3 100644
#define KSTK_EIP(task) (task_pt_regs(task)->ip)
/* Get/set a process' ability to use the timestamp counter instruction */
-diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
-index 78cd1ea..37bd171 100644
---- a/arch/x86/include/asm/ptrace.h
-+++ b/arch/x86/include/asm/ptrace.h
-@@ -152,28 +152,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
+diff -urNp linux-2.6.37/arch/x86/include/asm/ptrace.h linux-2.6.37/arch/x86/include/asm/ptrace.h
+--- linux-2.6.37/arch/x86/include/asm/ptrace.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/include/asm/ptrace.h 2011-01-17 02:41:01.000000000 -0500
+@@ -152,28 +152,29 @@ static inline unsigned long regs_return_
}
/*
@@ -9144,10 +8938,9 @@ index 78cd1ea..37bd171 100644
#endif
}
-diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
-index 562d4fd..cb2d6ba 100644
---- a/arch/x86/include/asm/reboot.h
-+++ b/arch/x86/include/asm/reboot.h
+diff -urNp linux-2.6.37/arch/x86/include/asm/reboot.h linux-2.6.37/arch/x86/include/asm/reboot.h
+--- linux-2.6.37/arch/x86/include/asm/reboot.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/include/asm/reboot.h 2011-01-17 02:41:01.000000000 -0500
@@ -18,7 +18,7 @@ extern struct machine_ops machine_ops;
void native_machine_crash_shutdown(struct pt_regs *regs);
@@ -9157,196 +8950,100 @@ index 562d4fd..cb2d6ba 100644
typedef void (*nmi_shootdown_cb)(int, struct die_args*);
void nmi_shootdown_cpus(nmi_shootdown_cb callback);
-diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
-index 606ede1..45b2044 100644
---- a/arch/x86/include/asm/rwsem.h
-+++ b/arch/x86/include/asm/rwsem.h
-@@ -118,10 +118,26 @@ static inline void __down_read(struct rw_semaphore *sem)
+diff -urNp linux-2.6.37/arch/x86/include/asm/rwsem.h linux-2.6.37/arch/x86/include/asm/rwsem.h
+--- linux-2.6.37/arch/x86/include/asm/rwsem.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/include/asm/rwsem.h 2011-01-17 02:41:01.000000000 -0500
+@@ -118,6 +118,14 @@ static inline void __down_read(struct rw
{
asm volatile("# beginning down_read\n\t"
LOCK_PREFIX _ASM_INC "(%1)\n\t"
+
+#ifdef CONFIG_PAX_REFCOUNT
-+#ifdef CONFIG_X86_32
-+ "into\n0:\n"
-+#else
+ "jno 0f\n"
-+ "int $4\n0:\n"
-+#endif
-+ ".pushsection .fixup,\"ax\"\n"
-+ "1:\n"
+ LOCK_PREFIX _ASM_DEC "(%1)\n"
-+ "jmp 0b\n"
-+ ".popsection\n"
-+ _ASM_EXTABLE(0b, 1b)
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
+#endif
+
- /* adds 0x00000001, returns the old value */
-- " jns 1f\n"
-+ " jns 2f\n"
+ /* adds 0x00000001 */
+ " jns 1f\n"
" call call_rwsem_down_read_failed\n"
-- "1:\n\t"
-+ "2:\n\t"
- "# ending down_read\n\t"
- : "+m" (sem->count)
- : "a" (sem)
-@@ -136,13 +152,29 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
- rwsem_count_t result, tmp;
- asm volatile("# beginning __down_read_trylock\n\t"
- " mov %0,%1\n\t"
-- "1:\n\t"
-+ "2:\n\t"
+@@ -139,6 +147,14 @@ static inline int __down_read_trylock(st
+ "1:\n\t"
" mov %1,%2\n\t"
" add %3,%2\n\t"
-- " jle 2f\n\t"
+
+#ifdef CONFIG_PAX_REFCOUNT
-+#ifdef CONFIG_X86_32
-+ "into\n0:\n"
-+#else
+ "jno 0f\n"
-+ "int $4\n0:\n"
-+#endif
-+ ".pushsection .fixup,\"ax\"\n"
-+ "1:\n"
+ "sub %3,%2\n"
-+ "jmp 0b\n"
-+ ".popsection\n"
-+ _ASM_EXTABLE(0b, 1b)
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
+#endif
+
-+ " jle 3f\n\t"
+ " jle 2f\n\t"
LOCK_PREFIX " cmpxchg %2,%0\n\t"
-- " jnz 1b\n\t"
-- "2:\n\t"
-+ " jnz 2b\n\t"
-+ "3:\n\t"
- "# ending __down_read_trylock\n\t"
- : "+m" (sem->count), "=&a" (result), "=&r" (tmp)
- : "i" (RWSEM_ACTIVE_READ_BIAS)
-@@ -160,12 +192,28 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
- tmp = RWSEM_ACTIVE_WRITE_BIAS;
+ " jnz 1b\n\t"
+@@ -158,6 +174,14 @@ static inline void __down_write_nested(s
+ rwsem_count_t tmp;
asm volatile("# beginning down_write\n\t"
LOCK_PREFIX " xadd %1,(%2)\n\t"
+
+#ifdef CONFIG_PAX_REFCOUNT
-+#ifdef CONFIG_X86_32
-+ "into\n0:\n"
-+#else
+ "jno 0f\n"
-+ "int $4\n0:\n"
-+#endif
-+ ".pushsection .fixup,\"ax\"\n"
-+ "1:\n"
+ "mov %1,(%2)\n"
-+ "jmp 0b\n"
-+ ".popsection\n"
-+ _ASM_EXTABLE(0b, 1b)
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
+#endif
+
- /* subtract 0x0000ffff, returns the old value */
+ /* adds 0xffff0001, returns the old value */
" test %1,%1\n\t"
/* was the count 0 before? */
-- " jz 1f\n"
-+ " jz 2f\n"
- " call call_rwsem_down_write_failed\n"
-- "1:\n"
-+ "2:\n"
- "# ending down_write"
- : "+m" (sem->count), "=d" (tmp)
- : "a" (sem), "1" (tmp)
-@@ -198,10 +246,26 @@ static inline void __up_read(struct rw_semaphore *sem)
- rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
+@@ -196,6 +220,14 @@ static inline void __up_read(struct rw_s
+ rwsem_count_t tmp;
asm volatile("# beginning __up_read\n\t"
LOCK_PREFIX " xadd %1,(%2)\n\t"
+
+#ifdef CONFIG_PAX_REFCOUNT
-+#ifdef CONFIG_X86_32
-+ "into\n0:\n"
-+#else
+ "jno 0f\n"
-+ "int $4\n0:\n"
-+#endif
-+ ".pushsection .fixup,\"ax\"\n"
-+ "1:\n"
+ "mov %1,(%2)\n"
-+ "jmp 0b\n"
-+ ".popsection\n"
-+ _ASM_EXTABLE(0b, 1b)
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
+#endif
+
/* subtracts 1, returns the old value */
-- " jns 1f\n\t"
-+ " jns 2f\n\t"
- " call call_rwsem_wake\n"
-- "1:\n"
-+ "2:\n"
- "# ending __up_read\n"
- : "+m" (sem->count), "=d" (tmp)
- : "a" (sem), "1" (tmp)
-@@ -216,11 +280,27 @@ static inline void __up_write(struct rw_semaphore *sem)
+ " jns 1f\n\t"
+ " call call_rwsem_wake\n" /* expects old value in %edx */
+@@ -214,6 +246,14 @@ static inline void __up_write(struct rw_
rwsem_count_t tmp;
asm volatile("# beginning __up_write\n\t"
LOCK_PREFIX " xadd %1,(%2)\n\t"
+
+#ifdef CONFIG_PAX_REFCOUNT
-+#ifdef CONFIG_X86_32
-+ "into\n0:\n"
-+#else
+ "jno 0f\n"
++ "mov %1,(%2)\n"
+ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
+#endif
-+ ".pushsection .fixup,\"ax\"\n"
-+ "1:\n"
-+ "mov %1,(%2)\n"
-+ "jmp 0b\n"
-+ ".popsection\n"
-+ _ASM_EXTABLE(0b, 1b)
-+#endif
-+
- /* tries to transition
- 0xffff0001 -> 0x00000000 */
-- " jz 1f\n"
-+ " jz 2f\n"
- " call call_rwsem_wake\n"
-- "1:\n\t"
-+ "2:\n\t"
- "# ending __up_write\n"
- : "+m" (sem->count), "=d" (tmp)
- : "a" (sem), "1" (-RWSEM_ACTIVE_WRITE_BIAS)
-@@ -234,13 +314,29 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
++
+ /* subtracts 0xffff0001, returns the old value */
+ " jns 1f\n\t"
+ " call call_rwsem_wake\n" /* expects old value in %edx */
+@@ -231,6 +271,14 @@ static inline void __downgrade_write(str
{
asm volatile("# beginning __downgrade_write\n\t"
LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
+
+#ifdef CONFIG_PAX_REFCOUNT
-+#ifdef CONFIG_X86_32
-+ "into\n0:\n"
-+#else
+ "jno 0f\n"
-+ "int $4\n0:\n"
-+#endif
-+ ".pushsection .fixup,\"ax\"\n"
-+ "1:\n"
+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
-+ "jmp 0b\n"
-+ ".popsection\n"
-+ _ASM_EXTABLE(0b, 1b)
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
+#endif
+
/*
* transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
* 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
- */
-- " jns 1f\n\t"
-+ " jns 2f\n\t"
- " call call_rwsem_downgrade_wake\n"
-- "1:\n\t"
-+ "2:\n\t"
- "# ending __downgrade_write\n"
- : "+m" (sem->count)
- : "a" (sem), "er" (-RWSEM_WAITING_BIAS)
-@@ -253,7 +349,23 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
+@@ -250,7 +298,15 @@ static inline void __downgrade_write(str
static inline void rwsem_atomic_add(rwsem_count_t delta,
struct rw_semaphore *sem)
{
@@ -9354,24 +9051,16 @@ index 606ede1..45b2044 100644
+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
-+#ifdef CONFIG_X86_32
-+ "into\n0:\n"
-+#else
+ "jno 0f\n"
-+ "int $4\n0:\n"
-+#endif
-+ ".pushsection .fixup,\"ax\"\n"
-+ "1:\n"
+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
-+ "jmp 0b\n"
-+ ".popsection\n"
-+ _ASM_EXTABLE(0b, 1b)
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
+#endif
+
: "+m" (sem->count)
: "er" (delta));
}
-@@ -266,7 +378,23 @@ static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta,
+@@ -263,7 +319,15 @@ static inline rwsem_count_t rwsem_atomic
{
rwsem_count_t tmp = delta;
@@ -9379,27 +9068,18 @@ index 606ede1..45b2044 100644
+ asm volatile(LOCK_PREFIX "xadd %0,%1\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
-+#ifdef CONFIG_X86_32
-+ "into\n0:\n"
-+#else
+ "jno 0f\n"
-+ "int $4\n0:\n"
-+#endif
-+ ".pushsection .fixup,\"ax\"\n"
-+ "1:\n"
+ "mov %0,%1\n"
-+ "jmp 0b\n"
-+ ".popsection\n"
-+ _ASM_EXTABLE(0b, 1b)
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
+#endif
+
: "+r" (tmp), "+m" (sem->count)
: : "memory");
-diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
-index 14e0ed8..7bad9b6 100644
---- a/arch/x86/include/asm/segment.h
-+++ b/arch/x86/include/asm/segment.h
+diff -urNp linux-2.6.37/arch/x86/include/asm/segment.h linux-2.6.37/arch/x86/include/asm/segment.h
+--- linux-2.6.37/arch/x86/include/asm/segment.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/include/asm/segment.h 2011-01-17 02:41:01.000000000 -0500
@@ -62,8 +62,8 @@
* 26 - ESPFIX small SS
* 27 - per-cpu [ offset to per-cpu data area ]
@@ -9413,30 +9093,21 @@ index 14e0ed8..7bad9b6 100644
#define GDT_ENTRY_TLS_MIN 6
@@ -77,6 +77,8 @@
- #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
+ #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
+
- #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
+ #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
- #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
-@@ -88,7 +90,7 @@
- #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
- #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
-
--#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
-+#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
- #ifdef CONFIG_SMP
- #define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
- #else
+ #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
@@ -102,6 +104,12 @@
#define __KERNEL_STACK_CANARY 0
#endif
-+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE + 17)
++#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
+
-+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE + 18)
++#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
+
#define GDT_ENTRY_DOUBLEFAULT_TSS 31
@@ -9463,15 +9134,14 @@ index 14e0ed8..7bad9b6 100644
@@ -183,6 +193,7 @@
#endif
- #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
-+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS * 8)
- #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
- #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS* 8 + 3)
- #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS* 8 + 3)
-diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
-index 4c2f63c..06ded5b 100644
---- a/arch/x86/include/asm/smp.h
-+++ b/arch/x86/include/asm/smp.h
+ #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
++#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
+ #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
+ #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
+ #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
+diff -urNp linux-2.6.37/arch/x86/include/asm/smp.h linux-2.6.37/arch/x86/include/asm/smp.h
+--- linux-2.6.37/arch/x86/include/asm/smp.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/include/asm/smp.h 2011-01-17 02:41:01.000000000 -0500
@@ -24,7 +24,7 @@ extern unsigned int num_processors;
DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
@@ -9481,66 +9151,40 @@ index 4c2f63c..06ded5b 100644
static inline struct cpumask *cpu_sibling_mask(int cpu)
{
-diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
-index 3089f70..8a618b0 100644
---- a/arch/x86/include/asm/spinlock.h
-+++ b/arch/x86/include/asm/spinlock.h
-@@ -249,18 +249,50 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
+diff -urNp linux-2.6.37/arch/x86/include/asm/spinlock.h linux-2.6.37/arch/x86/include/asm/spinlock.h
+--- linux-2.6.37/arch/x86/include/asm/spinlock.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/include/asm/spinlock.h 2011-01-17 02:41:01.000000000 -0500
+@@ -249,6 +249,14 @@ static inline int arch_write_can_lock(ar
static inline void arch_read_lock(arch_rwlock_t *rw)
{
asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
-- "jns 1f\n"
-- "call __read_lock_failed\n\t"
+
+#ifdef CONFIG_PAX_REFCOUNT
-+#ifdef CONFIG_X86_32
-+ "into\n0:\n"
-+#else
+ "jno 0f\n"
-+ "int $4\n0:\n"
-+#endif
-+ ".pushsection .fixup,\"ax\"\n"
- "1:\n"
+ LOCK_PREFIX " addl $1,(%0)\n"
-+ "jmp 0b\n"
-+ ".popsection\n"
-+ _ASM_EXTABLE(0b, 1b)
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
+#endif
+
-+ "jns 2f\n"
-+ "call __read_lock_failed\n\t"
-+ "2:\n"
- ::LOCK_PTR_REG (rw) : "memory");
- }
-
+ "jns 1f\n"
+ "call __read_lock_failed\n\t"
+ "1:\n"
+@@ -258,6 +266,14 @@ static inline void arch_read_lock(arch_r
static inline void arch_write_lock(arch_rwlock_t *rw)
{
asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
-- "jz 1f\n"
-- "call __write_lock_failed\n\t"
+
+#ifdef CONFIG_PAX_REFCOUNT
-+#ifdef CONFIG_X86_32
-+ "into\n0:\n"
-+#else
+ "jno 0f\n"
-+ "int $4\n0:\n"
-+#endif
-+ ".pushsection .fixup,\"ax\"\n"
- "1:\n"
+ LOCK_PREFIX " addl %1,(%0)\n"
-+ "jmp 0b\n"
-+ ".popsection\n"
-+ _ASM_EXTABLE(0b, 1b)
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
+#endif
+
-+ "jz 2f\n"
-+ "call __write_lock_failed\n\t"
-+ "2:\n"
- ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory");
- }
-
-@@ -286,12 +318,45 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
+ "jz 1f\n"
+ "call __write_lock_failed\n\t"
+ "1:\n"
+@@ -286,12 +302,29 @@ static inline int arch_write_trylock(arc
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
@@ -9548,18 +9192,10 @@ index 3089f70..8a618b0 100644
+ asm volatile(LOCK_PREFIX "incl %0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
-+#ifdef CONFIG_X86_32
-+ "into\n0:\n"
-+#else
+ "jno 0f\n"
-+ "int $4\n0:\n"
-+#endif
-+ ".pushsection .fixup,\"ax\"\n"
-+ "1:\n"
+ LOCK_PREFIX "decl %0\n"
-+ "jmp 0b\n"
-+ ".popsection\n"
-+ _ASM_EXTABLE(0b, 1b)
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
+#endif
+
+ :"+m" (rw->lock) : : "memory");
@@ -9571,28 +9207,31 @@ index 3089f70..8a618b0 100644
+ asm volatile(LOCK_PREFIX "addl %1, %0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
-+#ifdef CONFIG_X86_32
-+ "into\n0:\n"
-+#else
+ "jno 0f\n"
++ LOCK_PREFIX "subl %1, %0\n"
+ "int $4\n0:\n"
-+#endif
-+ ".pushsection .fixup,\"ax\"\n"
-+ "1:\n"
-+ LOCK_PREFIX "subl %1,%0\n"
-+ "jmp 0b\n"
-+ ".popsection\n"
-+ _ASM_EXTABLE(0b, 1b)
++ _ASM_EXTABLE(0b, 0b)
+#endif
+
: "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
}
-diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
-index e7f4d33..0bef582 100644
---- a/arch/x86/include/asm/system.h
-+++ b/arch/x86/include/asm/system.h
-@@ -202,7 +202,7 @@ static inline unsigned long get_limit(unsigned long segment)
+diff -urNp linux-2.6.37/arch/x86/include/asm/stackprotector.h linux-2.6.37/arch/x86/include/asm/stackprotector.h
+--- linux-2.6.37/arch/x86/include/asm/stackprotector.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/include/asm/stackprotector.h 2011-01-17 02:41:01.000000000 -0500
+@@ -113,7 +113,7 @@ static inline void setup_stack_canary_se
+
+ static inline void load_stack_canary_segment(void)
+ {
+-#ifdef CONFIG_X86_32
++#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
+ asm volatile ("mov %0, %%gs" : : "r" (0));
+ #endif
+ }
+diff -urNp linux-2.6.37/arch/x86/include/asm/system.h linux-2.6.37/arch/x86/include/asm/system.h
+--- linux-2.6.37/arch/x86/include/asm/system.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/include/asm/system.h 2011-01-17 02:41:01.000000000 -0500
+@@ -202,7 +202,7 @@ static inline unsigned long get_limit(un
{
unsigned long __limit;
asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
@@ -9610,239 +9249,10 @@ index e7f4d33..0bef582 100644
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
void default_idle(void);
-diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
-index abd3e0e..cbddafc 100644
---- a/arch/x86/include/asm/uaccess.h
-+++ b/arch/x86/include/asm/uaccess.h
-@@ -8,12 +8,15 @@
- #include <linux/thread_info.h>
- #include <linux/prefetch.h>
- #include <linux/string.h>
-+#include <linux/sched.h>
- #include <asm/asm.h>
- #include <asm/page.h>
-
- #define VERIFY_READ 0
- #define VERIFY_WRITE 1
-
-+extern void check_object_size(const void *ptr, unsigned long n, bool to);
-+
- /*
- * The fs value determines whether argument validity checking should be
- * performed or not. If get_fs() == USER_DS, checking is performed, with
-@@ -29,7 +32,12 @@
-
- #define get_ds() (KERNEL_DS)
- #define get_fs() (current_thread_info()->addr_limit)
-+#ifdef CONFIG_X86_32
-+void __set_fs(mm_segment_t x, int cpu);
-+void set_fs(mm_segment_t x);
-+#else
- #define set_fs(x) (current_thread_info()->addr_limit = (x))
-+#endif
-
- #define segment_eq(a, b) ((a).seg == (b).seg)
-
-@@ -77,7 +85,33 @@
- * checks that the pointer is in the user space range - after calling
- * this function, memory access functions may still return -EFAULT.
- */
--#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
-+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
-+#define access_ok(type, addr, size) \
-+({ \
-+ long __size = size; \
-+ unsigned long __addr = (unsigned long)addr; \
-+ unsigned long __addr_ao = __addr & PAGE_MASK; \
-+ unsigned long __end_ao = __addr + __size - 1; \
-+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
-+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
-+ while(__addr_ao <= __end_ao) { \
-+ char __c_ao; \
-+ __addr_ao += PAGE_SIZE; \
-+ if (__size > PAGE_SIZE) \
-+ cond_resched(); \
-+ if (__get_user(__c_ao, (char __user *)__addr)) \
-+ break; \
-+ if (type != VERIFY_WRITE) { \
-+ __addr = __addr_ao; \
-+ continue; \
-+ } \
-+ if (__put_user(__c_ao, (char __user *)__addr)) \
-+ break; \
-+ __addr = __addr_ao; \
-+ } \
-+ } \
-+ __ret_ao; \
-+})
-
- /*
- * The exception table consists of pairs of addresses: the first is the
-@@ -183,13 +217,21 @@ extern int __get_user_bad(void);
- asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
- : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
-
--
-+#ifdef CONFIG_X86_32
-+#define _ASM_LOAD_USER_DS(ds) "movw %w" #ds ",%%ds\n"
-+#define _ASM_LOAD_KERNEL_DS "pushl %%ss; popl %%ds\n"
-+#else
-+#define _ASM_LOAD_USER_DS(ds)
-+#define _ASM_LOAD_KERNEL_DS
-+#endif
-
- #ifdef CONFIG_X86_32
- #define __put_user_asm_u64(x, addr, err, errret) \
-- asm volatile("1: movl %%eax,0(%2)\n" \
-- "2: movl %%edx,4(%2)\n" \
-+ asm volatile(_ASM_LOAD_USER_DS(5) \
-+ "1: movl %%eax,%%ds:0(%2)\n" \
-+ "2: movl %%edx,%%ds:4(%2)\n" \
- "3:\n" \
-+ _ASM_LOAD_KERNEL_DS \
- ".section .fixup,\"ax\"\n" \
- "4: movl %3,%0\n" \
- " jmp 3b\n" \
-@@ -197,15 +239,18 @@ extern int __get_user_bad(void);
- _ASM_EXTABLE(1b, 4b) \
- _ASM_EXTABLE(2b, 4b) \
- : "=r" (err) \
-- : "A" (x), "r" (addr), "i" (errret), "0" (err))
-+ : "A" (x), "r" (addr), "i" (errret), "0" (err), \
-+ "r"(__USER_DS))
-
- #define __put_user_asm_ex_u64(x, addr) \
-- asm volatile("1: movl %%eax,0(%1)\n" \
-- "2: movl %%edx,4(%1)\n" \
-+ asm volatile(_ASM_LOAD_USER_DS(2) \
-+ "1: movl %%eax,%%ds:0(%1)\n" \
-+ "2: movl %%edx,%%ds:4(%1)\n" \
- "3:\n" \
-+ _ASM_LOAD_KERNEL_DS \
- _ASM_EXTABLE(1b, 2b - 1b) \
- _ASM_EXTABLE(2b, 3b - 2b) \
-- : : "A" (x), "r" (addr))
-+ : : "A" (x), "r" (addr), "r"(__USER_DS))
-
- #define __put_user_x8(x, ptr, __ret_pu) \
- asm volatile("call __put_user_8" : "=a" (__ret_pu) \
-@@ -374,16 +419,18 @@ do { \
- } while (0)
-
- #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
-- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
-+ asm volatile(_ASM_LOAD_USER_DS(5) \
-+ "1: mov"itype" %%ds:%2,%"rtype"1\n" \
- "2:\n" \
-+ _ASM_LOAD_KERNEL_DS \
- ".section .fixup,\"ax\"\n" \
- "3: mov %3,%0\n" \
- " xor"itype" %"rtype"1,%"rtype"1\n" \
- " jmp 2b\n" \
- ".previous\n" \
- _ASM_EXTABLE(1b, 3b) \
-- : "=r" (err), ltype(x) \
-- : "m" (__m(addr)), "i" (errret), "0" (err))
-+ : "=r" (err), ltype (x) \
-+ : "m" (__m(addr)), "i" (errret), "0" (err), "r"(__USER_DS))
-
- #define __get_user_size_ex(x, ptr, size) \
- do { \
-@@ -407,10 +454,12 @@ do { \
- } while (0)
-
- #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
-- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
-+ asm volatile(_ASM_LOAD_USER_DS(2) \
-+ "1: mov"itype" %%ds:%1,%"rtype"0\n" \
- "2:\n" \
-+ _ASM_LOAD_KERNEL_DS \
- _ASM_EXTABLE(1b, 2b - 1b) \
-- : ltype(x) : "m" (__m(addr)))
-+ : ltype(x) : "m" (__m(addr)), "r"(__USER_DS))
-
- #define __put_user_nocheck(x, ptr, size) \
- ({ \
-@@ -424,13 +473,24 @@ do { \
- int __gu_err; \
- unsigned long __gu_val; \
- __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
-- (x) = (__force __typeof__(*(ptr)))__gu_val; \
-+ (x) = (__typeof__(*(ptr)))__gu_val; \
- __gu_err; \
- })
-
- /* FIXME: this hack is definitely wrong -AK */
- struct __large_struct { unsigned long buf[100]; };
--#define __m(x) (*(struct __large_struct __user *)(x))
-+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
-+#define ____m(x) \
-+({ \
-+ unsigned long ____x = (unsigned long)(x); \
-+ if (____x < PAX_USER_SHADOW_BASE) \
-+ ____x += PAX_USER_SHADOW_BASE; \
-+ (void __user *)____x; \
-+})
-+#else
-+#define ____m(x) (x)
-+#endif
-+#define __m(x) (*(struct __large_struct __user *)____m(x))
-
- /*
- * Tell gcc we read from memory instead of writing: this is because
-@@ -438,21 +498,26 @@ struct __large_struct { unsigned long buf[100]; };
- * aliasing issues.
- */
- #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
-- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
-+ asm volatile(_ASM_LOAD_USER_DS(5) \
-+ "1: mov"itype" %"rtype"1,%%ds:%2\n" \
- "2:\n" \
-+ _ASM_LOAD_KERNEL_DS \
- ".section .fixup,\"ax\"\n" \
- "3: mov %3,%0\n" \
- " jmp 2b\n" \
- ".previous\n" \
- _ASM_EXTABLE(1b, 3b) \
- : "=r"(err) \
-- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
-+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err),\
-+ "r"(__USER_DS))
-
- #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
-- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
-+ asm volatile(_ASM_LOAD_USER_DS(2) \
-+ "1: mov"itype" %"rtype"0,%%ds:%1\n" \
- "2:\n" \
-+ _ASM_LOAD_KERNEL_DS \
- _ASM_EXTABLE(1b, 2b - 1b) \
-- : : ltype(x), "m" (__m(addr)))
-+ : : ltype(x), "m" (__m(addr)), "r"(__USER_DS))
-
- /*
- * uaccess_try and catch
-@@ -530,7 +595,7 @@ struct __large_struct { unsigned long buf[100]; };
- #define get_user_ex(x, ptr) do { \
- unsigned long __gue_val; \
- __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
-- (x) = (__force __typeof__(*(ptr)))__gue_val; \
-+ (x) = (__typeof__(*(ptr)))__gue_val; \
- } while (0)
-
- #ifdef CONFIG_X86_WP_WORKS_OK
-@@ -567,6 +632,7 @@ extern struct movsl_mask {
-
- #define ARCH_HAS_NOCACHE_UACCESS 1
-
-+#define ARCH_HAS_SORT_EXTABLE
- #ifdef CONFIG_X86_32
- # include "uaccess_32.h"
- #else
-diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
-index 088d09f..c719d0a 100644
---- a/arch/x86/include/asm/uaccess_32.h
-+++ b/arch/x86/include/asm/uaccess_32.h
-@@ -44,6 +44,9 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
+diff -urNp linux-2.6.37/arch/x86/include/asm/uaccess_32.h linux-2.6.37/arch/x86/include/asm/uaccess_32.h
+--- linux-2.6.37/arch/x86/include/asm/uaccess_32.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/include/asm/uaccess_32.h 2011-01-17 02:41:01.000000000 -0500
+@@ -44,6 +44,9 @@ unsigned long __must_check __copy_from_u
static __always_inline unsigned long __must_check
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
@@ -9852,7 +9262,7 @@ index 088d09f..c719d0a 100644
if (__builtin_constant_p(n)) {
unsigned long ret;
-@@ -62,6 +65,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
+@@ -62,6 +65,8 @@ __copy_to_user_inatomic(void __user *to,
return ret;
}
}
@@ -9861,7 +9271,7 @@ index 088d09f..c719d0a 100644
return __copy_to_user_ll(to, from, n);
}
-@@ -89,6 +94,9 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
+@@ -89,6 +94,9 @@ __copy_to_user(void __user *to, const vo
static __always_inline unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
@@ -9882,7 +9292,7 @@ index 088d09f..c719d0a 100644
if (__builtin_constant_p(n)) {
unsigned long ret;
-@@ -153,6 +165,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
+@@ -153,6 +165,8 @@ __copy_from_user(void *to, const void __
return ret;
}
}
@@ -9891,7 +9301,7 @@ index 088d09f..c719d0a 100644
return __copy_from_user_ll(to, from, n);
}
-@@ -160,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
+@@ -160,6 +174,10 @@ static __always_inline unsigned long __c
const void __user *from, unsigned long n)
{
might_fault();
@@ -9929,7 +9339,7 @@ index 088d09f..c719d0a 100644
extern void copy_from_user_overflow(void)
#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
-@@ -200,17 +222,61 @@ extern void copy_from_user_overflow(void)
+@@ -200,17 +222,61 @@ extern void copy_from_user_overflow(void
#endif
;
@@ -9998,10 +9408,9 @@ index 088d09f..c719d0a 100644
return n;
}
-diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
-index 316708d..8e13510 100644
---- a/arch/x86/include/asm/uaccess_64.h
-+++ b/arch/x86/include/asm/uaccess_64.h
+diff -urNp linux-2.6.37/arch/x86/include/asm/uaccess_64.h linux-2.6.37/arch/x86/include/asm/uaccess_64.h
+--- linux-2.6.37/arch/x86/include/asm/uaccess_64.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/include/asm/uaccess_64.h 2011-01-17 02:41:01.000000000 -0500
@@ -11,6 +11,9 @@
#include <asm/alternative.h>
#include <asm/cpufeature.h>
@@ -10012,7 +9421,7 @@ index 316708d..8e13510 100644
/*
* Copy To/From Userspace
-@@ -37,26 +40,26 @@ copy_user_generic(void *to, const void *from, unsigned len)
+@@ -37,26 +40,26 @@ copy_user_generic(void *to, const void *
return ret;
}
@@ -10052,7 +9461,7 @@ index 316708d..8e13510 100644
return n;
}
-@@ -65,17 +68,35 @@ int copy_to_user(void __user *dst, const void *src, unsigned size)
+@@ -65,110 +68,174 @@ int copy_to_user(void __user *dst, const
{
might_fault();
@@ -10072,10 +9481,16 @@ index 316708d..8e13510 100644
might_fault();
- if (!__builtin_constant_p(size))
+- return copy_user_generic(dst, (__force void *)src, size);
+
+ if ((int)size < 0)
+ return size;
+
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if (!__access_ok(VERIFY_READ, src, size))
++ return size;
++#endif
++
+ if (unlikely(sz != -1 && sz < size)) {
+#ifdef CONFIG_DEBUG_VM
+ WARN(1, "Buffer overflow detected!\n");
@@ -10087,18 +9502,52 @@ index 316708d..8e13510 100644
+ check_object_size(dst, size, false);
+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
+ src += PAX_USER_SHADOW_BASE;
- return copy_user_generic(dst, (__force void *)src, size);
++ return copy_user_generic(dst, (__force const void *)src, size);
+ }
switch (size) {
- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
+- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
++ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
ret, "b", "b", "=q", 1);
-@@ -108,18 +129,36 @@ int __copy_from_user(void *dst, const void __user *src, unsigned size)
+ return ret;
+- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
++ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
+ ret, "w", "w", "=r", 2);
+ return ret;
+- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
++ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
+ ret, "l", "k", "=r", 4);
+ return ret;
+- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
++ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
+ ret, "q", "", "=r", 8);
+ return ret;
+ case 10:
+- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
++ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
+ ret, "q", "", "=r", 10);
+ if (unlikely(ret))
+ return ret;
+ __get_user_asm(*(u16 *)(8 + (char *)dst),
+- (u16 __user *)(8 + (char __user *)src),
++ (const u16 __user *)(8 + (const char __user *)src),
+ ret, "w", "w", "=r", 2);
+ return ret;
+ case 16:
+- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
++ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
+ ret, "q", "", "=r", 16);
+ if (unlikely(ret))
+ return ret;
+ __get_user_asm(*(u64 *)(8 + (char *)dst),
+- (u64 __user *)(8 + (char __user *)src),
++ (const u64 __user *)(8 + (const char __user *)src),
ret, "q", "", "=r", 8);
return ret;
default:
+- return copy_user_generic(dst, (__force void *)src, size);
+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
+ src += PAX_USER_SHADOW_BASE;
- return copy_user_generic(dst, (__force void *)src, size);
++ return copy_user_generic(dst, (__force const void *)src, size);
}
}
@@ -10116,6 +9565,11 @@ index 316708d..8e13510 100644
+ if ((int)size < 0)
+ return size;
+
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if (!__access_ok(VERIFY_WRITE, dst, size))
++ return size;
++#endif
++
+ if (unlikely(sz != -1 && sz < size)) {
+#ifdef CONFIG_DEBUG_VM
+ WARN(1, "Buffer overflow detected!\n");
@@ -10130,9 +9584,42 @@ index 316708d..8e13510 100644
return copy_user_generic((__force void *)dst, src, size);
+ }
switch (size) {
- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
+- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
++ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
ret, "b", "b", "iq", 1);
-@@ -152,19 +191,30 @@ int __copy_to_user(void __user *dst, const void *src, unsigned size)
+ return ret;
+- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
++ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
+ ret, "w", "w", "ir", 2);
+ return ret;
+- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
++ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
+ ret, "l", "k", "ir", 4);
+ return ret;
+- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
++ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
+ ret, "q", "", "er", 8);
+ return ret;
+ case 10:
+- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
++ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
+ ret, "q", "", "er", 10);
+ if (unlikely(ret))
+ return ret;
+ asm("":::"memory");
+- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
++ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
+ ret, "w", "w", "ir", 2);
+ return ret;
+ case 16:
+- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
++ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
+ ret, "q", "", "er", 16);
+ if (unlikely(ret))
+ return ret;
+ asm("":::"memory");
+- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
++ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
ret, "q", "", "er", 8);
return ret;
default:
@@ -10155,18 +9642,58 @@ index 316708d..8e13510 100644
+ if ((int)size < 0)
+ return size;
+
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if (!__access_ok(VERIFY_READ, src, size))
++ return size;
++ if (!__access_ok(VERIFY_WRITE, dst, size))
++ return size;
++#endif
++
+ if (!__builtin_constant_p(size)) {
+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
+ src += PAX_USER_SHADOW_BASE;
+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
+ dst += PAX_USER_SHADOW_BASE;
return copy_user_generic((__force void *)dst,
- (__force void *)src, size);
+- (__force void *)src, size);
++ (__force const void *)src, size);
+ }
switch (size) {
case 1: {
u8 tmp;
-@@ -204,6 +254,10 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
+- __get_user_asm(tmp, (u8 __user *)src,
++ __get_user_asm(tmp, (const u8 __user *)src,
+ ret, "b", "b", "=q", 1);
+ if (likely(!ret))
+ __put_user_asm(tmp, (u8 __user *)dst,
+@@ -177,7 +244,7 @@ int __copy_in_user(void __user *dst, con
+ }
+ case 2: {
+ u16 tmp;
+- __get_user_asm(tmp, (u16 __user *)src,
++ __get_user_asm(tmp, (const u16 __user *)src,
+ ret, "w", "w", "=r", 2);
+ if (likely(!ret))
+ __put_user_asm(tmp, (u16 __user *)dst,
+@@ -187,7 +254,7 @@ int __copy_in_user(void __user *dst, con
+
+ case 4: {
+ u32 tmp;
+- __get_user_asm(tmp, (u32 __user *)src,
++ __get_user_asm(tmp, (const u32 __user *)src,
+ ret, "l", "k", "=r", 4);
+ if (likely(!ret))
+ __put_user_asm(tmp, (u32 __user *)dst,
+@@ -196,7 +263,7 @@ int __copy_in_user(void __user *dst, con
+ }
+ case 8: {
+ u64 tmp;
+- __get_user_asm(tmp, (u64 __user *)src,
++ __get_user_asm(tmp, (const u64 __user *)src,
+ ret, "q", "", "=r", 8);
+ if (likely(!ret))
+ __put_user_asm(tmp, (u64 __user *)dst,
+@@ -204,8 +271,12 @@ int __copy_in_user(void __user *dst, con
return ret;
}
default:
@@ -10175,12 +9702,23 @@ index 316708d..8e13510 100644
+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
+ dst += PAX_USER_SHADOW_BASE;
return copy_user_generic((__force void *)dst,
- (__force void *)src, size);
+- (__force void *)src, size);
++ (__force const void *)src, size);
}
-@@ -222,33 +276,45 @@ __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
+ }
+
+@@ -222,33 +293,68 @@ __must_check unsigned long __clear_user(
static __must_check __always_inline int
__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
{
++ if ((int)size < 0)
++ return size;
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if (!__access_ok(VERIFY_READ, src, size))
++ return size;
++#endif
++
+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
+ src += PAX_USER_SHADOW_BASE;
return copy_user_generic(dst, (__force const void *)src, size);
@@ -10193,6 +9731,11 @@ index 316708d..8e13510 100644
+ if ((int)size < 0)
+ return size;
+
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if (!__access_ok(VERIFY_WRITE, dst, size))
++ return size;
++#endif
++
+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
+ dst += PAX_USER_SHADOW_BASE;
return copy_user_generic((__force void *)dst, src, size);
@@ -10211,6 +9754,11 @@ index 316708d..8e13510 100644
+ if ((int)size < 0)
+ return size;
+
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if (!__access_ok(VERIFY_READ, src, size))
++ return size;
++#endif
++
return __copy_user_nocache(dst, src, size, 1);
}
@@ -10222,6 +9770,11 @@ index 316708d..8e13510 100644
+ if ((int)size < 0)
+ return size;
+
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if (!__access_ok(VERIFY_READ, src, size))
++ return size;
++#endif
++
return __copy_user_nocache(dst, src, size, 0);
}
@@ -10230,10 +9783,230 @@ index 316708d..8e13510 100644
copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
#endif /* _ASM_X86_UACCESS_64_H */
-diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
-index 3d61e20..9507180 100644
---- a/arch/x86/include/asm/vgtod.h
-+++ b/arch/x86/include/asm/vgtod.h
+diff -urNp linux-2.6.37/arch/x86/include/asm/uaccess.h linux-2.6.37/arch/x86/include/asm/uaccess.h
+--- linux-2.6.37/arch/x86/include/asm/uaccess.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/include/asm/uaccess.h 2011-01-17 02:41:01.000000000 -0500
+@@ -8,12 +8,15 @@
+ #include <linux/thread_info.h>
+ #include <linux/prefetch.h>
+ #include <linux/string.h>
++#include <linux/sched.h>
+ #include <asm/asm.h>
+ #include <asm/page.h>
+
+ #define VERIFY_READ 0
+ #define VERIFY_WRITE 1
+
++extern void check_object_size(const void *ptr, unsigned long n, bool to);
++
+ /*
+ * The fs value determines whether argument validity checking should be
+ * performed or not. If get_fs() == USER_DS, checking is performed, with
+@@ -29,7 +32,12 @@
+
+ #define get_ds() (KERNEL_DS)
+ #define get_fs() (current_thread_info()->addr_limit)
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
++void __set_fs(mm_segment_t x);
++void set_fs(mm_segment_t x);
++#else
+ #define set_fs(x) (current_thread_info()->addr_limit = (x))
++#endif
+
+ #define segment_eq(a, b) ((a).seg == (b).seg)
+
+@@ -77,7 +85,33 @@
+ * checks that the pointer is in the user space range - after calling
+ * this function, memory access functions may still return -EFAULT.
+ */
+-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
++#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
++#define access_ok(type, addr, size) \
++({ \
++ long __size = size; \
++ unsigned long __addr = (unsigned long)addr; \
++ unsigned long __addr_ao = __addr & PAGE_MASK; \
++ unsigned long __end_ao = __addr + __size - 1; \
++ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
++ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
++ while(__addr_ao <= __end_ao) { \
++ char __c_ao; \
++ __addr_ao += PAGE_SIZE; \
++ if (__size > PAGE_SIZE) \
++ cond_resched(); \
++ if (__get_user(__c_ao, (char __user *)__addr)) \
++ break; \
++ if (type != VERIFY_WRITE) { \
++ __addr = __addr_ao; \
++ continue; \
++ } \
++ if (__put_user(__c_ao, (char __user *)__addr)) \
++ break; \
++ __addr = __addr_ao; \
++ } \
++ } \
++ __ret_ao; \
++})
+
+ /*
+ * The exception table consists of pairs of addresses: the first is the
+@@ -183,12 +217,20 @@ extern int __get_user_bad(void);
+ asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
+ : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
+
+-
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#define __copyuser_seg "%%gs:"
++#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
++#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
++#else
++#define __copyuser_seg
++#define __COPYUSER_SET_ES
++#define __COPYUSER_RESTORE_ES
++#endif
+
+ #ifdef CONFIG_X86_32
+ #define __put_user_asm_u64(x, addr, err, errret) \
+- asm volatile("1: movl %%eax,0(%2)\n" \
+- "2: movl %%edx,4(%2)\n" \
++ asm volatile("1: movl %%eax," __copyuser_seg"0(%2)\n" \
++ "2: movl %%edx," __copyuser_seg"4(%2)\n" \
+ "3:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "4: movl %3,%0\n" \
+@@ -200,8 +242,8 @@ extern int __get_user_bad(void);
+ : "A" (x), "r" (addr), "i" (errret), "0" (err))
+
+ #define __put_user_asm_ex_u64(x, addr) \
+- asm volatile("1: movl %%eax,0(%1)\n" \
+- "2: movl %%edx,4(%1)\n" \
++ asm volatile("1: movl %%eax," __copyuser_seg"0(%1)\n" \
++ "2: movl %%edx," __copyuser_seg"4(%1)\n" \
+ "3:\n" \
+ _ASM_EXTABLE(1b, 2b - 1b) \
+ _ASM_EXTABLE(2b, 3b - 2b) \
+@@ -374,7 +416,7 @@ do { \
+ } while (0)
+
+ #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
+- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
++ asm volatile("1: mov"itype" "__copyuser_seg"%2,%"rtype"1\n"\
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: mov %3,%0\n" \
+@@ -382,7 +424,7 @@ do { \
+ " jmp 2b\n" \
+ ".previous\n" \
+ _ASM_EXTABLE(1b, 3b) \
+- : "=r" (err), ltype(x) \
++ : "=r" (err), ltype (x) \
+ : "m" (__m(addr)), "i" (errret), "0" (err))
+
+ #define __get_user_size_ex(x, ptr, size) \
+@@ -407,7 +449,7 @@ do { \
+ } while (0)
+
+ #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
+- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
++ asm volatile("1: mov"itype" "__copyuser_seg"%1,%"rtype"0\n"\
+ "2:\n" \
+ _ASM_EXTABLE(1b, 2b - 1b) \
+ : ltype(x) : "m" (__m(addr)))
+@@ -424,13 +466,24 @@ do { \
+ int __gu_err; \
+ unsigned long __gu_val; \
+ __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
+- (x) = (__force __typeof__(*(ptr)))__gu_val; \
++ (x) = (__typeof__(*(ptr)))__gu_val; \
+ __gu_err; \
+ })
+
+ /* FIXME: this hack is definitely wrong -AK */
+ struct __large_struct { unsigned long buf[100]; };
+-#define __m(x) (*(struct __large_struct __user *)(x))
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#define ____m(x) \
++({ \
++ unsigned long ____x = (unsigned long)(x); \
++ if (____x < PAX_USER_SHADOW_BASE) \
++ ____x += PAX_USER_SHADOW_BASE; \
++ (void __user *)____x; \
++})
++#else
++#define ____m(x) (x)
++#endif
++#define __m(x) (*(struct __large_struct __user *)____m(x))
+
+ /*
+ * Tell gcc we read from memory instead of writing: this is because
+@@ -438,7 +491,7 @@ struct __large_struct { unsigned long bu
+ * aliasing issues.
+ */
+ #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
+- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
++ asm volatile("1: mov"itype" %"rtype"1," __copyuser_seg"%2\n"\
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: mov %3,%0\n" \
+@@ -446,10 +499,10 @@ struct __large_struct { unsigned long bu
+ ".previous\n" \
+ _ASM_EXTABLE(1b, 3b) \
+ : "=r"(err) \
+- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
++ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
+
+ #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
+- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
++ asm volatile("1: mov"itype" %"rtype"0," __copyuser_seg"%1\n"\
+ "2:\n" \
+ _ASM_EXTABLE(1b, 2b - 1b) \
+ : : ltype(x), "m" (__m(addr)))
+@@ -488,8 +541,12 @@ struct __large_struct { unsigned long bu
+ * On error, the variable @x is set to zero.
+ */
+
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#define __get_user(x, ptr) get_user((x), (ptr))
++#else
+ #define __get_user(x, ptr) \
+ __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
++#endif
+
+ /**
+ * __put_user: - Write a simple value into user space, with less checking.
+@@ -511,8 +568,12 @@ struct __large_struct { unsigned long bu
+ * Returns zero on success, or -EFAULT on error.
+ */
+
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#define __put_user(x, ptr) put_user((x), (ptr))
++#else
+ #define __put_user(x, ptr) \
+ __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
++#endif
+
+ #define __get_user_unaligned __get_user
+ #define __put_user_unaligned __put_user
+@@ -530,7 +591,7 @@ struct __large_struct { unsigned long bu
+ #define get_user_ex(x, ptr) do { \
+ unsigned long __gue_val; \
+ __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
+- (x) = (__force __typeof__(*(ptr)))__gue_val; \
++ (x) = (__typeof__(*(ptr)))__gue_val; \
+ } while (0)
+
+ #ifdef CONFIG_X86_WP_WORKS_OK
+@@ -567,6 +628,7 @@ extern struct movsl_mask {
+
+ #define ARCH_HAS_NOCACHE_UACCESS 1
+
++#define ARCH_HAS_SORT_EXTABLE
+ #ifdef CONFIG_X86_32
+ # include "uaccess_32.h"
+ #else
+diff -urNp linux-2.6.37/arch/x86/include/asm/vgtod.h linux-2.6.37/arch/x86/include/asm/vgtod.h
+--- linux-2.6.37/arch/x86/include/asm/vgtod.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/include/asm/vgtod.h 2011-01-17 02:41:01.000000000 -0500
@@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
int sysctl_enabled;
struct timezone sys_tz;
@@ -10242,22 +10015,9 @@ index 3d61e20..9507180 100644
cycle_t (*vread)(void);
cycle_t cycle_last;
cycle_t mask;
-diff --git a/arch/x86/include/asm/vmi.h b/arch/x86/include/asm/vmi.h
-index 61e08c0..b0da582 100644
---- a/arch/x86/include/asm/vmi.h
-+++ b/arch/x86/include/asm/vmi.h
-@@ -191,6 +191,7 @@ struct vrom_header {
- u8 reserved[96]; /* Reserved for headers */
- char vmi_init[8]; /* VMI_Init jump point */
- char get_reloc[8]; /* VMI_GetRelocationInfo jump point */
-+ char rom_data[8048]; /* rest of the option ROM */
- } __attribute__((packed));
-
- struct pnp_header {
-diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
-index d0983d2..1f7c9e9 100644
---- a/arch/x86/include/asm/vsyscall.h
-+++ b/arch/x86/include/asm/vsyscall.h
+diff -urNp linux-2.6.37/arch/x86/include/asm/vsyscall.h linux-2.6.37/arch/x86/include/asm/vsyscall.h
+--- linux-2.6.37/arch/x86/include/asm/vsyscall.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/include/asm/vsyscall.h 2011-01-17 02:41:01.000000000 -0500
@@ -15,9 +15,10 @@ enum vsyscall_num {
#ifdef __KERNEL__
@@ -10288,24 +10048,22 @@ index d0983d2..1f7c9e9 100644
#endif /* __KERNEL__ */
#endif /* _ASM_X86_VSYSCALL_H */
-diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
-index 2c4390c..0dda6eb 100644
---- a/arch/x86/include/asm/xsave.h
-+++ b/arch/x86/include/asm/xsave.h
-@@ -59,6 +59,12 @@ static inline int fpu_xrstor_checking(struct fpu *fpu)
- static inline int xsave_user(struct xsave_struct __user *buf)
+diff -urNp linux-2.6.37/arch/x86/include/asm/xsave.h linux-2.6.37/arch/x86/include/asm/xsave.h
+--- linux-2.6.37/arch/x86/include/asm/xsave.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/include/asm/xsave.h 2011-01-17 02:41:01.000000000 -0500
+@@ -65,6 +65,11 @@ static inline int xsave_user(struct xsav
{
int err;
-+
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
+ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
+#endif
+
- __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
- "2:\n"
- ".section .fixup,\"ax\"\n"
-@@ -85,6 +91,11 @@ static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
+ /*
+ * Clear the xsave header first, so that reserved fields are
+ * initialized to zero.
+@@ -100,6 +105,11 @@ static inline int xrestore_user(struct x
u32 lmask = mask;
u32 hmask = mask >> 32;
@@ -10317,11 +10075,136 @@ index 2c4390c..0dda6eb 100644
__asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
"2:\n"
".section .fixup,\"ax\"\n"
-diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
-index c05872a..eb10001 100644
---- a/arch/x86/kernel/acpi/boot.c
-+++ b/arch/x86/kernel/acpi/boot.c
-@@ -1472,7 +1472,7 @@ static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
+diff -urNp linux-2.6.37/arch/x86/Kconfig linux-2.6.37/arch/x86/Kconfig
+--- linux-2.6.37/arch/x86/Kconfig 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/Kconfig 2011-01-17 02:41:01.000000000 -0500
+@@ -225,7 +225,7 @@ config X86_TRAMPOLINE
+
+ config X86_32_LAZY_GS
+ def_bool y
+- depends on X86_32 && !CC_STACKPROTECTOR
++ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
+
+ config ARCH_HWEIGHT_CFLAGS
+ string
+@@ -992,7 +992,7 @@ choice
+
+ config NOHIGHMEM
+ bool "off"
+- depends on !X86_NUMAQ
++ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
+ ---help---
+ Linux can use up to 64 Gigabytes of physical memory on x86 systems.
+ However, the address space of 32-bit x86 processors is only 4
+@@ -1029,7 +1029,7 @@ config NOHIGHMEM
+
+ config HIGHMEM4G
+ bool "4GB"
+- depends on !X86_NUMAQ
++ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
+ ---help---
+ Select this if you have a 32-bit processor and between 1 and 4
+ gigabytes of physical RAM.
+@@ -1083,7 +1083,7 @@ config PAGE_OFFSET
+ hex
+ default 0xB0000000 if VMSPLIT_3G_OPT
+ default 0x80000000 if VMSPLIT_2G
+- default 0x78000000 if VMSPLIT_2G_OPT
++ default 0x70000000 if VMSPLIT_2G_OPT
+ default 0x40000000 if VMSPLIT_1G
+ default 0xC0000000
+ depends on X86_32
+@@ -1427,7 +1427,7 @@ config ARCH_USES_PG_UNCACHED
+
+ config EFI
+ bool "EFI runtime service support"
+- depends on ACPI
++ depends on ACPI && !PAX_KERNEXEC
+ ---help---
+ This enables the kernel to use EFI runtime services that are
+ available (such as the EFI variable services).
+@@ -1457,6 +1457,7 @@ config SECCOMP
+
+ config CC_STACKPROTECTOR
+ bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
++ depends on X86_64 || !PAX_MEMORY_UDEREF
+ ---help---
+ This option turns on the -fstack-protector GCC feature. This
+ feature puts, at the beginning of functions, a canary value on
+@@ -1514,6 +1515,7 @@ config KEXEC_JUMP
+ config PHYSICAL_START
+ hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
+ default "0x1000000"
++ range 0x400000 0x40000000
+ ---help---
+ This gives the physical address where the kernel is loaded.
+
+@@ -1577,6 +1579,7 @@ config X86_NEED_RELOCS
+ config PHYSICAL_ALIGN
+ hex "Alignment value to which kernel should be aligned" if X86_32
+ default "0x1000000"
++ range 0x400000 0x1000000 if PAX_KERNEXEC
+ range 0x2000 0x1000000
+ ---help---
+ This value puts the alignment restrictions on physical address
+@@ -1608,9 +1611,10 @@ config HOTPLUG_CPU
+ Say N if you want to disable CPU hotplug.
+
+ config COMPAT_VDSO
+- def_bool y
++ def_bool n
+ prompt "Compat VDSO support"
+ depends on X86_32 || IA32_EMULATION
++ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
+ ---help---
+ Map the 32-bit VDSO to the predictable old-style address too.
+
+diff -urNp linux-2.6.37/arch/x86/Kconfig.cpu linux-2.6.37/arch/x86/Kconfig.cpu
+--- linux-2.6.37/arch/x86/Kconfig.cpu 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/Kconfig.cpu 2011-01-17 02:41:01.000000000 -0500
+@@ -336,7 +336,7 @@ config X86_PPRO_FENCE
+
+ config X86_F00F_BUG
+ def_bool y
+- depends on M586MMX || M586TSC || M586 || M486 || M386
++ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
+
+ config X86_INVD_BUG
+ def_bool y
+@@ -360,7 +360,7 @@ config X86_POPAD_OK
+
+ config X86_ALIGNMENT_16
+ def_bool y
+- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
++ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
+
+ config X86_INTEL_USERCOPY
+ def_bool y
+@@ -406,7 +406,7 @@ config X86_CMPXCHG64
+ # generates cmov.
+ config X86_CMOV
+ def_bool y
+- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
++ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
+
+ config X86_MINIMUM_CPU_FAMILY
+ int
+diff -urNp linux-2.6.37/arch/x86/Kconfig.debug linux-2.6.37/arch/x86/Kconfig.debug
+--- linux-2.6.37/arch/x86/Kconfig.debug 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/Kconfig.debug 2011-01-17 02:41:01.000000000 -0500
+@@ -101,7 +101,7 @@ config X86_PTDUMP
+ config DEBUG_RODATA
+ bool "Write protect kernel read-only data structures"
+ default y
+- depends on DEBUG_KERNEL
++ depends on DEBUG_KERNEL && BROKEN
+ ---help---
+ Mark the kernel read-only data as write-protected in the pagetables,
+ in order to catch accidental (and incorrect) writes to such const
+diff -urNp linux-2.6.37/arch/x86/kernel/acpi/boot.c linux-2.6.37/arch/x86/kernel/acpi/boot.c
+--- linux-2.6.37/arch/x86/kernel/acpi/boot.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/acpi/boot.c 2011-01-17 02:41:01.000000000 -0500
+@@ -1498,7 +1498,7 @@ static struct dmi_system_id __initdata a
DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6715b"),
},
},
@@ -10330,29 +10213,18 @@ index c05872a..eb10001 100644
};
/*
-diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
-index 580b4e2..e0a70ff 100644
---- a/arch/x86/kernel/acpi/realmode/wakeup.S
-+++ b/arch/x86/kernel/acpi/realmode/wakeup.S
-@@ -104,7 +104,7 @@ _start:
- movl %eax, %ecx
- orl %edx, %ecx
- jz 1f
-- movl $0xc0000080, %ecx
-+ mov $MSR_EFER, %ecx
- wrmsr
- 1:
-
-diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
-index fcc3c61..13001d3 100644
---- a/arch/x86/kernel/acpi/sleep.c
-+++ b/arch/x86/kernel/acpi/sleep.c
-@@ -11,11 +11,12 @@
+diff -urNp linux-2.6.37/arch/x86/kernel/acpi/sleep.c linux-2.6.37/arch/x86/kernel/acpi/sleep.c
+--- linux-2.6.37/arch/x86/kernel/acpi/sleep.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/acpi/sleep.c 2011-01-17 02:41:01.000000000 -0500
+@@ -12,6 +12,7 @@
#include <linux/cpumask.h>
#include <asm/segment.h>
#include <asm/desc.h>
+#include <asm/e820.h>
+ #ifdef CONFIG_X86_32
+ #include <asm/pgtable.h>
+@@ -20,7 +21,7 @@
#include "realmode/wakeup.h"
#include "sleep.h"
@@ -10361,7 +10233,7 @@ index fcc3c61..13001d3 100644
unsigned long acpi_realmode_flags;
/* address in low memory of the wakeup routine. */
-@@ -96,8 +97,12 @@ int acpi_save_state_mem(void)
+@@ -101,8 +102,12 @@ int acpi_save_state_mem(void)
header->trampoline_segment = setup_trampoline() >> 4;
#ifdef CONFIG_SMP
stack_start.sp = temp_stack + sizeof(temp_stack);
@@ -10374,10 +10246,9 @@ index fcc3c61..13001d3 100644
initial_gs = per_cpu_offset(smp_processor_id());
#endif
initial_code = (unsigned long)wakeup_long64;
-diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
-index 13ab720..95d5442 100644
---- a/arch/x86/kernel/acpi/wakeup_32.S
-+++ b/arch/x86/kernel/acpi/wakeup_32.S
+diff -urNp linux-2.6.37/arch/x86/kernel/acpi/wakeup_32.S linux-2.6.37/arch/x86/kernel/acpi/wakeup_32.S
+--- linux-2.6.37/arch/x86/kernel/acpi/wakeup_32.S 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/acpi/wakeup_32.S 2011-01-17 02:41:01.000000000 -0500
@@ -30,13 +30,11 @@ wakeup_pmode_return:
# and restore the stack ... but you need gdt for this to work
movl saved_context_esp, %esp
@@ -10394,11 +10265,10 @@ index 13ab720..95d5442 100644
bogus_magic:
jmp bogus_magic
-diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
-index 7023773..a29b372 100644
---- a/arch/x86/kernel/alternative.c
-+++ b/arch/x86/kernel/alternative.c
-@@ -247,7 +247,7 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
+diff -urNp linux-2.6.37/arch/x86/kernel/alternative.c linux-2.6.37/arch/x86/kernel/alternative.c
+--- linux-2.6.37/arch/x86/kernel/alternative.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/alternative.c 2011-01-17 02:41:01.000000000 -0500
+@@ -248,7 +248,7 @@ static void alternatives_smp_lock(const
if (!*poff || ptr < text || ptr >= text_end)
continue;
/* turn DS segment override prefix into lock prefix */
@@ -10407,7 +10277,7 @@ index 7023773..a29b372 100644
text_poke(ptr, ((unsigned char []){0xf0}), 1);
};
mutex_unlock(&text_mutex);
-@@ -268,7 +268,7 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
+@@ -269,7 +269,7 @@ static void alternatives_smp_unlock(cons
if (!*poff || ptr < text || ptr >= text_end)
continue;
/* turn lock prefix into DS segment override prefix */
@@ -10416,7 +10286,7 @@ index 7023773..a29b372 100644
text_poke(ptr, ((unsigned char []){0x3E}), 1);
};
mutex_unlock(&text_mutex);
-@@ -436,7 +436,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
+@@ -437,7 +437,7 @@ void __init_or_module apply_paravirt(str
BUG_ON(p->len > MAX_PATCH_LEN);
/* prep the buffer with the original instructions */
@@ -10425,7 +10295,7 @@ index 7023773..a29b372 100644
used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
(unsigned long)p->instr, p->len);
-@@ -504,7 +504,7 @@ void __init alternative_instructions(void)
+@@ -505,7 +505,7 @@ void __init alternative_instructions(voi
if (smp_alt_once)
free_init_pages("SMP alternatives",
(unsigned long)__smp_locks,
@@ -10434,12 +10304,12 @@ index 7023773..a29b372 100644
restart_nmi();
}
-@@ -521,13 +521,17 @@ void __init alternative_instructions(void)
+@@ -522,13 +522,17 @@ void __init alternative_instructions(voi
* instructions. And on the local CPU you need to be protected again NMI or MCE
* handlers seeing an inconsistent instruction while you patch.
*/
--static void *__init_or_module text_poke_early(void *addr, const void *opcode,
-+static void *__kprobes text_poke_early(void *addr, const void *opcode,
+-void *__init_or_module text_poke_early(void *addr, const void *opcode,
++void *__kprobes text_poke_early(void *addr, const void *opcode,
size_t len)
{
unsigned long flags;
@@ -10454,7 +10324,7 @@ index 7023773..a29b372 100644
local_irq_restore(flags);
/* Could also do a CLFLUSH here to speed up CPU recovery; but
that causes hangs on some VIA CPUs. */
-@@ -549,36 +553,22 @@ static void *__init_or_module text_poke_early(void *addr, const void *opcode,
+@@ -550,36 +554,22 @@ void *__init_or_module text_poke_early(v
*/
void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
{
@@ -10495,15 +10365,26 @@ index 7023773..a29b372 100644
for (i = 0; i < len; i++)
- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
- local_irq_restore(flags);
-+ BUG_ON(((char *)vaddr)[i] != ((char *)opcode)[i]);
++ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
return addr;
}
-diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
-index 4424c73..e90c367 100644
---- a/arch/x86/kernel/amd_iommu.c
-+++ b/arch/x86/kernel/amd_iommu.c
-@@ -2286,7 +2286,7 @@ static void prealloc_protection_domains(void)
+@@ -645,9 +635,9 @@ void *__kprobes text_poke_smp(void *addr
+ #if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL)
+
+ #ifdef CONFIG_X86_64
+-unsigned char ideal_nop5[5] = { 0x66, 0x66, 0x66, 0x66, 0x90 };
++unsigned char ideal_nop5[5] __read_only = { 0x66, 0x66, 0x66, 0x66, 0x90 };
+ #else
+-unsigned char ideal_nop5[5] = { 0x3e, 0x8d, 0x74, 0x26, 0x00 };
++unsigned char ideal_nop5[5] __read_only = { 0x3e, 0x8d, 0x74, 0x26, 0x00 };
+ #endif
+
+ void __init arch_init_ideal_nop5(void)
+diff -urNp linux-2.6.37/arch/x86/kernel/amd_iommu.c linux-2.6.37/arch/x86/kernel/amd_iommu.c
+--- linux-2.6.37/arch/x86/kernel/amd_iommu.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/amd_iommu.c 2011-01-17 02:41:01.000000000 -0500
+@@ -2286,7 +2286,7 @@ static void prealloc_protection_domains(
}
}
@@ -10512,20 +10393,19 @@ index 4424c73..e90c367 100644
.alloc_coherent = alloc_coherent,
.free_coherent = free_coherent,
.map_page = map_page,
-diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
-index ad1515d..96832b6 100644
---- a/arch/x86/kernel/apic/io_apic.c
-+++ b/arch/x86/kernel/apic/io_apic.c
-@@ -696,7 +696,7 @@ struct IO_APIC_route_entry **alloc_ioapic_entries(void)
+diff -urNp linux-2.6.37/arch/x86/kernel/apic/io_apic.c linux-2.6.37/arch/x86/kernel/apic/io_apic.c
+--- linux-2.6.37/arch/x86/kernel/apic/io_apic.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/apic/io_apic.c 2011-01-17 02:41:01.000000000 -0500
+@@ -597,7 +597,7 @@ struct IO_APIC_route_entry **alloc_ioapi
ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!ioapic_entries)
- return 0;
+ return NULL;
for (apic = 0; apic < nr_ioapics; apic++) {
ioapic_entries[apic] =
-@@ -713,7 +713,7 @@ nomem:
+@@ -614,7 +614,7 @@ nomem:
kfree(ioapic_entries[apic]);
kfree(ioapic_entries);
@@ -10534,7 +10414,7 @@ index ad1515d..96832b6 100644
}
/*
-@@ -1123,7 +1123,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
+@@ -1024,7 +1024,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
}
EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
@@ -10543,7 +10423,7 @@ index ad1515d..96832b6 100644
{
/* Used to the online set of cpus does not change
* during assign_irq_vector.
-@@ -1131,7 +1131,7 @@ void lock_vector_lock(void)
+@@ -1032,7 +1032,7 @@ void lock_vector_lock(void)
raw_spin_lock(&vector_lock);
}
@@ -10552,10 +10432,9 @@ index ad1515d..96832b6 100644
{
raw_spin_unlock(&vector_lock);
}
-diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
-index c4f9182..9e252a4 100644
---- a/arch/x86/kernel/apm_32.c
-+++ b/arch/x86/kernel/apm_32.c
+diff -urNp linux-2.6.37/arch/x86/kernel/apm_32.c linux-2.6.37/arch/x86/kernel/apm_32.c
+--- linux-2.6.37/arch/x86/kernel/apm_32.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/apm_32.c 2011-01-17 02:41:01.000000000 -0500
@@ -410,7 +410,7 @@ static DEFINE_MUTEX(apm_mutex);
* This is for buggy BIOS's that refer to (real mode) segment 0x40
* even though they are called in protected mode.
@@ -10588,7 +10467,7 @@ index c4f9182..9e252a4 100644
put_cpu();
return call->eax & 0xff;
-@@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void *_call)
+@@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void
BUG_ON(cpu != 0);
gdt = get_cpu_gdt_table(cpu);
save_desc_40 = gdt[0x40 / 8];
@@ -10599,7 +10478,7 @@ index c4f9182..9e252a4 100644
apm_irq_save(flags);
APM_DO_SAVE_SEGS;
-@@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void *_call)
+@@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void
&call->eax);
APM_DO_RESTORE_SEGS;
apm_irq_restore(flags);
@@ -10620,7 +10499,7 @@ index c4f9182..9e252a4 100644
0xb8, 0x00, 0x10, /* movw $0x1000,ax */
0x8e, 0xd0, /* movw ax,ss */
0xbc, 0x00, 0xf0, /* movw $0xf000,sp */
-@@ -1931,7 +1945,10 @@ static const struct file_operations apm_bios_fops = {
+@@ -1932,7 +1946,10 @@ static const struct file_operations apm_
static struct miscdevice apm_device = {
APM_MINOR_DEV,
"apm_bios",
@@ -10632,7 +10511,7 @@ index c4f9182..9e252a4 100644
};
-@@ -2252,7 +2269,7 @@ static struct dmi_system_id __initdata apm_dmi_table[] = {
+@@ -2253,7 +2270,7 @@ static struct dmi_system_id __initdata a
{ DMI_MATCH(DMI_SYS_VENDOR, "IBM"), },
},
@@ -10641,7 +10520,7 @@ index c4f9182..9e252a4 100644
};
/*
-@@ -2355,12 +2372,15 @@ static int __init apm_init(void)
+@@ -2356,12 +2373,15 @@ static int __init apm_init(void)
* code to that CPU.
*/
gdt = get_cpu_gdt_table(0);
@@ -10657,11 +10536,10 @@ index c4f9182..9e252a4 100644
proc_create("apm", 0, NULL, &apm_file_ops);
-diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
-index dfdbf64..5b9f997 100644
---- a/arch/x86/kernel/asm-offsets_32.c
-+++ b/arch/x86/kernel/asm-offsets_32.c
-@@ -115,6 +115,11 @@ void foo(void)
+diff -urNp linux-2.6.37/arch/x86/kernel/asm-offsets_32.c linux-2.6.37/arch/x86/kernel/asm-offsets_32.c
+--- linux-2.6.37/arch/x86/kernel/asm-offsets_32.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/asm-offsets_32.c 2011-01-17 02:41:01.000000000 -0500
+@@ -113,6 +113,11 @@ void foo(void)
OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
@@ -10673,10 +10551,9 @@ index dfdbf64..5b9f997 100644
#endif
#ifdef CONFIG_XEN
-diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
-index 4a6aeed..31b7fb8 100644
---- a/arch/x86/kernel/asm-offsets_64.c
-+++ b/arch/x86/kernel/asm-offsets_64.c
+diff -urNp linux-2.6.37/arch/x86/kernel/asm-offsets_64.c linux-2.6.37/arch/x86/kernel/asm-offsets_64.c
+--- linux-2.6.37/arch/x86/kernel/asm-offsets_64.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/asm-offsets_64.c 2011-01-17 02:41:01.000000000 -0500
@@ -63,6 +63,18 @@ int main(void)
OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
@@ -10704,26 +10581,10 @@ index 4a6aeed..31b7fb8 100644
DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
BLANK();
DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
-diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
-index 3a785da..68d7133 100644
---- a/arch/x86/kernel/cpu/Makefile
-+++ b/arch/x86/kernel/cpu/Makefile
-@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
- CFLAGS_REMOVE_perf_event.o = -pg
- endif
-
--# Make sure load_percpu_segment has no stackprotector
--nostackp := $(call cc-option, -fno-stack-protector)
--CFLAGS_common.o := $(nostackp)
--
- obj-y := intel_cacheinfo.o addon_cpuid_features.o
- obj-y += proc.o capflags.o powerflags.o common.o
- obj-y += vmware.o hypervisor.o sched.o mshyperv.o
-diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
-index d938871..1e97c91 100644
---- a/arch/x86/kernel/cpu/common.c
-+++ b/arch/x86/kernel/cpu/common.c
-@@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
+diff -urNp linux-2.6.37/arch/x86/kernel/cpu/common.c linux-2.6.37/arch/x86/kernel/cpu/common.c
+--- linux-2.6.37/arch/x86/kernel/cpu/common.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/cpu/common.c 2011-01-17 02:41:01.000000000 -0500
+@@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
@@ -10784,7 +10645,7 @@ index d938871..1e97c91 100644
static int __init x86_xsave_setup(char *s)
{
setup_clear_cpu_cap(X86_FEATURE_XSAVE);
-@@ -344,7 +290,7 @@ void switch_to_new_gdt(int cpu)
+@@ -352,7 +298,7 @@ void switch_to_new_gdt(int cpu)
{
struct desc_ptr gdt_descr;
@@ -10793,7 +10654,7 @@ index d938871..1e97c91 100644
gdt_descr.size = GDT_SIZE - 1;
load_gdt(&gdt_descr);
/* Reload the per-cpu base */
-@@ -802,6 +748,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
+@@ -825,6 +771,10 @@ static void __cpuinit identify_cpu(struc
/* Filter out anything that depends on CPUID levels we don't have */
filter_cpuid_features(c, true);
@@ -10804,7 +10665,16 @@ index d938871..1e97c91 100644
/* If the model name is still unset, do table lookup. */
if (!c->x86_model_id[0]) {
const char *p;
-@@ -1117,7 +1067,7 @@ void __cpuinit cpu_init(void)
+@@ -1085,7 +1035,7 @@ struct pt_regs * __cpuinit idle_regs(str
+ {
+ memset(regs, 0, sizeof(struct pt_regs));
+ regs->fs = __KERNEL_PERCPU;
+- regs->gs = __KERNEL_STACK_CANARY;
++ savesegment(gs, regs->gs);
+
+ return regs;
+ }
+@@ -1140,7 +1090,7 @@ void __cpuinit cpu_init(void)
int i;
cpu = stack_smp_processor_id();
@@ -10813,7 +10683,7 @@ index d938871..1e97c91 100644
oist = &per_cpu(orig_ist, cpu);
#ifdef CONFIG_NUMA
-@@ -1143,7 +1093,7 @@ void __cpuinit cpu_init(void)
+@@ -1166,7 +1116,7 @@ void __cpuinit cpu_init(void)
switch_to_new_gdt(cpu);
loadsegment(fs, 0);
@@ -10822,7 +10692,15 @@ index d938871..1e97c91 100644
memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
syscall_init();
-@@ -1205,7 +1155,7 @@ void __cpuinit cpu_init(void)
+@@ -1175,7 +1125,6 @@ void __cpuinit cpu_init(void)
+ wrmsrl(MSR_KERNEL_GS_BASE, 0);
+ barrier();
+
+- x86_configure_nx();
+ if (cpu != 0)
+ enable_x2apic();
+
+@@ -1229,7 +1178,7 @@ void __cpuinit cpu_init(void)
{
int cpu = smp_processor_id();
struct task_struct *curr = current;
@@ -10831,11 +10709,10 @@ index d938871..1e97c91 100644
struct thread_struct *thread = &curr->thread;
if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
-diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
-index 5384b04..0c28c59 100644
---- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
-+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
-@@ -484,7 +484,7 @@ static const struct dmi_system_id sw_any_bug_dmi_table[] = {
+diff -urNp linux-2.6.37/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c linux-2.6.37/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+--- linux-2.6.37/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c 2011-01-17 02:41:01.000000000 -0500
+@@ -481,7 +481,7 @@ static const struct dmi_system_id sw_any
DMI_MATCH(DMI_PRODUCT_NAME, "X6DLP"),
},
},
@@ -10844,10 +10721,9 @@ index 5384b04..0c28c59 100644
};
static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
-diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
-index 9b1ff37..3c1bac4 100644
---- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
-+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
+diff -urNp linux-2.6.37/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c linux-2.6.37/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
+--- linux-2.6.37/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 2011-01-17 02:41:01.000000000 -0500
@@ -226,7 +226,7 @@ static struct cpu_model models[] =
{ &cpu_ids[CPU_MP4HT_D0], NULL, 0, NULL },
{ &cpu_ids[CPU_MP4HT_E0], NULL, 0, NULL },
@@ -10857,11 +10733,10 @@ index 9b1ff37..3c1bac4 100644
};
#undef _BANIAS
#undef BANIAS
-diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
-index b438944..aa576fa 100644
---- a/arch/x86/kernel/cpu/intel.c
-+++ b/arch/x86/kernel/cpu/intel.c
-@@ -161,7 +161,7 @@ static void __cpuinit trap_init_f00f_bug(void)
+diff -urNp linux-2.6.37/arch/x86/kernel/cpu/intel.c linux-2.6.37/arch/x86/kernel/cpu/intel.c
+--- linux-2.6.37/arch/x86/kernel/cpu/intel.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/cpu/intel.c 2011-01-17 02:41:01.000000000 -0500
+@@ -161,7 +161,7 @@ static void __cpuinit trap_init_f00f_bug
* Update the IDT descriptor and reload the IDT so that
* it uses the read-only mapped virtual address.
*/
@@ -10870,11 +10745,32 @@ index b438944..aa576fa 100644
load_idt(&idt_descr);
}
#endif
-diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
-index 18cc425..ebfbac1 100644
---- a/arch/x86/kernel/cpu/mcheck/mce.c
-+++ b/arch/x86/kernel/cpu/mcheck/mce.c
-@@ -219,7 +219,7 @@ static void print_mce(struct mce *m)
+diff -urNp linux-2.6.37/arch/x86/kernel/cpu/Makefile linux-2.6.37/arch/x86/kernel/cpu/Makefile
+--- linux-2.6.37/arch/x86/kernel/cpu/Makefile 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/cpu/Makefile 2011-01-17 02:41:01.000000000 -0500
+@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
+ CFLAGS_REMOVE_perf_event.o = -pg
+ endif
+
+-# Make sure load_percpu_segment has no stackprotector
+-nostackp := $(call cc-option, -fno-stack-protector)
+-CFLAGS_common.o := $(nostackp)
+-
+ obj-y := intel_cacheinfo.o scattered.o topology.o
+ obj-y += proc.o capflags.o powerflags.o common.o
+ obj-y += vmware.o hypervisor.o sched.o mshyperv.o
+diff -urNp linux-2.6.37/arch/x86/kernel/cpu/mcheck/mce.c linux-2.6.37/arch/x86/kernel/cpu/mcheck/mce.c
+--- linux-2.6.37/arch/x86/kernel/cpu/mcheck/mce.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/cpu/mcheck/mce.c 2011-01-25 20:24:56.000000000 -0500
+@@ -45,6 +45,7 @@
+ #include <asm/ipi.h>
+ #include <asm/mce.h>
+ #include <asm/msr.h>
++#include <asm/local.h>
+
+ #include "mce-internal.h"
+
+@@ -219,7 +220,7 @@ static void print_mce(struct mce *m)
!(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
m->cs, m->ip);
@@ -10883,12 +10779,12 @@ index 18cc425..ebfbac1 100644
print_symbol("{%s}", m->ip);
pr_cont("\n");
}
-@@ -1471,14 +1471,14 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
+@@ -1460,14 +1461,14 @@ void __cpuinit mcheck_cpu_init(struct cp
*/
static DEFINE_SPINLOCK(mce_state_lock);
-static int open_count; /* #times opened */
-+static atomic_t open_count; /* #times opened */
++static local_t open_count; /* #times opened */
static int open_exclu; /* already open exclusive? */
static int mce_open(struct inode *inode, struct file *file)
@@ -10896,29 +10792,29 @@ index 18cc425..ebfbac1 100644
spin_lock(&mce_state_lock);
- if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
-+ if (open_exclu || (atomic_read(&open_count) && (file->f_flags & O_EXCL))) {
++ if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
spin_unlock(&mce_state_lock);
return -EBUSY;
-@@ -1486,7 +1486,7 @@ static int mce_open(struct inode *inode, struct file *file)
+@@ -1475,7 +1476,7 @@ static int mce_open(struct inode *inode,
if (file->f_flags & O_EXCL)
open_exclu = 1;
- open_count++;
-+ atomic_inc(&open_count);
++ local_inc(&open_count);
spin_unlock(&mce_state_lock);
-@@ -1497,7 +1497,7 @@ static int mce_release(struct inode *inode, struct file *file)
+@@ -1486,7 +1487,7 @@ static int mce_release(struct inode *ino
{
spin_lock(&mce_state_lock);
- open_count--;
-+ atomic_dec(&open_count);
++ local_dec(&open_count);
open_exclu = 0;
spin_unlock(&mce_state_lock);
-@@ -1683,6 +1683,7 @@ static struct miscdevice mce_log_device = {
+@@ -1673,6 +1674,7 @@ static struct miscdevice mce_log_device
MISC_MCELOG_MINOR,
"mcelog",
&mce_chrdev_ops,
@@ -10926,11 +10822,10 @@ index 18cc425..ebfbac1 100644
};
/*
-diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
-index fd31a44..e4817d8 100644
---- a/arch/x86/kernel/cpu/mtrr/generic.c
-+++ b/arch/x86/kernel/cpu/mtrr/generic.c
-@@ -28,7 +28,7 @@ static struct fixed_range_block fixed_range_blocks[] = {
+diff -urNp linux-2.6.37/arch/x86/kernel/cpu/mtrr/generic.c linux-2.6.37/arch/x86/kernel/cpu/mtrr/generic.c
+--- linux-2.6.37/arch/x86/kernel/cpu/mtrr/generic.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/cpu/mtrr/generic.c 2011-01-17 02:41:01.000000000 -0500
+@@ -28,7 +28,7 @@ static struct fixed_range_block fixed_ra
{ MSR_MTRRfix64K_00000, 1 }, /* one 64k MTRR */
{ MSR_MTRRfix16K_80000, 2 }, /* two 16k MTRRs */
{ MSR_MTRRfix4K_C0000, 8 }, /* eight 4k MTRRs */
@@ -10939,10 +10834,9 @@ index fd31a44..e4817d8 100644
};
static unsigned long smp_changes_mask;
-diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
-index 01c0f3e..0e18a24 100644
---- a/arch/x86/kernel/cpu/mtrr/main.c
-+++ b/arch/x86/kernel/cpu/mtrr/main.c
+diff -urNp linux-2.6.37/arch/x86/kernel/cpu/mtrr/main.c linux-2.6.37/arch/x86/kernel/cpu/mtrr/main.c
+--- linux-2.6.37/arch/x86/kernel/cpu/mtrr/main.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/cpu/mtrr/main.c 2011-01-17 02:41:01.000000000 -0500
@@ -61,7 +61,7 @@ static DEFINE_MUTEX(mtrr_mutex);
u64 size_or_mask, size_and_mask;
static bool mtrr_aps_delayed_init;
@@ -10952,10 +10846,9 @@ index 01c0f3e..0e18a24 100644
const struct mtrr_ops *mtrr_if;
-diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
-index df5e41f..d94607c 100644
---- a/arch/x86/kernel/cpu/mtrr/mtrr.h
-+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
+diff -urNp linux-2.6.37/arch/x86/kernel/cpu/mtrr/mtrr.h linux-2.6.37/arch/x86/kernel/cpu/mtrr/mtrr.h
+--- linux-2.6.37/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-01-17 02:41:01.000000000 -0500
@@ -12,19 +12,19 @@
extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
@@ -10984,23 +10877,9 @@ index df5e41f..d94607c 100644
};
extern int generic_get_free_region(unsigned long base, unsigned long size,
-diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
-index 5db5b7d..1e1f17e 100644
---- a/arch/x86/kernel/cpu/perf_event.c
-+++ b/arch/x86/kernel/cpu/perf_event.c
-@@ -1685,7 +1685,7 @@ perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
- break;
-
- callchain_store(entry, frame.return_address);
-- fp = frame.next_frame;
-+ fp = (__force const void __user *)frame.next_frame;
- }
- }
-
-diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
-index fb329e9..ab40f2d 100644
---- a/arch/x86/kernel/cpu/perfctr-watchdog.c
-+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
+diff -urNp linux-2.6.37/arch/x86/kernel/cpu/perfctr-watchdog.c linux-2.6.37/arch/x86/kernel/cpu/perfctr-watchdog.c
+--- linux-2.6.37/arch/x86/kernel/cpu/perfctr-watchdog.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/cpu/perfctr-watchdog.c 2011-01-17 02:41:01.000000000 -0500
@@ -30,11 +30,11 @@ struct nmi_watchdog_ctlblk {
/* Interface defining a CPU specific perfctr watchdog */
@@ -11026,7 +10905,7 @@ index fb329e9..ab40f2d 100644
static struct wd_ops intel_arch_wd_ops;
static int setup_intel_arch_watchdog(unsigned nmi_hz)
-@@ -686,6 +687,7 @@ static int setup_intel_arch_watchdog(unsigned nmi_hz)
+@@ -686,6 +687,7 @@ static int setup_intel_arch_watchdog(uns
return 1;
}
@@ -11034,11 +10913,22 @@ index fb329e9..ab40f2d 100644
static struct wd_ops intel_arch_wd_ops __read_mostly = {
.reserve = single_msr_reserve,
.unreserve = single_msr_unreserve,
-diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
-index ebd4c51..a345a28 100644
---- a/arch/x86/kernel/crash.c
-+++ b/arch/x86/kernel/crash.c
-@@ -40,7 +40,7 @@ static void kdump_nmi_callback(int cpu, struct die_args *args)
+diff -urNp linux-2.6.37/arch/x86/kernel/cpu/perf_event.c linux-2.6.37/arch/x86/kernel/cpu/perf_event.c
+--- linux-2.6.37/arch/x86/kernel/cpu/perf_event.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/cpu/perf_event.c 2011-01-17 02:41:01.000000000 -0500
+@@ -1757,7 +1757,7 @@ perf_callchain_user(struct perf_callchai
+ break;
+
+ perf_callchain_store(entry, frame.return_address);
+- fp = frame.next_frame;
++ fp = (__force const void __user *)frame.next_frame;
+ }
+ }
+
+diff -urNp linux-2.6.37/arch/x86/kernel/crash.c linux-2.6.37/arch/x86/kernel/crash.c
+--- linux-2.6.37/arch/x86/kernel/crash.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/crash.c 2011-01-17 02:41:01.000000000 -0500
+@@ -42,7 +42,7 @@ static void kdump_nmi_callback(int cpu,
regs = args->regs;
#ifdef CONFIG_X86_32
@@ -11047,10 +10937,9 @@ index ebd4c51..a345a28 100644
crash_fixup_ss_esp(&fixed_regs, regs);
regs = &fixed_regs;
}
-diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
-index 37250fe..bf2ec74 100644
---- a/arch/x86/kernel/doublefault_32.c
-+++ b/arch/x86/kernel/doublefault_32.c
+diff -urNp linux-2.6.37/arch/x86/kernel/doublefault_32.c linux-2.6.37/arch/x86/kernel/doublefault_32.c
+--- linux-2.6.37/arch/x86/kernel/doublefault_32.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/doublefault_32.c 2011-01-17 02:41:01.000000000 -0500
@@ -11,7 +11,7 @@
#define DOUBLEFAULT_STACKSIZE (1024)
@@ -11069,7 +10958,7 @@ index 37250fe..bf2ec74 100644
printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
-@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
+@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
/* 0x2 bit is always set */
.flags = X86_EFLAGS_SF | 0x2,
.sp = STACK_START,
@@ -11082,51 +10971,10 @@ index 37250fe..bf2ec74 100644
.fs = __KERNEL_PERCPU,
.__cr3 = __pa_nodebug(swapper_pg_dir),
-diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
-index c89a386..10eb36b 100644
---- a/arch/x86/kernel/dumpstack.c
-+++ b/arch/x86/kernel/dumpstack.c
-@@ -207,7 +207,7 @@ void dump_stack(void)
- #endif
-
- printk("Pid: %d, comm: %.20s %s %s %.*s\n",
-- current->pid, current->comm, print_tainted(),
-+ task_pid_nr(current), current->comm, print_tainted(),
- init_utsname()->release,
- (int)strcspn(init_utsname()->version, " "),
- init_utsname()->version);
-@@ -263,7 +263,7 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
- panic("Fatal exception in interrupt");
- if (panic_on_oops)
- panic("Fatal exception");
-- do_exit(signr);
-+ do_group_exit(signr);
- }
-
- int __kprobes __die(const char *str, struct pt_regs *regs, long err)
-@@ -290,7 +290,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
-
- show_registers(regs);
- #ifdef CONFIG_X86_32
-- if (user_mode_vm(regs)) {
-+ if (user_mode(regs)) {
- sp = regs->sp;
- ss = regs->ss & 0xffff;
- } else {
-@@ -318,7 +318,7 @@ void die(const char *str, struct pt_regs *regs, long err)
- unsigned long flags = oops_begin();
- int sig = SIGSEGV;
-
-- if (!user_mode_vm(regs))
-+ if (!user_mode(regs))
- report_bug(regs->ip, regs);
-
- if (__die(str, regs, err))
-diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
-index 11540a1..285b0ef 100644
---- a/arch/x86/kernel/dumpstack_32.c
-+++ b/arch/x86/kernel/dumpstack_32.c
-@@ -107,11 +107,12 @@ void show_registers(struct pt_regs *regs)
+diff -urNp linux-2.6.37/arch/x86/kernel/dumpstack_32.c linux-2.6.37/arch/x86/kernel/dumpstack_32.c
+--- linux-2.6.37/arch/x86/kernel/dumpstack_32.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/dumpstack_32.c 2011-01-17 02:41:01.000000000 -0500
+@@ -105,11 +105,12 @@ void show_registers(struct pt_regs *regs
* When in-kernel, we also print out the stack and code at the
* time of the fault..
*/
@@ -11140,7 +10988,7 @@ index 11540a1..285b0ef 100644
printk(KERN_EMERG "Stack:\n");
show_stack_log_lvl(NULL, regs, &regs->sp,
-@@ -119,10 +120,10 @@ void show_registers(struct pt_regs *regs)
+@@ -117,10 +118,10 @@ void show_registers(struct pt_regs *regs
printk(KERN_EMERG "Code: ");
@@ -11153,7 +11001,7 @@ index 11540a1..285b0ef 100644
code_len = code_len - code_prologue + 1;
}
for (i = 0; i < code_len; i++, ip++) {
-@@ -131,7 +132,7 @@ void show_registers(struct pt_regs *regs)
+@@ -129,7 +130,7 @@ void show_registers(struct pt_regs *regs
printk(" Bad EIP value.");
break;
}
@@ -11162,7 +11010,7 @@ index 11540a1..285b0ef 100644
printk("<%02x> ", c);
else
printk("%02x ", c);
-@@ -144,6 +145,7 @@ int is_valid_bugaddr(unsigned long ip)
+@@ -142,6 +143,7 @@ int is_valid_bugaddr(unsigned long ip)
{
unsigned short ud2;
@@ -11170,197 +11018,82 @@ index 11540a1..285b0ef 100644
if (ip < PAGE_OFFSET)
return 0;
if (probe_kernel_address((unsigned short *)ip, ud2))
-diff --git a/arch/x86/kernel/efi_32.c b/arch/x86/kernel/efi_32.c
-index 5cab48e..abf8ca2 100644
---- a/arch/x86/kernel/efi_32.c
-+++ b/arch/x86/kernel/efi_32.c
-@@ -38,70 +38,38 @@
+diff -urNp linux-2.6.37/arch/x86/kernel/dumpstack.c linux-2.6.37/arch/x86/kernel/dumpstack.c
+--- linux-2.6.37/arch/x86/kernel/dumpstack.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/dumpstack.c 2011-01-17 02:41:01.000000000 -0500
+@@ -2,6 +2,9 @@
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
*/
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++#define __INCLUDED_BY_HIDESYM 1
++#endif
+ #include <linux/kallsyms.h>
+ #include <linux/kprobes.h>
+ #include <linux/uaccess.h>
+@@ -27,7 +30,7 @@ static int die_counter;
- static unsigned long efi_rt_eflags;
--static pgd_t efi_bak_pg_dir_pointer[2];
-+static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
-
--void efi_call_phys_prelog(void)
-+void __init efi_call_phys_prelog(void)
+ void printk_address(unsigned long address, int reliable)
{
-- unsigned long cr4;
-- unsigned long temp;
- struct desc_ptr gdt_descr;
-
- local_irq_save(efi_rt_eflags);
-
-- /*
-- * If I don't have PAE, I should just duplicate two entries in page
-- * directory. If I have PAE, I just need to duplicate one entry in
-- * page directory.
-- */
-- cr4 = read_cr4_safe();
-
-- if (cr4 & X86_CR4_PAE) {
-- efi_bak_pg_dir_pointer[0].pgd =
-- swapper_pg_dir[pgd_index(0)].pgd;
-- swapper_pg_dir[0].pgd =
-- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
-- } else {
-- efi_bak_pg_dir_pointer[0].pgd =
-- swapper_pg_dir[pgd_index(0)].pgd;
-- efi_bak_pg_dir_pointer[1].pgd =
-- swapper_pg_dir[pgd_index(0x400000)].pgd;
-- swapper_pg_dir[pgd_index(0)].pgd =
-- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
-- temp = PAGE_OFFSET + 0x400000;
-- swapper_pg_dir[pgd_index(0x400000)].pgd =
-- swapper_pg_dir[pgd_index(temp)].pgd;
-- }
-+ clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
-+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
-+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
-
- /*
- * After the lock is released, the original page table is restored.
- */
- __flush_tlb_all();
-
-- gdt_descr.address = __pa(get_cpu_gdt_table(0));
-+ gdt_descr.address = (struct desc_struct *)__pa(get_cpu_gdt_table(0));
- gdt_descr.size = GDT_SIZE - 1;
- load_gdt(&gdt_descr);
+- printk(" [<%p>] %s%pS\n", (void *) address,
++ printk(" [<%p>] %s%pA\n", (void *) address,
+ reliable ? "" : "? ", (void *) address);
}
--void efi_call_phys_epilog(void)
-+void __init efi_call_phys_epilog(void)
- {
-- unsigned long cr4;
- struct desc_ptr gdt_descr;
-
-- gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
-+ gdt_descr.address = get_cpu_gdt_table(0);
- gdt_descr.size = GDT_SIZE - 1;
- load_gdt(&gdt_descr);
-
-- cr4 = read_cr4_safe();
--
-- if (cr4 & X86_CR4_PAE) {
-- swapper_pg_dir[pgd_index(0)].pgd =
-- efi_bak_pg_dir_pointer[0].pgd;
-- } else {
-- swapper_pg_dir[pgd_index(0)].pgd =
-- efi_bak_pg_dir_pointer[0].pgd;
-- swapper_pg_dir[pgd_index(0x400000)].pgd =
-- efi_bak_pg_dir_pointer[1].pgd;
-- }
-+ clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
-
- /*
- * After the lock is released, the original page table is restored.
-diff --git a/arch/x86/kernel/efi_stub_32.S b/arch/x86/kernel/efi_stub_32.S
-index fbe66e6..1f61a01 100644
---- a/arch/x86/kernel/efi_stub_32.S
-+++ b/arch/x86/kernel/efi_stub_32.S
-@@ -6,6 +6,7 @@
- */
-
- #include <linux/linkage.h>
-+#include <linux/init.h>
- #include <asm/page_types.h>
+@@ -206,7 +209,7 @@ void dump_stack(void)
+ #endif
- /*
-@@ -20,7 +21,7 @@
- * service functions will comply with gcc calling convention, too.
- */
+ printk("Pid: %d, comm: %.20s %s %s %.*s\n",
+- current->pid, current->comm, print_tainted(),
++ task_pid_nr(current), current->comm, print_tainted(),
+ init_utsname()->release,
+ (int)strcspn(init_utsname()->version, " "),
+ init_utsname()->version);
+@@ -262,7 +265,7 @@ void __kprobes oops_end(unsigned long fl
+ panic("Fatal exception in interrupt");
+ if (panic_on_oops)
+ panic("Fatal exception");
+- do_exit(signr);
++ do_group_exit(signr);
+ }
--.text
-+__INIT
- ENTRY(efi_call_phys)
- /*
- * 0. The function can only be called in Linux kernel. So CS has been
-@@ -36,9 +37,7 @@ ENTRY(efi_call_phys)
- * The mapping of lower virtual memory has been created in prelog and
- * epilog.
- */
-- movl $1f, %edx
-- subl $__PAGE_OFFSET, %edx
-- jmp *%edx
-+ jmp 1f-__PAGE_OFFSET
- 1:
+ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
+@@ -289,7 +292,7 @@ int __kprobes __die(const char *str, str
- /*
-@@ -47,14 +46,8 @@ ENTRY(efi_call_phys)
- * parameter 2, ..., param n. To make things easy, we save the return
- * address of efi_call_phys in a global variable.
- */
-- popl %edx
-- movl %edx, saved_return_addr
-- /* get the function pointer into ECX*/
-- popl %ecx
-- movl %ecx, efi_rt_function_ptr
-- movl $2f, %edx
-- subl $__PAGE_OFFSET, %edx
-- pushl %edx
-+ popl (saved_return_addr)
-+ popl (efi_rt_function_ptr)
+ show_registers(regs);
+ #ifdef CONFIG_X86_32
+- if (user_mode_vm(regs)) {
++ if (user_mode(regs)) {
+ sp = regs->sp;
+ ss = regs->ss & 0xffff;
+ } else {
+@@ -317,7 +320,7 @@ void die(const char *str, struct pt_regs
+ unsigned long flags = oops_begin();
+ int sig = SIGSEGV;
- /*
- * 3. Clear PG bit in %CR0.
-@@ -73,9 +66,8 @@ ENTRY(efi_call_phys)
- /*
- * 5. Call the physical function.
- */
-- jmp *%ecx
-+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
+- if (!user_mode_vm(regs))
++ if (!user_mode(regs))
+ report_bug(regs->ip, regs);
--2:
- /*
- * 6. After EFI runtime service returns, control will return to
- * following instruction. We'd better readjust stack pointer first.
-@@ -88,35 +80,28 @@ ENTRY(efi_call_phys)
- movl %cr0, %edx
- orl $0x80000000, %edx
- movl %edx, %cr0
-- jmp 1f
--1:
+ if (__die(str, regs, err))
+diff -urNp linux-2.6.37/arch/x86/kernel/entry_32.S linux-2.6.37/arch/x86/kernel/entry_32.S
+--- linux-2.6.37/arch/x86/kernel/entry_32.S 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/entry_32.S 2011-01-17 02:41:01.000000000 -0500
+@@ -183,13 +183,81 @@
+ /*CFI_REL_OFFSET gs, PT_GS*/
+ .endm
+ .macro SET_KERNEL_GS reg
+
- /*
- * 8. Now restore the virtual mode from flat mode by
- * adding EIP with PAGE_OFFSET.
- */
-- movl $1f, %edx
-- jmp *%edx
-+ jmp 1f+__PAGE_OFFSET
- 1:
-
- /*
- * 9. Balance the stack. And because EAX contain the return value,
- * we'd better not clobber it.
- */
-- leal efi_rt_function_ptr, %edx
-- movl (%edx), %ecx
-- pushl %ecx
-+ pushl (efi_rt_function_ptr)
-
- /*
-- * 10. Push the saved return address onto the stack and return.
-+ * 10. Return to the saved return address.
- */
-- leal saved_return_addr, %edx
-- movl (%edx), %ecx
-- pushl %ecx
-- ret
-+ jmpl *(saved_return_addr)
- ENDPROC(efi_call_phys)
- .previous
-
--.data
-+__INITDATA
- saved_return_addr:
- .long 0
- efi_rt_function_ptr:
-diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
-index cd49141..cc0b142 100644
---- a/arch/x86/kernel/entry_32.S
-+++ b/arch/x86/kernel/entry_32.S
-@@ -192,7 +192,67 @@
++#ifdef CONFIG_CC_STACKPROTECTOR
+ movl $(__KERNEL_STACK_CANARY), \reg
++#elif defined(CONFIG_PAX_MEMORY_UDEREF)
++ movl $(__USER_DS), \reg
++#else
++ xorl \reg, \reg
++#endif
++
+ movl \reg, %gs
+ .endm
#endif /* CONFIG_X86_32_LAZY_GS */
@@ -11368,7 +11101,7 @@ index cd49141..cc0b142 100644
+.macro PAX_EXIT_KERNEL
+#ifdef CONFIG_PAX_KERNEXEC
+#ifdef CONFIG_PARAVIRT
-+ push %eax; push %ecx;
++ push %eax; push %ecx
+#endif
+ mov %cs, %esi
+ cmp $__KERNEXEC_KERNEL_CS, %esi
@@ -11398,7 +11131,7 @@ index cd49141..cc0b142 100644
+.macro PAX_ENTER_KERNEL
+#ifdef CONFIG_PAX_KERNEXEC
+#ifdef CONFIG_PARAVIRT
-+ push %eax; push %ecx;
++ push %eax; push %ecx
+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
+ mov %eax, %esi
+#else
@@ -11428,17 +11161,17 @@ index cd49141..cc0b142 100644
+.macro __SAVE_ALL _DS
cld
PUSH_GS
- pushl %fs
-@@ -225,7 +285,7 @@
- pushl %ebx
- CFI_ADJUST_CFA_OFFSET 4
+ pushl_cfi %fs
+@@ -212,7 +280,7 @@
+ CFI_REL_OFFSET ecx, 0
+ pushl_cfi %ebx
CFI_REL_OFFSET ebx, 0
- movl $(__USER_DS), %edx
+ movl $\_DS, %edx
movl %edx, %ds
movl %edx, %es
movl $(__KERNEL_PERCPU), %edx
-@@ -233,6 +293,15 @@
+@@ -220,6 +288,15 @@
SET_KERNEL_GS %edx
.endm
@@ -11452,9 +11185,9 @@ index cd49141..cc0b142 100644
+.endm
+
.macro RESTORE_INT_REGS
- popl %ebx
- CFI_ADJUST_CFA_OFFSET -4
-@@ -357,7 +426,15 @@ check_userspace:
+ popl_cfi %ebx
+ CFI_RESTORE ebx
+@@ -330,7 +407,15 @@ check_userspace:
movb PT_CS(%esp), %al
andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
cmpl $USER_RPL, %eax
@@ -11470,20 +11203,20 @@ index cd49141..cc0b142 100644
ENTRY(resume_userspace)
LOCKDEP_SYS_EXIT
-@@ -423,10 +500,9 @@ sysenter_past_esp:
+@@ -392,10 +477,9 @@ sysenter_past_esp:
/*CFI_REL_OFFSET cs, 0*/
/*
* Push current_thread_info()->sysenter_return to the stack.
- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
- * pushed above; +8 corresponds to copy_thread's esp0 setting.
*/
-- pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
+- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE_asm+8+4*4)(%esp)
+ GET_THREAD_INFO(%ebp)
-+ pushl TI_sysenter_return(%ebp)
- CFI_ADJUST_CFA_OFFSET 4
++ pushl_cfi TI_sysenter_return(%ebp)
CFI_REL_OFFSET eip, 0
-@@ -439,9 +515,19 @@ sysenter_past_esp:
+ pushl_cfi %eax
+@@ -406,9 +490,19 @@ sysenter_past_esp:
* Load the potential sixth argument from user stack.
* Careful about security.
*/
@@ -11503,7 +11236,7 @@ index cd49141..cc0b142 100644
movl %ebp,PT_EBP(%esp)
.section __ex_table,"a"
.align 4
-@@ -464,12 +550,23 @@ sysenter_do_call:
+@@ -431,12 +525,23 @@ sysenter_do_call:
testl $_TIF_ALLWORK_MASK, %ecx
jne sysexit_audit
sysenter_exit:
@@ -11527,7 +11260,7 @@ index cd49141..cc0b142 100644
PTGS_TO_GS
ENABLE_INTERRUPTS_SYSEXIT
-@@ -513,11 +610,17 @@ sysexit_audit:
+@@ -479,11 +584,17 @@ sysexit_audit:
CFI_ENDPROC
.pushsection .fixup,"ax"
@@ -11547,7 +11280,7 @@ index cd49141..cc0b142 100644
.popsection
PTGS_TO_GS_EX
ENDPROC(ia32_sysenter_target)
-@@ -551,6 +654,10 @@ syscall_exit:
+@@ -516,6 +627,10 @@ syscall_exit:
testl $_TIF_ALLWORK_MASK, %ecx # current->work
jne syscall_exit_work
@@ -11558,11 +11291,16 @@ index cd49141..cc0b142 100644
restore_all:
TRACE_IRQS_IRET
restore_all_notrace:
-@@ -615,7 +722,13 @@ ldt_ss:
+@@ -575,14 +690,21 @@ ldt_ss:
+ * compensating for the offset by changing to the ESPFIX segment with
+ * a base address that matches for the difference.
+ */
+-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
++#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
+ mov %esp, %edx /* load kernel esp */
mov PT_OLDESP(%esp), %eax /* load userspace esp */
mov %dx, %ax /* eax: new kernel esp */
sub %eax, %edx /* offset (low word is 0) */
-- PER_CPU(gdt_page, %ebx)
+#ifdef CONFIG_SMP
+ movl PER_CPU_VAR(cpu_number), %ebx
+ shll $PAGE_SHIFT_asm, %ebx
@@ -11571,9 +11309,14 @@ index cd49141..cc0b142 100644
+ movl $cpu_gdt_table, %ebx
+#endif
shr $16, %edx
- mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */
- mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */
-@@ -655,25 +768,19 @@ work_resched:
+- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
+- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
++ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
++ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
+ pushl_cfi $__ESPFIX_SS
+ pushl_cfi %eax /* new kernel esp */
+ /* Disable interrupts, but do not irqtrace this section: we
+@@ -617,23 +739,17 @@ work_resched:
work_notifysig: # deal with pending signals and
# notify-resume requests
@@ -11590,11 +11333,9 @@ index cd49141..cc0b142 100644
- ALIGN
-work_notifysig_v86:
- pushl %ecx # save ti_flags for do_notify_resume
- CFI_ADJUST_CFA_OFFSET 4
+ pushl_cfi %ecx # save ti_flags for do_notify_resume
call save_v86_state # %eax contains pt_regs pointer
- popl %ecx
- CFI_ADJUST_CFA_OFFSET -4
+ popl_cfi %ecx
movl %eax, %esp
-#else
- movl %esp, %eax
@@ -11602,7 +11343,7 @@ index cd49141..cc0b142 100644
#endif
xorl %edx, %edx
call do_notify_resume
-@@ -708,6 +815,10 @@ END(syscall_exit_work)
+@@ -668,6 +784,10 @@ END(syscall_exit_work)
RING0_INT_FRAME # can't unwind into user space anyway
syscall_fault:
@@ -11613,11 +11354,49 @@ index cd49141..cc0b142 100644
GET_THREAD_INFO(%ebp)
movl $-EFAULT,PT_EAX(%esp)
jmp resume_userspace
-@@ -791,7 +902,13 @@ ptregs_clone:
+@@ -750,6 +870,36 @@ ptregs_clone:
+ CFI_ENDPROC
+ ENDPROC(ptregs_clone)
+
++ ALIGN;
++ENTRY(kernel_execve)
++ CFI_STARTPROC
++ pushl_cfi %ebp
++ sub $PT_OLDSS+4,%esp
++ pushl_cfi %edi
++ pushl_cfi %ecx
++ pushl_cfi %eax
++ lea 3*4(%esp),%edi
++ mov $PT_OLDSS/4+1,%ecx
++ xorl %eax,%eax
++ rep stosl
++ popl_cfi %eax
++ popl_cfi %ecx
++ popl_cfi %edi
++ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
++ pushl_cfi %esp
++ call sys_execve
++ add $4,%esp
++ CFI_ADJUST_CFA_OFFSET -4
++ GET_THREAD_INFO(%ebp)
++ test %eax,%eax
++ jz syscall_exit
++ add $PT_OLDSS+4,%esp
++ CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
++ popl_cfi %ebp
++ ret
++ CFI_ENDPROC
++ENDPROC(kernel_execve)
++
+ .macro FIXUP_ESPFIX_STACK
+ /*
+ * Switch back for ESPFIX stack to the normal zerobased stack
+@@ -759,8 +909,15 @@ ENDPROC(ptregs_clone)
* normal stack and adjusts ESP with the matching offset.
*/
/* fixup the stack */
-- PER_CPU(gdt_page, %ebx)
+- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
+- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
+#ifdef CONFIG_SMP
+ movl PER_CPU_VAR(cpu_number), %ebx
+ shll $PAGE_SHIFT_asm, %ebx
@@ -11625,10 +11404,12 @@ index cd49141..cc0b142 100644
+#else
+ movl $cpu_gdt_table, %ebx
+#endif
- mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
- mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
++ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
++ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
shl $16, %eax
-@@ -1273,7 +1390,6 @@ return_to_handler:
+ addl %esp, %eax /* the adjusted stack pointer */
+ pushl_cfi $__KERNEL_DS
+@@ -1211,7 +1368,6 @@ return_to_handler:
jmp *%ecx
#endif
@@ -11636,7 +11417,7 @@ index cd49141..cc0b142 100644
#include "syscall_table_32.S"
syscall_table_size=(.-sys_call_table)
-@@ -1330,9 +1446,12 @@ error_code:
+@@ -1257,9 +1413,12 @@ error_code:
movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
REG_TO_PTGS %ecx
SET_KERNEL_GS %ecx
@@ -11650,7 +11431,7 @@ index cd49141..cc0b142 100644
TRACE_IRQS_OFF
movl %esp,%eax # pt_regs pointer
call *%edi
-@@ -1426,6 +1545,9 @@ nmi_stack_correct:
+@@ -1344,6 +1503,9 @@ nmi_stack_correct:
xorl %edx,%edx # zero error code
movl %esp,%eax # pt_regs pointer
call do_nmi
@@ -11660,7 +11441,7 @@ index cd49141..cc0b142 100644
jmp restore_all_notrace
CFI_ENDPROC
-@@ -1466,6 +1588,9 @@ nmi_espfix_stack:
+@@ -1380,6 +1542,9 @@ nmi_espfix_stack:
FIXUP_ESPFIX_STACK # %eax == %esp
xorl %edx,%edx # zero error code
call do_nmi
@@ -11670,10 +11451,9 @@ index cd49141..cc0b142 100644
RESTORE_REGS
lss 12+4(%esp), %esp # back to espfix stack
CFI_ADJUST_CFA_OFFSET -24
-diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
-index 4db7c4d..1f56a44 100644
---- a/arch/x86/kernel/entry_64.S
-+++ b/arch/x86/kernel/entry_64.S
+diff -urNp linux-2.6.37/arch/x86/kernel/entry_64.S linux-2.6.37/arch/x86/kernel/entry_64.S
+--- linux-2.6.37/arch/x86/kernel/entry_64.S 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/entry_64.S 2011-01-17 02:41:01.000000000 -0500
@@ -53,6 +53,7 @@
#include <asm/paravirt.h>
#include <asm/ftrace.h>
@@ -11872,7 +11652,7 @@ index 4db7c4d..1f56a44 100644
.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
#ifdef CONFIG_TRACE_IRQFLAGS
-@@ -317,7 +501,7 @@ ENTRY(save_args)
+@@ -312,7 +496,7 @@ ENTRY(save_args)
leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */
movq_cfi rbp, 8 /* push %rbp */
leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
@@ -11881,7 +11661,7 @@ index 4db7c4d..1f56a44 100644
je 1f
SWAPGS
/*
-@@ -409,7 +593,7 @@ ENTRY(ret_from_fork)
+@@ -403,7 +587,7 @@ ENTRY(ret_from_fork)
RESTORE_REST
@@ -11890,7 +11670,7 @@ index 4db7c4d..1f56a44 100644
je int_ret_from_sys_call
testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
-@@ -468,6 +652,11 @@ ENTRY(system_call_after_swapgs)
+@@ -462,6 +646,11 @@ ENTRY(system_call_after_swapgs)
movq %rsp,PER_CPU_VAR(old_rsp)
movq PER_CPU_VAR(kernel_stack),%rsp
@@ -11902,7 +11682,7 @@ index 4db7c4d..1f56a44 100644
/*
* No need to follow this irqs off/on section - it's straight
* and short:
-@@ -502,6 +691,11 @@ sysret_check:
+@@ -496,6 +685,11 @@ sysret_check:
andl %edi,%edx
jnz sysret_careful
CFI_REMEMBER_STATE
@@ -11914,7 +11694,7 @@ index 4db7c4d..1f56a44 100644
/*
* sysretq will re-enable interrupts:
*/
-@@ -613,7 +807,7 @@ tracesys:
+@@ -605,7 +799,7 @@ tracesys:
GLOBAL(int_ret_from_sys_call)
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
@@ -11923,8 +11703,8 @@ index 4db7c4d..1f56a44 100644
je retint_restore_args
movl $_TIF_ALLWORK_MASK,%edi
/* edi: mask to check */
-@@ -800,6 +994,16 @@ END(interrupt)
- CFI_ADJUST_CFA_OFFSET 10*8
+@@ -786,6 +980,16 @@ END(interrupt)
+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-ARGOFFSET+8
call save_args
PARTIAL_FRAME 0
+#ifdef CONFIG_PAX_MEMORY_UDEREF
@@ -11940,7 +11720,7 @@ index 4db7c4d..1f56a44 100644
call \func
.endm
-@@ -826,7 +1030,7 @@ ret_from_intr:
+@@ -813,7 +1017,7 @@ ret_from_intr:
CFI_ADJUST_CFA_OFFSET -8
exit_intr:
GET_THREAD_INFO(%rcx)
@@ -11949,7 +11729,7 @@ index 4db7c4d..1f56a44 100644
je retint_kernel
/* Interrupt came from user space */
-@@ -848,12 +1052,18 @@ retint_swapgs: /* return to user-space */
+@@ -835,12 +1039,18 @@ retint_swapgs: /* return to user-space
* The iretq could re-enable interrupts:
*/
DISABLE_INTERRUPTS(CLBR_ANY)
@@ -11968,8 +11748,8 @@ index 4db7c4d..1f56a44 100644
/*
* The iretq could re-enable interrupts:
*/
-@@ -1040,6 +1250,16 @@ ENTRY(\sym)
- CFI_ADJUST_CFA_OFFSET 15*8
+@@ -1012,6 +1222,16 @@ ENTRY(\sym)
+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
call error_entry
DEFAULT_FRAME 0
+#ifdef CONFIG_PAX_MEMORY_UDEREF
@@ -11985,8 +11765,8 @@ index 4db7c4d..1f56a44 100644
movq %rsp,%rdi /* pt_regs pointer */
xorl %esi,%esi /* no error code */
call \do_sym
-@@ -1057,6 +1277,16 @@ ENTRY(\sym)
- subq $15*8, %rsp
+@@ -1029,6 +1249,16 @@ ENTRY(\sym)
+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
call save_paranoid
TRACE_IRQS_OFF
+#ifdef CONFIG_PAX_MEMORY_UDEREF
@@ -12002,8 +11782,17 @@ index 4db7c4d..1f56a44 100644
movq %rsp,%rdi /* pt_regs pointer */
xorl %esi,%esi /* no error code */
call \do_sym
-@@ -1074,9 +1304,24 @@ ENTRY(\sym)
- subq $15*8, %rsp
+@@ -1037,7 +1267,7 @@ ENTRY(\sym)
+ END(\sym)
+ .endm
+
+-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
++#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
+ .macro paranoidzeroentry_ist sym do_sym ist
+ ENTRY(\sym)
+ INTR_FRAME
+@@ -1047,8 +1277,24 @@ ENTRY(\sym)
+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
call save_paranoid
TRACE_IRQS_OFF
+#ifdef CONFIG_PAX_MEMORY_UDEREF
@@ -12018,18 +11807,17 @@ index 4db7c4d..1f56a44 100644
+#endif
movq %rsp,%rdi /* pt_regs pointer */
xorl %esi,%esi /* no error code */
-- PER_CPU(init_tss, %r12)
+#ifdef CONFIG_SMP
+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
+ lea init_tss(%r12), %r12
+#else
+ lea init_tss(%rip), %r12
+#endif
- subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%r12)
+ subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
call \do_sym
- addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%r12)
-@@ -1093,6 +1338,16 @@ ENTRY(\sym)
- CFI_ADJUST_CFA_OFFSET 15*8
+ addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
+@@ -1065,6 +1311,16 @@ ENTRY(\sym)
+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
call error_entry
DEFAULT_FRAME 0
+#ifdef CONFIG_PAX_MEMORY_UDEREF
@@ -12045,7 +11833,7 @@ index 4db7c4d..1f56a44 100644
movq %rsp,%rdi /* pt_regs pointer */
movq ORIG_RAX(%rsp),%rsi /* get error code */
movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
-@@ -1112,6 +1367,16 @@ ENTRY(\sym)
+@@ -1084,6 +1340,16 @@ ENTRY(\sym)
call save_paranoid
DEFAULT_FRAME 0
TRACE_IRQS_OFF
@@ -12062,7 +11850,7 @@ index 4db7c4d..1f56a44 100644
movq %rsp,%rdi /* pt_regs pointer */
movq ORIG_RAX(%rsp),%rsi /* get error code */
movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
-@@ -1370,14 +1635,27 @@ ENTRY(paranoid_exit)
+@@ -1343,14 +1609,27 @@ ENTRY(paranoid_exit)
TRACE_IRQS_OFF
testl %ebx,%ebx /* swapgs needed? */
jnz paranoid_restore
@@ -12091,7 +11879,7 @@ index 4db7c4d..1f56a44 100644
TRACE_IRQS_IRETQ 0
RESTORE_ALL 8
jmp irq_return
-@@ -1435,7 +1713,7 @@ ENTRY(error_entry)
+@@ -1408,7 +1687,7 @@ ENTRY(error_entry)
movq_cfi r14, R14+8
movq_cfi r15, R15+8
xorl %ebx,%ebx
@@ -12100,8 +11888,8 @@ index 4db7c4d..1f56a44 100644
je error_kernelspace
error_swapgs:
SWAPGS
-@@ -1499,6 +1777,16 @@ ENTRY(nmi)
- CFI_ADJUST_CFA_OFFSET 15*8
+@@ -1472,6 +1751,16 @@ ENTRY(nmi)
+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
call save_paranoid
DEFAULT_FRAME 0
+#ifdef CONFIG_PAX_MEMORY_UDEREF
@@ -12117,7 +11905,7 @@ index 4db7c4d..1f56a44 100644
/* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
movq %rsp,%rdi
movq $-1,%rsi
-@@ -1509,11 +1797,12 @@ ENTRY(nmi)
+@@ -1482,11 +1771,12 @@ ENTRY(nmi)
DISABLE_INTERRUPTS(CLBR_NONE)
testl %ebx,%ebx /* swapgs needed? */
jnz nmi_restore
@@ -12131,10 +11919,9 @@ index 4db7c4d..1f56a44 100644
RESTORE_ALL 8
jmp irq_return
nmi_userspace:
-diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
-index cd37469..1a535d2 100644
---- a/arch/x86/kernel/ftrace.c
-+++ b/arch/x86/kernel/ftrace.c
+diff -urNp linux-2.6.37/arch/x86/kernel/ftrace.c linux-2.6.37/arch/x86/kernel/ftrace.c
+--- linux-2.6.37/arch/x86/kernel/ftrace.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/ftrace.c 2011-01-17 02:41:01.000000000 -0500
@@ -174,7 +174,9 @@ void ftrace_nmi_enter(void)
if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
@@ -12145,16 +11932,7 @@ index cd37469..1a535d2 100644
atomic_inc(&nmi_update_count);
}
/* Must have previous changes seen before executions */
-@@ -260,7 +262,7 @@ do_ftrace_mod_code(unsigned long ip, void *new_code)
-
-
-
--static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
-+static unsigned char ftrace_nop[MCOUNT_INSN_SIZE] __read_only;
-
- static unsigned char *ftrace_nop_replace(void)
- {
-@@ -273,6 +275,8 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
+@@ -268,6 +270,8 @@ ftrace_modify_code(unsigned long ip, uns
{
unsigned char replaced[MCOUNT_INSN_SIZE];
@@ -12163,7 +11941,7 @@ index cd37469..1a535d2 100644
/*
* Note: Due to modules and __init, code can
* disappear and change, we need to protect against faulting
-@@ -329,7 +333,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
+@@ -324,7 +328,7 @@ int ftrace_update_ftrace_func(ftrace_fun
unsigned char old[MCOUNT_INSN_SIZE], *new;
int ret;
@@ -12172,26 +11950,7 @@ index cd37469..1a535d2 100644
new = ftrace_call_replace(ip, (unsigned long)func);
ret = ftrace_modify_code(ip, old, new);
-@@ -382,15 +386,15 @@ int __init ftrace_dyn_arch_init(void *data)
- switch (faulted) {
- case 0:
- pr_info("converting mcount calls to 0f 1f 44 00 00\n");
-- memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
-+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_p6nop), MCOUNT_INSN_SIZE);
- break;
- case 1:
- pr_info("converting mcount calls to 66 66 66 66 90\n");
-- memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
-+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_nop5), MCOUNT_INSN_SIZE);
- break;
- case 2:
- pr_info("converting mcount calls to jmp . + 5\n");
-- memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
-+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_jmp), MCOUNT_INSN_SIZE);
- break;
- }
-
-@@ -411,6 +415,8 @@ static int ftrace_mod_jmp(unsigned long ip,
+@@ -350,6 +354,8 @@ static int ftrace_mod_jmp(unsigned long
{
unsigned char code[MCOUNT_INSN_SIZE];
@@ -12200,31 +11959,29 @@ index cd37469..1a535d2 100644
if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
return -EFAULT;
-diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
-index b2e2460..cd2698e 100644
---- a/arch/x86/kernel/head32.c
-+++ b/arch/x86/kernel/head32.c
-@@ -17,6 +17,7 @@
- #include <asm/apic.h>
+diff -urNp linux-2.6.37/arch/x86/kernel/head32.c linux-2.6.37/arch/x86/kernel/head32.c
+--- linux-2.6.37/arch/x86/kernel/head32.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/head32.c 2011-01-17 02:41:01.000000000 -0500
+@@ -19,6 +19,7 @@
#include <asm/io_apic.h>
#include <asm/bios_ebda.h>
+ #include <asm/tlbflush.h>
+#include <asm/boot.h>
static void __init i386_default_early_setup(void)
{
-@@ -40,7 +41,7 @@ void __init i386_start_kernel(void)
- "EX TRAMPOLINE");
+@@ -43,7 +44,7 @@ void __init i386_start_kernel(void)
+ memblock_x86_reserve_range(PAGE_SIZE, PAGE_SIZE + PAGE_SIZE, "EX TRAMPOLINE");
#endif
-- reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
-+ reserve_early(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
+- memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
++ memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
#ifdef CONFIG_BLK_DEV_INITRD
/* Reserve INITRD */
-diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
-index 75e3981..5674277 100644
---- a/arch/x86/kernel/head_32.S
-+++ b/arch/x86/kernel/head_32.S
+diff -urNp linux-2.6.37/arch/x86/kernel/head_32.S linux-2.6.37/arch/x86/kernel/head_32.S
+--- linux-2.6.37/arch/x86/kernel/head_32.S 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/head_32.S 2011-01-25 20:24:56.000000000 -0500
@@ -25,6 +25,12 @@
/* Physical address */
#define pa(X) ((X) - __PAGE_OFFSET)
@@ -12249,9 +12006,9 @@ index 75e3981..5674277 100644
-#endif
+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
- /* Enough space to fit pagetables for the low memory linear map */
- MAPPING_BEYOND_END = \
-@@ -75,6 +77,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE_asm
+ /* Number of possible pages in the lowmem region */
+ LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
+@@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
RESERVE_BRK(pagetables, INIT_MAP_SIZE)
/*
@@ -12264,7 +12021,7 @@ index 75e3981..5674277 100644
* 32-bit kernel entrypoint; only used by the boot CPU. On entry,
* %esi points to the real-mode code as a 32-bit pointer.
* CS and DS must be 4 GB flat segments, but we don't depend on
-@@ -82,6 +90,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
+@@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
* can.
*/
__HEAD
@@ -12278,7 +12035,7 @@ index 75e3981..5674277 100644
ENTRY(startup_32)
/* test KEEP_SEGMENTS flag to see if the bootloader is asking
us to not reload segments */
-@@ -99,6 +114,55 @@ ENTRY(startup_32)
+@@ -101,6 +116,57 @@ ENTRY(startup_32)
movl %eax,%gs
2:
@@ -12299,6 +12056,8 @@ index 75e3981..5674277 100644
+ movl $pa(cpu_gdt_table),%edi
+1:
+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
++ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
++ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
+ addl $PAGE_SIZE_asm,%edi
+ loop 1b
+#endif
@@ -12334,7 +12093,7 @@ index 75e3981..5674277 100644
/*
* Clear BSS first so that there are no surprises...
*/
-@@ -142,9 +206,7 @@ ENTRY(startup_32)
+@@ -150,9 +216,7 @@ ENTRY(startup_32)
cmpl $num_subarch_entries, %eax
jae bad_subarch
@@ -12345,7 +12104,7 @@ index 75e3981..5674277 100644
bad_subarch:
WEAK(lguest_entry)
-@@ -156,10 +218,10 @@ WEAK(xen_entry)
+@@ -164,10 +228,10 @@ WEAK(xen_entry)
__INITDATA
subarch_entries:
@@ -12360,35 +12119,35 @@ index 75e3981..5674277 100644
num_subarch_entries = (. - subarch_entries) / 4
.previous
#endif /* CONFIG_PARAVIRT */
-@@ -220,8 +282,11 @@ default_entry:
+@@ -227,8 +291,11 @@ default_entry:
movl %eax, pa(max_pfn_mapped)
/* Do early initialization of the fixmap area */
-- movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
-- movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
+- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
+- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
+#ifdef CONFIG_COMPAT_VDSO
-+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_pmd+0x1000*KPMDS-8)
++ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
+#else
-+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_pmd+0x1000*KPMDS-8)
++ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
+#endif
#else /* Not PAE */
page_pde_offset = (__PAGE_OFFSET >> 20);
-@@ -251,8 +316,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
+@@ -258,8 +325,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
movl %eax, pa(max_pfn_mapped)
/* Do early initialization of the fixmap area */
-- movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
-- movl %eax,pa(swapper_pg_dir+0xffc)
+- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
+- movl %eax,pa(initial_page_table+0xffc)
+#ifdef CONFIG_COMPAT_VDSO
-+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_dir+0xffc)
++ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
+#else
-+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_dir+0xffc)
++ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
+#endif
#endif
jmp 3f
/*
-@@ -299,6 +367,7 @@ ENTRY(startup_32_smp)
+@@ -306,6 +376,7 @@ ENTRY(startup_32_smp)
orl %edx,%eax
movl %eax,%cr4
@@ -12396,7 +12155,7 @@ index 75e3981..5674277 100644
testb $X86_CR4_PAE, %al # check if PAE is enabled
jz 6f
-@@ -323,6 +392,9 @@ ENTRY(startup_32_smp)
+@@ -330,6 +401,9 @@ ENTRY(startup_32_smp)
/* Make changes effective */
wrmsr
@@ -12406,7 +12165,7 @@ index 75e3981..5674277 100644
6:
/*
-@@ -348,9 +420,7 @@ ENTRY(startup_32_smp)
+@@ -355,9 +429,7 @@ ENTRY(startup_32_smp)
#ifdef CONFIG_SMP
cmpb $0, ready
@@ -12417,7 +12176,7 @@ index 75e3981..5674277 100644
#endif /* CONFIG_SMP */
/*
-@@ -428,7 +498,7 @@ is386: movl $2,%ecx # set MP
+@@ -435,7 +507,7 @@ is386: movl $2,%ecx # set MP
1: movl $(__KERNEL_DS),%eax # reload all the segment registers
movl %eax,%ss # after changing gdt.
@@ -12426,7 +12185,7 @@ index 75e3981..5674277 100644
movl %eax,%ds
movl %eax,%es
-@@ -442,8 +512,11 @@ is386: movl $2,%ecx # set MP
+@@ -449,15 +521,22 @@ is386: movl $2,%ecx # set MP
*/
cmpb $0,ready
jne 1f
@@ -12439,7 +12198,19 @@ index 75e3981..5674277 100644
movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
shrl $16, %ecx
movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
-@@ -461,10 +534,6 @@ is386: movl $2,%ecx # set MP
+ movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
+ 1:
+-#endif
+ movl $(__KERNEL_STACK_CANARY),%eax
++#elif defined(CONFIG_PAX_MEMORY_UDEREF)
++ movl $(__USER_DS),%eax
++#else
++ xorl %eax,%eax
++#endif
+ movl %eax,%gs
+
+ xorl %eax,%eax # Clear LDT
+@@ -468,10 +547,6 @@ is386: movl $2,%ecx # set MP
#ifdef CONFIG_SMP
movb ready, %cl
movb $1, ready
@@ -12450,7 +12221,7 @@ index 75e3981..5674277 100644
#endif /* CONFIG_SMP */
jmp *(initial_code)
-@@ -550,22 +619,22 @@ early_page_fault:
+@@ -557,22 +632,22 @@ early_page_fault:
jmp early_fault
early_fault:
@@ -12478,7 +12249,7 @@ index 75e3981..5674277 100644
hlt_loop:
hlt
jmp hlt_loop
-@@ -573,8 +642,11 @@ hlt_loop:
+@@ -580,8 +655,11 @@ hlt_loop:
/* This is the default interrupt "handler" :-) */
ALIGN
ignore_int:
@@ -12491,7 +12262,7 @@ index 75e3981..5674277 100644
pushl %eax
pushl %ecx
pushl %edx
-@@ -583,9 +655,6 @@ ignore_int:
+@@ -590,9 +668,6 @@ ignore_int:
movl $(__KERNEL_DS),%eax
movl %eax,%ds
movl %eax,%es
@@ -12501,39 +12272,36 @@ index 75e3981..5674277 100644
pushl 16(%esp)
pushl 24(%esp)
pushl 32(%esp)
-@@ -614,31 +683,47 @@ ENTRY(initial_page_table)
+@@ -619,29 +694,43 @@ ENTRY(initial_code)
/*
* BSS section
*/
-__PAGE_ALIGNED_BSS
- .align PAGE_SIZE_asm
#ifdef CONFIG_X86_PAE
-+.section .swapper_pg_pmd,"a",@progbits
- swapper_pg_pmd:
++.section .initial_pg_pmd,"a",@progbits
+ ENTRY(initial_pg_pmd)
.fill 1024*KPMDS,4,0
#else
-+.section .swapper_pg_dir,"a",@progbits
- ENTRY(swapper_pg_dir)
++.section .initial_page_table,"a",@progbits
+ ENTRY(initial_page_table)
.fill 1024,4,0
#endif
-+.section .swapper_pg_fixmap,"a",@progbits
- swapper_pg_fixmap:
++.section .initial_pg_fixmap,"a",@progbits
+ ENTRY(initial_pg_fixmap)
.fill 1024,4,0
- #ifdef CONFIG_X86_TRAMPOLINE
-+.section .trampoline_pg_dir,"a",@progbits
- ENTRY(trampoline_pg_dir)
++.section .empty_zero_page,"a",@progbits
+ ENTRY(empty_zero_page)
+ .fill 4096,1,0
++.section .swapper_pg_dir,"a",@progbits
+ ENTRY(swapper_pg_dir)
+#ifdef CONFIG_X86_PAE
+ .fill 4,8,0
+#else
.fill 1024,4,0
- #endif
+#endif
+
-+.section .empty_zero_page,"a",@progbits
- ENTRY(empty_zero_page)
- .fill 4096,1,0
-
- /*
++/*
+ * The IDT has to be page-aligned to simplify the Pentium
+ * F0 0F bug workaround.. We have a special link segment
+ * for this.
@@ -12541,20 +12309,19 @@ index 75e3981..5674277 100644
+.section .idt,"a",@progbits
+ENTRY(idt_table)
+ .fill 256,8,0
-+
-+/*
+
+ /*
* This starts the data section.
*/
#ifdef CONFIG_X86_PAE
-__PAGE_ALIGNED_DATA
- /* Page-aligned for the benefit of paravirt? */
- .align PAGE_SIZE_asm
-+.section .swapper_pg_dir,"a",@progbits
-+
- ENTRY(swapper_pg_dir)
- .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
++.section .initial_page_table,"a",@progbits
+ ENTRY(initial_page_table)
+ .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
# if KPMDS == 3
-@@ -657,15 +742,24 @@ ENTRY(swapper_pg_dir)
+@@ -660,15 +749,24 @@ ENTRY(initial_page_table)
# error "Kernel PMDs should be 1, 2 or 3"
# endif
.align PAGE_SIZE_asm /* needs to be page-sized too */
@@ -12580,7 +12347,7 @@ index 75e3981..5674277 100644
early_recursion_flag:
.long 0
-@@ -701,7 +795,7 @@ fault_msg:
+@@ -704,7 +802,7 @@ fault_msg:
.word 0 # 32 bit align gdt_desc.address
boot_gdt_descr:
.word __BOOT_DS+7
@@ -12589,7 +12356,7 @@ index 75e3981..5674277 100644
.word 0 # 32-bit align idt_desc.address
idt_descr:
-@@ -712,7 +806,7 @@ idt_descr:
+@@ -715,7 +813,7 @@ idt_descr:
.word 0 # 32 bit align gdt_desc.address
ENTRY(early_gdt_descr)
.word GDT_ENTRIES*8-1
@@ -12598,7 +12365,7 @@ index 75e3981..5674277 100644
/*
* The boot_gdt must mirror the equivalent in setup.S and is
-@@ -721,5 +815,65 @@ ENTRY(early_gdt_descr)
+@@ -724,5 +822,65 @@ ENTRY(early_gdt_descr)
.align L1_CACHE_BYTES
ENTRY(boot_gdt)
.fill GDT_ENTRY_BOOT_CS,8,0
@@ -12666,10 +12433,9 @@ index 75e3981..5674277 100644
+ /* Be sure this is zeroed to avoid false validations in Xen */
+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
+ .endr
-diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
-index 3d1e6f1..708f920 100644
---- a/arch/x86/kernel/head_64.S
-+++ b/arch/x86/kernel/head_64.S
+diff -urNp linux-2.6.37/arch/x86/kernel/head_64.S linux-2.6.37/arch/x86/kernel/head_64.S
+--- linux-2.6.37/arch/x86/kernel/head_64.S 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/head_64.S 2011-01-17 02:41:01.000000000 -0500
@@ -19,6 +19,7 @@
#include <asm/cache.h>
#include <asm/processor-flags.h>
@@ -12678,7 +12444,7 @@ index 3d1e6f1..708f920 100644
#ifdef CONFIG_PARAVIRT
#include <asm/asm-offsets.h>
-@@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
+@@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
L4_START_KERNEL = pgd_index(__START_KERNEL_map)
L3_START_KERNEL = pud_index(__START_KERNEL_map)
@@ -12704,9 +12470,12 @@ index 3d1e6f1..708f920 100644
- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
--
++ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
+
- addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
--
++ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
++ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
+
- /* Add an Identity mapping if I am above 1G */
- leaq _text(%rip), %rdi
- andq $PMD_PAGE_MASK, %rdi
@@ -12715,14 +12484,11 @@ index 3d1e6f1..708f920 100644
- shrq $PUD_SHIFT, %rax
- andq $(PTRS_PER_PUD - 1), %rax
- jz ident_complete
-+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
-
+-
- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
- leaq level3_ident_pgt(%rip), %rbx
- movq %rdx, 0(%rbx, %rax, 8)
-+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
-+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
-
+-
- movq %rdi, %rax
- shrq $PMD_SHIFT, %rax
- andq $(PTRS_PER_PMD - 1), %rax
@@ -12762,7 +12528,7 @@ index 3d1e6f1..708f920 100644
1: wrmsr /* Make changes effective */
/* Setup cr0 */
-@@ -271,7 +268,7 @@ ENTRY(secondary_startup_64)
+@@ -270,7 +267,7 @@ ENTRY(secondary_startup_64)
bad_address:
jmp bad_address
@@ -12771,7 +12537,7 @@ index 3d1e6f1..708f920 100644
#ifdef CONFIG_EARLY_PRINTK
.globl early_idt_handlers
early_idt_handlers:
-@@ -316,18 +313,23 @@ ENTRY(early_idt_handler)
+@@ -315,18 +312,23 @@ ENTRY(early_idt_handler)
#endif /* EARLY_PRINTK */
1: hlt
jmp 1b
@@ -12796,7 +12562,15 @@ index 3d1e6f1..708f920 100644
#define NEXT_PAGE(name) \
.balign PAGE_SIZE; \
ENTRY(name)
-@@ -351,13 +353,36 @@ NEXT_PAGE(init_level4_pgt)
+@@ -339,7 +341,6 @@ ENTRY(name)
+ i = i + 1 ; \
+ .endr
+
+- .data
+ /*
+ * This default setting generates an ident mapping at address 0x100000
+ * and a mapping for the kernel that precisely maps virtual address
+@@ -350,13 +351,36 @@ NEXT_PAGE(init_level4_pgt)
.quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
.org init_level4_pgt + L4_PAGE_OFFSET*8, 0
.quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
@@ -12833,7 +12607,7 @@ index 3d1e6f1..708f920 100644
NEXT_PAGE(level3_kernel_pgt)
.fill L3_START_KERNEL,8,0
-@@ -365,20 +390,23 @@ NEXT_PAGE(level3_kernel_pgt)
+@@ -364,20 +388,23 @@ NEXT_PAGE(level3_kernel_pgt)
.quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
.quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
@@ -12865,7 +12639,7 @@ index 3d1e6f1..708f920 100644
NEXT_PAGE(level2_kernel_pgt)
/*
-@@ -391,33 +419,55 @@ NEXT_PAGE(level2_kernel_pgt)
+@@ -390,33 +417,55 @@ NEXT_PAGE(level2_kernel_pgt)
* If you want to increase this then increase MODULES_VADDR
* too.)
*/
@@ -12931,10 +12705,9 @@ index 3d1e6f1..708f920 100644
__PAGE_ALIGNED_BSS
.align PAGE_SIZE
-diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
-index 9c3bd4a..e1d9b35 100644
---- a/arch/x86/kernel/i386_ksyms_32.c
-+++ b/arch/x86/kernel/i386_ksyms_32.c
+diff -urNp linux-2.6.37/arch/x86/kernel/i386_ksyms_32.c linux-2.6.37/arch/x86/kernel/i386_ksyms_32.c
+--- linux-2.6.37/arch/x86/kernel/i386_ksyms_32.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/i386_ksyms_32.c 2011-01-17 02:41:01.000000000 -0500
@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
EXPORT_SYMBOL(cmpxchg8b_emu);
#endif
@@ -12956,10 +12729,9 @@ index 9c3bd4a..e1d9b35 100644
+#ifdef CONFIG_PAX_KERNEXEC
+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
+#endif
-diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
-index 43e9ccf..c40716b 100644
---- a/arch/x86/kernel/init_task.c
-+++ b/arch/x86/kernel/init_task.c
+diff -urNp linux-2.6.37/arch/x86/kernel/init_task.c linux-2.6.37/arch/x86/kernel/init_task.c
+--- linux-2.6.37/arch/x86/kernel/init_task.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/init_task.c 2011-01-17 02:41:01.000000000 -0500
@@ -38,5 +38,5 @@ EXPORT_SYMBOL(init_task);
* section. Since TSS's are completely CPU-local, we want them
* on exact cacheline boundaries, to eliminate cacheline ping-pong.
@@ -12968,10 +12740,9 @@ index 43e9ccf..c40716b 100644
-
+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
+EXPORT_SYMBOL(init_tss);
-diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
-index 8eec0ec..ef34e2d 100644
---- a/arch/x86/kernel/ioport.c
-+++ b/arch/x86/kernel/ioport.c
+diff -urNp linux-2.6.37/arch/x86/kernel/ioport.c linux-2.6.37/arch/x86/kernel/ioport.c
+--- linux-2.6.37/arch/x86/kernel/ioport.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/ioport.c 2011-01-17 02:41:01.000000000 -0500
@@ -6,6 +6,7 @@
#include <linux/sched.h>
#include <linux/kernel.h>
@@ -12980,7 +12751,7 @@ index 8eec0ec..ef34e2d 100644
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/ioport.h>
-@@ -41,6 +42,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
+@@ -41,6 +42,12 @@ asmlinkage long sys_ioperm(unsigned long
if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
return -EINVAL;
@@ -12993,7 +12764,7 @@ index 8eec0ec..ef34e2d 100644
if (turn_on && !capable(CAP_SYS_RAWIO))
return -EPERM;
-@@ -67,7 +74,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
+@@ -67,7 +74,7 @@ asmlinkage long sys_ioperm(unsigned long
* because the ->io_bitmap_max value must match the bitmap
* contents:
*/
@@ -13002,7 +12773,7 @@ index 8eec0ec..ef34e2d 100644
set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
-@@ -112,6 +119,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
+@@ -112,6 +119,12 @@ long sys_iopl(unsigned int level, struct
return -EINVAL;
/* Trying to gain more privileges? */
if (level > old) {
@@ -13015,11 +12786,10 @@ index 8eec0ec..ef34e2d 100644
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
}
-diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
-index 10709f2..eff1abb 100644
---- a/arch/x86/kernel/irq_32.c
-+++ b/arch/x86/kernel/irq_32.c
-@@ -94,7 +94,7 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
+diff -urNp linux-2.6.37/arch/x86/kernel/irq_32.c linux-2.6.37/arch/x86/kernel/irq_32.c
+--- linux-2.6.37/arch/x86/kernel/irq_32.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/irq_32.c 2011-01-24 18:04:15.000000000 -0500
+@@ -91,7 +91,7 @@ execute_on_irq_stack(int overflow, struc
return 0;
/* build the stack frame on the IRQ stack */
@@ -13028,29 +12798,89 @@ index 10709f2..eff1abb 100644
irqctx->tinfo.task = curctx->tinfo.task;
irqctx->tinfo.previous_esp = current_stack_pointer;
-@@ -175,7 +175,7 @@ asmlinkage void do_softirq(void)
+@@ -103,6 +103,10 @@ execute_on_irq_stack(int overflow, struc
+ (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
+ (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
+
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ __set_fs(irqctx->tinfo.addr_limit);
++#endif
++
+ if (unlikely(overflow))
+ call_on_stack(print_stack_overflow, isp);
+
+@@ -113,6 +117,11 @@ execute_on_irq_stack(int overflow, struc
+ : "0" (irq), "1" (desc), "2" (isp),
+ "D" (desc->handle_irq)
+ : "memory", "cc", "ecx");
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ __set_fs(curctx->tinfo.addr_limit);
++#endif
++
+ return 1;
+ }
+
+@@ -129,8 +138,7 @@ void __cpuinit irq_ctx_init(int cpu)
+ irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
+ THREAD_FLAGS,
+ THREAD_ORDER));
+- irqctx->tinfo.task = NULL;
+- irqctx->tinfo.exec_domain = NULL;
++ memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
+ irqctx->tinfo.cpu = cpu;
+ irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
+ irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
+@@ -140,10 +148,8 @@ void __cpuinit irq_ctx_init(int cpu)
+ irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
+ THREAD_FLAGS,
+ THREAD_ORDER));
+- irqctx->tinfo.task = NULL;
+- irqctx->tinfo.exec_domain = NULL;
++ memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
+ irqctx->tinfo.cpu = cpu;
+- irqctx->tinfo.preempt_count = 0;
+ irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
+
+ per_cpu(softirq_ctx, cpu) = irqctx;
+@@ -171,9 +177,18 @@ asmlinkage void do_softirq(void)
irqctx->tinfo.previous_esp = current_stack_pointer;
/* build the stack frame on the softirq stack */
- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ __set_fs(irqctx->tinfo.addr_limit);
++#endif
call_on_stack(__do_softirq, isp);
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ __set_fs(curctx->addr_limit);
++#endif
++
/*
-diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
-index 01ab17a..5512ff4 100644
---- a/arch/x86/kernel/kgdb.c
-+++ b/arch/x86/kernel/kgdb.c
-@@ -77,7 +77,7 @@ void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
- gdb_regs[GDB_CS] = regs->cs;
- gdb_regs[GDB_FS] = 0xFFFF;
- gdb_regs[GDB_GS] = 0xFFFF;
-- if (user_mode_vm(regs)) {
-+ if (user_mode(regs)) {
- gdb_regs[GDB_SS] = regs->ss;
- gdb_regs[GDB_SP] = regs->sp;
- } else {
-@@ -720,7 +720,7 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+ * Shouldnt happen, we returned above if in_interrupt():
+ */
+diff -urNp linux-2.6.37/arch/x86/kernel/kgdb.c linux-2.6.37/arch/x86/kernel/kgdb.c
+--- linux-2.6.37/arch/x86/kernel/kgdb.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/kgdb.c 2011-01-17 02:41:01.000000000 -0500
+@@ -123,11 +123,11 @@ char *dbg_get_reg(int regno, void *mem,
+ switch (regno) {
+ #ifdef CONFIG_X86_32
+ case GDB_SS:
+- if (!user_mode_vm(regs))
++ if (!user_mode(regs))
+ *(unsigned long *)mem = __KERNEL_DS;
+ break;
+ case GDB_SP:
+- if (!user_mode_vm(regs))
++ if (!user_mode(regs))
+ *(unsigned long *)mem = kernel_stack_pointer(regs);
+ break;
+ case GDB_GS:
+@@ -722,7 +722,7 @@ void kgdb_arch_set_pc(struct pt_regs *re
regs->ip = ip;
}
@@ -13059,16 +12889,13 @@ index 01ab17a..5512ff4 100644
/* Breakpoint instruction: */
.gdb_bpt_instr = { 0xcc },
.flags = KGDB_HW_BREAKPOINT,
-diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
-index 675879b..8c44581 100644
---- a/arch/x86/kernel/kprobes.c
-+++ b/arch/x86/kernel/kprobes.c
-@@ -114,9 +114,12 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
- s32 raddr;
+diff -urNp linux-2.6.37/arch/x86/kernel/kprobes.c linux-2.6.37/arch/x86/kernel/kprobes.c
+--- linux-2.6.37/arch/x86/kernel/kprobes.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/kprobes.c 2011-01-17 02:41:01.000000000 -0500
+@@ -115,8 +115,11 @@ static void __kprobes __synthesize_relat
} __attribute__((packed)) *insn;
-- insn = (struct __arch_relative_insn *)from;
-+ insn = (struct __arch_relative_insn *)(ktla_ktva(from));
+ insn = (struct __arch_relative_insn *)from;
+
+ pax_open_kernel();
insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
@@ -13077,7 +12904,16 @@ index 675879b..8c44581 100644
}
/* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
-@@ -315,7 +318,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
+@@ -153,7 +156,7 @@ static int __kprobes can_boost(kprobe_op
+ kprobe_opcode_t opcode;
+ kprobe_opcode_t *orig_opcodes = opcodes;
+
+- if (search_exception_tables((unsigned long)opcodes))
++ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
+ return 0; /* Page fault may occur on this address. */
+
+ retry:
+@@ -314,7 +317,9 @@ static int __kprobes __copy_instruction(
}
}
insn_get_length(&insn);
@@ -13087,7 +12923,7 @@ index 675879b..8c44581 100644
#ifdef CONFIG_X86_64
if (insn_rip_relative(&insn)) {
-@@ -339,7 +344,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
+@@ -338,7 +343,9 @@ static int __kprobes __copy_instruction(
(u8 *) dest;
BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
disp = (u8 *) dest + insn_offset_displacement(&insn);
@@ -13097,7 +12933,7 @@ index 675879b..8c44581 100644
}
#endif
return insn.length;
-@@ -353,12 +360,12 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)
+@@ -352,12 +359,12 @@ static void __kprobes arch_copy_kprobe(s
*/
__copy_instruction(p->ainsn.insn, p->addr, 0);
@@ -13112,7 +12948,7 @@ index 675879b..8c44581 100644
}
int __kprobes arch_prepare_kprobe(struct kprobe *p)
-@@ -475,7 +482,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
+@@ -474,7 +481,7 @@ static void __kprobes setup_singlestep(s
* nor set current_kprobe, because it doesn't use single
* stepping.
*/
@@ -13121,7 +12957,7 @@ index 675879b..8c44581 100644
preempt_enable_no_resched();
return;
}
-@@ -494,7 +501,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
+@@ -493,7 +500,7 @@ static void __kprobes setup_singlestep(s
if (p->opcode == BREAKPOINT_INSTRUCTION)
regs->ip = (unsigned long)p->addr;
else
@@ -13130,7 +12966,7 @@ index 675879b..8c44581 100644
}
/*
-@@ -573,7 +580,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
+@@ -572,7 +579,7 @@ static int __kprobes kprobe_handler(stru
setup_singlestep(p, regs, kcb, 0);
return 1;
}
@@ -13139,7 +12975,7 @@ index 675879b..8c44581 100644
/*
* The breakpoint instruction was removed right
* after we hit it. Another cpu has removed
-@@ -799,7 +806,7 @@ static void __kprobes resume_execution(struct kprobe *p,
+@@ -817,7 +824,7 @@ static void __kprobes resume_execution(s
struct pt_regs *regs, struct kprobe_ctlblk *kcb)
{
unsigned long *tos = stack_addr(regs);
@@ -13148,7 +12984,7 @@ index 675879b..8c44581 100644
unsigned long orig_ip = (unsigned long)p->addr;
kprobe_opcode_t *insn = p->ainsn.insn;
-@@ -982,7 +989,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
+@@ -999,7 +1006,7 @@ int __kprobes kprobe_exceptions_notify(s
struct die_args *args = data;
int ret = NOTIFY_DONE;
@@ -13157,11 +12993,42 @@ index 675879b..8c44581 100644
return ret;
switch (val) {
-diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
-index ea69726..604d066 100644
---- a/arch/x86/kernel/ldt.c
-+++ b/arch/x86/kernel/ldt.c
-@@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
+@@ -1368,7 +1375,7 @@ int __kprobes arch_prepare_optimized_kpr
+ * Verify if the address gap is in 2GB range, because this uses
+ * a relative jump.
+ */
+- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
++ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
+ if (abs(rel) > 0x7fffffff)
+ return -ERANGE;
+
+@@ -1389,11 +1396,11 @@ int __kprobes arch_prepare_optimized_kpr
+ synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
+
+ /* Set probe function call */
+- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
++ synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
+
+ /* Set returning jmp instruction at the tail of out-of-line buffer */
+ synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
+- (u8 *)op->kp.addr + op->optinsn.size);
++ (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
+
+ flush_icache_range((unsigned long) buf,
+ (unsigned long) buf + TMPL_END_IDX +
+@@ -1409,7 +1416,7 @@ int __kprobes arch_optimize_kprobe(struc
+ ((long)op->kp.addr + RELATIVEJUMP_SIZE));
+
+ /* Backup instructions which will be replaced by jump address */
+- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
++ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
+ RELATIVE_ADDR_SIZE);
+
+ jmp_code[0] = RELATIVEJUMP_OPCODE;
+diff -urNp linux-2.6.37/arch/x86/kernel/ldt.c linux-2.6.37/arch/x86/kernel/ldt.c
+--- linux-2.6.37/arch/x86/kernel/ldt.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/ldt.c 2011-01-17 02:41:01.000000000 -0500
+@@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, i
if (reload) {
#ifdef CONFIG_SMP
preempt_disable();
@@ -13177,7 +13044,7 @@ index ea69726..604d066 100644
#endif
}
if (oldsize) {
-@@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
+@@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t
return err;
for (i = 0; i < old->size; i++)
@@ -13186,7 +13053,7 @@ index ea69726..604d066 100644
return 0;
}
-@@ -116,6 +116,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+@@ -116,6 +116,24 @@ int init_new_context(struct task_struct
retval = copy_ldt(&mm->context, &old_mm->context);
mutex_unlock(&old_mm->context.lock);
}
@@ -13211,7 +13078,7 @@ index ea69726..604d066 100644
return retval;
}
-@@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
+@@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, u
}
}
@@ -13225,10 +13092,9 @@ index ea69726..604d066 100644
fill_ldt(&ldt, &ldt_info);
if (oldmode)
ldt.avl = 0;
-diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
-index a3fa43b..8966f4c 100644
---- a/arch/x86/kernel/machine_kexec_32.c
-+++ b/arch/x86/kernel/machine_kexec_32.c
+diff -urNp linux-2.6.37/arch/x86/kernel/machine_kexec_32.c linux-2.6.37/arch/x86/kernel/machine_kexec_32.c
+--- linux-2.6.37/arch/x86/kernel/machine_kexec_32.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/machine_kexec_32.c 2011-01-17 02:41:01.000000000 -0500
@@ -27,7 +27,7 @@
#include <asm/cacheflush.h>
#include <asm/debugreg.h>
@@ -13238,7 +13104,7 @@ index a3fa43b..8966f4c 100644
{
struct desc_ptr curidt;
-@@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16 limit)
+@@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16
}
@@ -13256,11 +13122,10 @@ index a3fa43b..8966f4c 100644
relocate_kernel_ptr = control_page;
page_list[PA_CONTROL_PAGE] = __pa(control_page);
-diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
-index e1af7c0..db0032e 100644
---- a/arch/x86/kernel/microcode_amd.c
-+++ b/arch/x86/kernel/microcode_amd.c
-@@ -331,7 +331,7 @@ static void microcode_fini_cpu_amd(int cpu)
+diff -urNp linux-2.6.37/arch/x86/kernel/microcode_amd.c linux-2.6.37/arch/x86/kernel/microcode_amd.c
+--- linux-2.6.37/arch/x86/kernel/microcode_amd.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/microcode_amd.c 2011-01-17 02:41:01.000000000 -0500
+@@ -331,7 +331,7 @@ static void microcode_fini_cpu_amd(int c
uci->mc = NULL;
}
@@ -13269,7 +13134,7 @@ index e1af7c0..db0032e 100644
.request_microcode_user = request_microcode_user,
.request_microcode_fw = request_microcode_fw,
.collect_cpu_info = collect_cpu_info_amd,
-@@ -339,7 +339,7 @@ static struct microcode_ops microcode_amd_ops = {
+@@ -339,7 +339,7 @@ static struct microcode_ops microcode_am
.microcode_fini_cpu = microcode_fini_cpu_amd,
};
@@ -13278,10 +13143,9 @@ index e1af7c0..db0032e 100644
{
return &microcode_amd_ops;
}
-diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
-index fa6551d..43662ff 100644
---- a/arch/x86/kernel/microcode_core.c
-+++ b/arch/x86/kernel/microcode_core.c
+diff -urNp linux-2.6.37/arch/x86/kernel/microcode_core.c linux-2.6.37/arch/x86/kernel/microcode_core.c
+--- linux-2.6.37/arch/x86/kernel/microcode_core.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/microcode_core.c 2011-01-17 02:41:01.000000000 -0500
@@ -92,7 +92,7 @@ MODULE_LICENSE("GPL");
#define MICROCODE_VERSION "2.00"
@@ -13291,11 +13155,10 @@ index fa6551d..43662ff 100644
/*
* Synchronization.
-diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
-index 3561702..f4807b7 100644
---- a/arch/x86/kernel/microcode_intel.c
-+++ b/arch/x86/kernel/microcode_intel.c
-@@ -446,13 +446,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
+diff -urNp linux-2.6.37/arch/x86/kernel/microcode_intel.c linux-2.6.37/arch/x86/kernel/microcode_intel.c
+--- linux-2.6.37/arch/x86/kernel/microcode_intel.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/microcode_intel.c 2011-01-17 02:41:01.000000000 -0500
+@@ -440,13 +440,13 @@ static enum ucode_state request_microcod
static int get_ucode_user(void *to, const void *from, size_t n)
{
@@ -13311,7 +13174,7 @@ index 3561702..f4807b7 100644
}
static void microcode_fini_cpu(int cpu)
-@@ -463,7 +463,7 @@ static void microcode_fini_cpu(int cpu)
+@@ -457,7 +457,7 @@ static void microcode_fini_cpu(int cpu)
uci->mc = NULL;
}
@@ -13320,7 +13183,7 @@ index 3561702..f4807b7 100644
.request_microcode_user = request_microcode_user,
.request_microcode_fw = request_microcode_fw,
.collect_cpu_info = collect_cpu_info,
-@@ -471,7 +471,7 @@ static struct microcode_ops microcode_intel_ops = {
+@@ -465,7 +465,7 @@ static struct microcode_ops microcode_in
.microcode_fini_cpu = microcode_fini_cpu,
};
@@ -13329,10 +13192,9 @@ index 3561702..f4807b7 100644
{
return &microcode_intel_ops;
}
-diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
-index e0bc186..0c3f714 100644
---- a/arch/x86/kernel/module.c
-+++ b/arch/x86/kernel/module.c
+diff -urNp linux-2.6.37/arch/x86/kernel/module.c linux-2.6.37/arch/x86/kernel/module.c
+--- linux-2.6.37/arch/x86/kernel/module.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/module.c 2011-01-17 02:41:01.000000000 -0500
@@ -35,7 +35,7 @@
#define DEBUGP(fmt...)
#endif
@@ -13363,7 +13225,7 @@ index e0bc186..0c3f714 100644
}
/* Free memory returned from module_alloc */
-@@ -59,6 +69,40 @@ void module_free(struct module *mod, void *module_region)
+@@ -59,6 +69,40 @@ void module_free(struct module *mod, voi
vfree(module_region);
}
@@ -13442,7 +13304,7 @@ index e0bc186..0c3f714 100644
break;
default:
printk(KERN_ERR "module %s: Unknown relocation: %u\n",
-@@ -154,21 +204,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
+@@ -154,21 +204,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
case R_X86_64_NONE:
break;
case R_X86_64_64:
@@ -13473,24 +13335,10 @@ index e0bc186..0c3f714 100644
#if 0
if ((s64)val != *(s32 *)loc)
goto overflow;
-diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
-index 676b8c7..870ba04 100644
---- a/arch/x86/kernel/paravirt-spinlocks.c
-+++ b/arch/x86/kernel/paravirt-spinlocks.c
-@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
- arch_spin_lock(lock);
- }
-
--struct pv_lock_ops pv_lock_ops = {
-+struct pv_lock_ops pv_lock_ops __read_only = {
- #ifdef CONFIG_SMP
- .spin_is_locked = __ticket_spin_is_locked,
- .spin_is_contended = __ticket_spin_is_contended,
-diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
-index 1db183e..d5174a5 100644
---- a/arch/x86/kernel/paravirt.c
-+++ b/arch/x86/kernel/paravirt.c
-@@ -122,7 +122,7 @@ unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
+diff -urNp linux-2.6.37/arch/x86/kernel/paravirt.c linux-2.6.37/arch/x86/kernel/paravirt.c
+--- linux-2.6.37/arch/x86/kernel/paravirt.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/paravirt.c 2011-01-17 02:41:01.000000000 -0500
+@@ -122,7 +122,7 @@ unsigned paravirt_patch_jmp(void *insnbu
* corresponding structure. */
static void *get_call_destination(u8 type)
{
@@ -13499,7 +13347,7 @@ index 1db183e..d5174a5 100644
.pv_init_ops = pv_init_ops,
.pv_time_ops = pv_time_ops,
.pv_cpu_ops = pv_cpu_ops,
-@@ -145,14 +145,14 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
+@@ -145,14 +145,14 @@ unsigned paravirt_patch_default(u8 type,
if (opfunc == NULL)
/* If there's no function, patch it with a ud2a (BUG) */
ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
@@ -13517,7 +13365,7 @@ index 1db183e..d5174a5 100644
ret = paravirt_patch_ident_64(insnbuf, len);
else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
-@@ -178,7 +178,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
+@@ -178,7 +178,7 @@ unsigned paravirt_patch_insns(void *insn
if (insn_len > len || start == NULL)
insn_len = len;
else
@@ -13580,7 +13428,7 @@ index 1db183e..d5174a5 100644
.read_cr2 = native_read_cr2,
.write_cr2 = native_write_cr2,
-@@ -463,6 +463,12 @@ struct pv_mmu_ops pv_mmu_ops = {
+@@ -462,6 +462,12 @@ struct pv_mmu_ops pv_mmu_ops = {
},
.set_fixmap = native_set_fixmap,
@@ -13593,11 +13441,22 @@ index 1db183e..d5174a5 100644
};
EXPORT_SYMBOL_GPL(pv_time_ops);
-diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
-index 078d4ec..f4b9b3c 100644
---- a/arch/x86/kernel/pci-calgary_64.c
-+++ b/arch/x86/kernel/pci-calgary_64.c
-@@ -475,7 +475,7 @@ static void calgary_free_coherent(struct device *dev, size_t size,
+diff -urNp linux-2.6.37/arch/x86/kernel/paravirt-spinlocks.c linux-2.6.37/arch/x86/kernel/paravirt-spinlocks.c
+--- linux-2.6.37/arch/x86/kernel/paravirt-spinlocks.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/paravirt-spinlocks.c 2011-01-17 02:41:01.000000000 -0500
+@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t
+ arch_spin_lock(lock);
+ }
+
+-struct pv_lock_ops pv_lock_ops = {
++struct pv_lock_ops pv_lock_ops __read_only = {
+ #ifdef CONFIG_SMP
+ .spin_is_locked = __ticket_spin_is_locked,
+ .spin_is_contended = __ticket_spin_is_contended,
+diff -urNp linux-2.6.37/arch/x86/kernel/pci-calgary_64.c linux-2.6.37/arch/x86/kernel/pci-calgary_64.c
+--- linux-2.6.37/arch/x86/kernel/pci-calgary_64.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/pci-calgary_64.c 2011-01-17 02:41:01.000000000 -0500
+@@ -476,7 +476,7 @@ static void calgary_free_coherent(struct
free_pages((unsigned long)vaddr, get_order(size));
}
@@ -13606,10 +13465,9 @@ index 078d4ec..f4b9b3c 100644
.alloc_coherent = calgary_alloc_coherent,
.free_coherent = calgary_free_coherent,
.map_sg = calgary_map_sg,
-diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
-index 4b7e3d8..0f9c3e9 100644
---- a/arch/x86/kernel/pci-dma.c
-+++ b/arch/x86/kernel/pci-dma.c
+diff -urNp linux-2.6.37/arch/x86/kernel/pci-dma.c linux-2.6.37/arch/x86/kernel/pci-dma.c
+--- linux-2.6.37/arch/x86/kernel/pci-dma.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/pci-dma.c 2011-01-17 02:41:01.000000000 -0500
@@ -16,7 +16,7 @@
static int forbid_dac __read_mostly;
@@ -13619,7 +13477,7 @@ index 4b7e3d8..0f9c3e9 100644
EXPORT_SYMBOL(dma_ops);
static int iommu_sac_force __read_mostly;
-@@ -248,7 +248,7 @@ early_param("iommu", iommu_setup);
+@@ -250,7 +250,7 @@ early_param("iommu", iommu_setup);
int dma_supported(struct device *dev, u64 mask)
{
@@ -13628,11 +13486,10 @@ index 4b7e3d8..0f9c3e9 100644
#ifdef CONFIG_PCI
if (mask > 0xffffffff && forbid_dac > 0) {
-diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
-index 0f7f130..ab480fd 100644
---- a/arch/x86/kernel/pci-gart_64.c
-+++ b/arch/x86/kernel/pci-gart_64.c
-@@ -699,7 +699,7 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
+diff -urNp linux-2.6.37/arch/x86/kernel/pci-gart_64.c linux-2.6.37/arch/x86/kernel/pci-gart_64.c
+--- linux-2.6.37/arch/x86/kernel/pci-gart_64.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/pci-gart_64.c 2011-01-17 02:41:01.000000000 -0500
+@@ -706,7 +706,7 @@ static __init int init_k8_gatt(struct ag
return -1;
}
@@ -13641,11 +13498,10 @@ index 0f7f130..ab480fd 100644
.map_sg = gart_map_sg,
.unmap_sg = gart_unmap_sg,
.map_page = gart_map_page,
-diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
-index 3af4af8..7950f48 100644
---- a/arch/x86/kernel/pci-nommu.c
-+++ b/arch/x86/kernel/pci-nommu.c
-@@ -95,7 +95,7 @@ static void nommu_sync_sg_for_device(struct device *dev,
+diff -urNp linux-2.6.37/arch/x86/kernel/pci-nommu.c linux-2.6.37/arch/x86/kernel/pci-nommu.c
+--- linux-2.6.37/arch/x86/kernel/pci-nommu.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/pci-nommu.c 2011-01-17 02:41:01.000000000 -0500
+@@ -95,7 +95,7 @@ static void nommu_sync_sg_for_device(str
flush_write_buffers();
}
@@ -13654,11 +13510,10 @@ index 3af4af8..7950f48 100644
.alloc_coherent = dma_generic_alloc_coherent,
.free_coherent = nommu_free_coherent,
.map_sg = nommu_map_sg,
-diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
-index a5bc528..29def6f 100644
---- a/arch/x86/kernel/pci-swiotlb.c
-+++ b/arch/x86/kernel/pci-swiotlb.c
-@@ -25,7 +25,7 @@ static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
+diff -urNp linux-2.6.37/arch/x86/kernel/pci-swiotlb.c linux-2.6.37/arch/x86/kernel/pci-swiotlb.c
+--- linux-2.6.37/arch/x86/kernel/pci-swiotlb.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/pci-swiotlb.c 2011-01-17 02:41:01.000000000 -0500
+@@ -26,7 +26,7 @@ static void *x86_swiotlb_alloc_coherent(
return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
}
@@ -13667,72 +13522,10 @@ index a5bc528..29def6f 100644
.mapping_error = swiotlb_dma_mapping_error,
.alloc_coherent = x86_swiotlb_alloc_coherent,
.free_coherent = swiotlb_free_coherent,
-diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
-index e7e3521..c8f0251 100644
---- a/arch/x86/kernel/process.c
-+++ b/arch/x86/kernel/process.c
-@@ -73,7 +73,7 @@ void exit_thread(void)
- unsigned long *bp = t->io_bitmap_ptr;
-
- if (bp) {
-- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
-+ struct tss_struct *tss = init_tss + get_cpu();
-
- t->io_bitmap_ptr = NULL;
- clear_thread_flag(TIF_IO_BITMAP);
-@@ -107,7 +107,7 @@ void show_regs_common(void)
-
- printk(KERN_CONT "\n");
- printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s %s/%s\n",
-- current->pid, current->comm, print_tainted(),
-+ task_pid_nr(current), current->comm, print_tainted(),
- init_utsname()->release,
- (int)strcspn(init_utsname()->version, " "),
- init_utsname()->version, board, product);
-@@ -117,6 +117,9 @@ void flush_thread(void)
- {
- struct task_struct *tsk = current;
-
-+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR)
-+ loadsegment(gs, 0);
-+#endif
- flush_ptrace_hw_breakpoint(tsk);
- memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
- /*
-@@ -279,8 +282,8 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
- regs.di = (unsigned long) arg;
-
- #ifdef CONFIG_X86_32
-- regs.ds = __USER_DS;
-- regs.es = __USER_DS;
-+ regs.ds = __KERNEL_DS;
-+ regs.es = __KERNEL_DS;
- regs.fs = __KERNEL_PERCPU;
- regs.gs = __KERNEL_STACK_CANARY;
- #else
-@@ -689,17 +692,3 @@ static int __init idle_setup(char *str)
- return 0;
- }
- early_param("idle", idle_setup);
--
--unsigned long arch_align_stack(unsigned long sp)
--{
-- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
-- sp -= get_random_int() % 8192;
-- return sp & ~0xf;
--}
--
--unsigned long arch_randomize_brk(struct mm_struct *mm)
--{
-- unsigned long range_end = mm->brk + 0x02000000;
-- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
--}
--
-diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
-index 8d12878..350b125 100644
---- a/arch/x86/kernel/process_32.c
-+++ b/arch/x86/kernel/process_32.c
-@@ -65,6 +65,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
+diff -urNp linux-2.6.37/arch/x86/kernel/process_32.c linux-2.6.37/arch/x86/kernel/process_32.c
+--- linux-2.6.37/arch/x86/kernel/process_32.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/process_32.c 2011-01-17 02:41:01.000000000 -0500
+@@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __as
unsigned long thread_saved_pc(struct task_struct *tsk)
{
return ((unsigned long *)tsk->thread.sp)[3];
@@ -13740,7 +13533,7 @@ index 8d12878..350b125 100644
}
#ifndef CONFIG_SMP
-@@ -126,7 +127,7 @@ void __show_regs(struct pt_regs *regs, int all)
+@@ -130,15 +131,14 @@ void __show_regs(struct pt_regs *regs, i
unsigned long sp;
unsigned short ss, gs;
@@ -13748,8 +13541,17 @@ index 8d12878..350b125 100644
+ if (user_mode(regs)) {
sp = regs->sp;
ss = regs->ss & 0xffff;
- gs = get_user_gs(regs);
-@@ -196,7 +197,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
+- gs = get_user_gs(regs);
+ } else {
+ sp = kernel_stack_pointer(regs);
+ savesegment(ss, ss);
+- savesegment(gs, gs);
+ }
++ gs = get_user_gs(regs);
+
+ show_regs_common();
+
+@@ -200,7 +200,7 @@ int copy_thread(unsigned long clone_flag
struct task_struct *tsk;
int err;
@@ -13758,15 +13560,7 @@ index 8d12878..350b125 100644
*childregs = *regs;
childregs->ax = 0;
childregs->sp = sp;
-@@ -230,6 +231,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
- * Set a new TLS for the child thread?
- */
- if (clone_flags & CLONE_SETTLS)
-+//XXX needs set_fs()?
- err = do_set_thread_area(p, -1,
- (struct user_desc __user *)childregs->si, 0);
-
-@@ -293,7 +295,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+@@ -297,7 +297,7 @@ __switch_to(struct task_struct *prev_p,
struct thread_struct *prev = &prev_p->thread,
*next = &next_p->thread;
int cpu = smp_processor_id();
@@ -13775,19 +13569,18 @@ index 8d12878..350b125 100644
bool preload_fpu;
/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
-@@ -328,6 +330,11 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+@@ -332,6 +332,10 @@ __switch_to(struct task_struct *prev_p,
*/
lazy_save_gs(prev->gs);
+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ if (!segment_eq(task_thread_info(prev_p)->addr_limit, task_thread_info(next_p)->addr_limit))
-+ __set_fs(task_thread_info(next_p)->addr_limit, cpu);
++ __set_fs(task_thread_info(next_p)->addr_limit);
+#endif
+
/*
* Load the per-thread Thread-Local Storage descriptor.
*/
-@@ -404,3 +411,27 @@ unsigned long get_wchan(struct task_struct *p)
+@@ -408,3 +412,27 @@ unsigned long get_wchan(struct task_stru
return 0;
}
@@ -13815,11 +13608,10 @@ index 8d12878..350b125 100644
+ load_sp0(init_tss + smp_processor_id(), thread);
+}
+#endif
-diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
-index 3c2422a..2904e2d 100644
---- a/arch/x86/kernel/process_64.c
-+++ b/arch/x86/kernel/process_64.c
-@@ -87,7 +87,7 @@ static void __exit_idle(void)
+diff -urNp linux-2.6.37/arch/x86/kernel/process_64.c linux-2.6.37/arch/x86/kernel/process_64.c
+--- linux-2.6.37/arch/x86/kernel/process_64.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/process_64.c 2011-01-17 02:41:01.000000000 -0500
+@@ -89,7 +89,7 @@ static void __exit_idle(void)
void exit_idle(void)
{
/* idle loop has pid 0 */
@@ -13828,7 +13620,7 @@ index 3c2422a..2904e2d 100644
return;
__exit_idle();
}
-@@ -375,7 +375,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+@@ -380,7 +380,7 @@ __switch_to(struct task_struct *prev_p,
struct thread_struct *prev = &prev_p->thread;
struct thread_struct *next = &next_p->thread;
int cpu = smp_processor_id();
@@ -13837,7 +13629,7 @@ index 3c2422a..2904e2d 100644
unsigned fsindex, gsindex;
bool preload_fpu;
-@@ -528,12 +528,11 @@ unsigned long get_wchan(struct task_struct *p)
+@@ -533,12 +533,11 @@ unsigned long get_wchan(struct task_stru
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
stack = (unsigned long)task_stack_page(p);
@@ -13852,12 +13644,74 @@ index 3c2422a..2904e2d 100644
return 0;
ip = *(u64 *)(fp+8);
if (!in_sched_functions(ip))
-diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
-index 70c4872..25fb80f 100644
---- a/arch/x86/kernel/ptrace.c
-+++ b/arch/x86/kernel/ptrace.c
-@@ -804,7 +804,7 @@ static const struct user_regset_view user_x86_32_view; /* Initialized below. */
- long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+diff -urNp linux-2.6.37/arch/x86/kernel/process.c linux-2.6.37/arch/x86/kernel/process.c
+--- linux-2.6.37/arch/x86/kernel/process.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/process.c 2011-01-17 02:41:01.000000000 -0500
+@@ -74,7 +74,7 @@ void exit_thread(void)
+ unsigned long *bp = t->io_bitmap_ptr;
+
+ if (bp) {
+- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
++ struct tss_struct *tss = init_tss + get_cpu();
+
+ t->io_bitmap_ptr = NULL;
+ clear_thread_flag(TIF_IO_BITMAP);
+@@ -108,7 +108,7 @@ void show_regs_common(void)
+
+ printk(KERN_CONT "\n");
+ printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s %s/%s\n",
+- current->pid, current->comm, print_tainted(),
++ task_pid_nr(current), current->comm, print_tainted(),
+ init_utsname()->release,
+ (int)strcspn(init_utsname()->version, " "),
+ init_utsname()->version, board, product);
+@@ -118,6 +118,9 @@ void flush_thread(void)
+ {
+ struct task_struct *tsk = current;
+
++#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
++ loadsegment(gs, 0);
++#endif
+ flush_ptrace_hw_breakpoint(tsk);
+ memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
+ /*
+@@ -280,10 +283,10 @@ int kernel_thread(int (*fn)(void *), voi
+ regs.di = (unsigned long) arg;
+
+ #ifdef CONFIG_X86_32
+- regs.ds = __USER_DS;
+- regs.es = __USER_DS;
++ regs.ds = __KERNEL_DS;
++ regs.es = __KERNEL_DS;
+ regs.fs = __KERNEL_PERCPU;
+- regs.gs = __KERNEL_STACK_CANARY;
++ savesegment(gs, regs.gs);
+ #else
+ regs.ss = __KERNEL_DS;
+ #endif
+@@ -658,17 +661,3 @@ static int __init idle_setup(char *str)
+ return 0;
+ }
+ early_param("idle", idle_setup);
+-
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+- sp -= get_random_int() % 8192;
+- return sp & ~0xf;
+-}
+-
+-unsigned long arch_randomize_brk(struct mm_struct *mm)
+-{
+- unsigned long range_end = mm->brk + 0x02000000;
+- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
+-}
+-
+diff -urNp linux-2.6.37/arch/x86/kernel/ptrace.c linux-2.6.37/arch/x86/kernel/ptrace.c
+--- linux-2.6.37/arch/x86/kernel/ptrace.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/ptrace.c 2011-01-17 02:41:01.000000000 -0500
+@@ -805,7 +805,7 @@ long arch_ptrace(struct task_struct *chi
+ unsigned long addr, unsigned long data)
{
int ret;
- unsigned long __user *datap = (unsigned long __user *)data;
@@ -13865,24 +13719,24 @@ index 70c4872..25fb80f 100644
switch (request) {
/* read the word at location addr in the USER area. */
-@@ -891,14 +891,14 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
- if (addr < 0)
+@@ -890,14 +890,14 @@ long arch_ptrace(struct task_struct *chi
+ if ((int) addr < 0)
return -EIO;
ret = do_get_thread_area(child, addr,
-- (struct user_desc __user *) data);
-+ (__force struct user_desc __user *) data);
+- (struct user_desc __user *)data);
++ (__force struct user_desc __user *) data);
break;
case PTRACE_SET_THREAD_AREA:
- if (addr < 0)
+ if ((int) addr < 0)
return -EIO;
ret = do_set_thread_area(child, addr,
-- (struct user_desc __user *) data, 0);
-+ (__force struct user_desc __user *) data, 0);
+- (struct user_desc __user *)data, 0);
++ (__force struct user_desc __user *) data, 0);
break;
#endif
-@@ -1315,7 +1315,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
+@@ -1314,7 +1314,7 @@ static void fill_sigtrap_info(struct tas
memset(info, 0, sizeof(*info));
info->si_signo = SIGTRAP;
info->si_code = si_code;
@@ -13891,10 +13745,9 @@ index 70c4872..25fb80f 100644
}
void user_single_step_siginfo(struct task_struct *tsk,
-diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
-index 76a0d71..0df1a0c 100644
---- a/arch/x86/kernel/reboot.c
-+++ b/arch/x86/kernel/reboot.c
+diff -urNp linux-2.6.37/arch/x86/kernel/reboot.c linux-2.6.37/arch/x86/kernel/reboot.c
+--- linux-2.6.37/arch/x86/kernel/reboot.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/reboot.c 2011-01-17 02:41:01.000000000 -0500
@@ -33,7 +33,7 @@ void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
@@ -13904,7 +13757,7 @@ index 76a0d71..0df1a0c 100644
enum reboot_type reboot_type = BOOT_KBD;
int reboot_force;
-@@ -284,7 +284,7 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
+@@ -284,7 +284,7 @@ static struct dmi_system_id __initdata r
DMI_MATCH(DMI_BOARD_NAME, "P4S800"),
},
},
@@ -13931,7 +13784,7 @@ index 76a0d71..0df1a0c 100644
};
static const struct desc_ptr
-@@ -354,7 +354,7 @@ static const unsigned char jump_to_bios [] =
+@@ -354,7 +354,7 @@ static const unsigned char jump_to_bios
* specified by the code and length parameters.
* We assume that length will aways be less that 100!
*/
@@ -13940,18 +13793,7 @@ index 76a0d71..0df1a0c 100644
{
local_irq_disable();
-@@ -374,8 +374,8 @@ void machine_real_restart(const unsigned char *code, int length)
- /* Remap the kernel at virtual address zero, as well as offset zero
- from the kernel segment. This assumes the kernel segment starts at
- virtual address PAGE_OFFSET. */
-- memcpy(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
-- sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
-+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
-+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
-
- /*
- * Use `swapper_pg_dir' as our page directory.
-@@ -387,16 +387,15 @@ void machine_real_restart(const unsigned char *code, int length)
+@@ -381,16 +381,15 @@ void machine_real_restart(const unsigned
boot)". This seems like a fairly standard thing that gets set by
REBOOT.COM programs, and the previous reset routine did this
too. */
@@ -13971,11 +13813,10 @@ index 76a0d71..0df1a0c 100644
/* Set up the IDT for real mode. */
load_idt(&real_mode_idt);
-diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
-index 6600cfd..46a5ea3 100644
---- a/arch/x86/kernel/setup.c
-+++ b/arch/x86/kernel/setup.c
-@@ -704,7 +704,7 @@ static void __init trim_bios_range(void)
+diff -urNp linux-2.6.37/arch/x86/kernel/setup.c linux-2.6.37/arch/x86/kernel/setup.c
+--- linux-2.6.37/arch/x86/kernel/setup.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/setup.c 2011-01-17 02:41:01.000000000 -0500
+@@ -654,7 +654,7 @@ static void __init trim_bios_range(void)
* area (640->1Mb) as ram even though it is not.
* take them out.
*/
@@ -13984,7 +13825,7 @@ index 6600cfd..46a5ea3 100644
sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
}
-@@ -791,14 +791,14 @@ void __init setup_arch(char **cmdline_p)
+@@ -790,14 +790,14 @@ void __init setup_arch(char **cmdline_p)
if (!boot_params.hdr.root_flags)
root_mountflags &= ~MS_RDONLY;
@@ -14004,10 +13845,9 @@ index 6600cfd..46a5ea3 100644
data_resource.end = virt_to_phys(_edata)-1;
bss_resource.start = virt_to_phys(&__bss_start);
bss_resource.end = virt_to_phys(&__bss_stop)-1;
-diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
-index a60df9a..0083b50 100644
---- a/arch/x86/kernel/setup_percpu.c
-+++ b/arch/x86/kernel/setup_percpu.c
+diff -urNp linux-2.6.37/arch/x86/kernel/setup_percpu.c linux-2.6.37/arch/x86/kernel/setup_percpu.c
+--- linux-2.6.37/arch/x86/kernel/setup_percpu.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/setup_percpu.c 2011-01-17 02:41:01.000000000 -0500
@@ -21,19 +21,17 @@
#include <asm/cpu.h>
#include <asm/stackprotector.h>
@@ -14032,7 +13872,7 @@ index a60df9a..0083b50 100644
[0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
};
EXPORT_SYMBOL(__per_cpu_offset);
-@@ -161,10 +159,10 @@ static inline void setup_percpu_segment(int cpu)
+@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(
{
#ifdef CONFIG_X86_32
struct desc_struct gdt;
@@ -14046,7 +13886,7 @@ index a60df9a..0083b50 100644
write_gdt_entry(get_cpu_gdt_table(cpu),
GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
#endif
-@@ -213,6 +211,11 @@ void __init setup_per_cpu_areas(void)
+@@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void)
/* alrighty, percpu areas up and running */
delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
for_each_possible_cpu(cpu) {
@@ -14058,7 +13898,7 @@ index a60df9a..0083b50 100644
per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
per_cpu(cpu_number, cpu) = cpu;
-@@ -249,6 +252,12 @@ void __init setup_per_cpu_areas(void)
+@@ -243,6 +246,12 @@ void __init setup_per_cpu_areas(void)
set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
#endif
#endif
@@ -14071,11 +13911,10 @@ index a60df9a..0083b50 100644
/*
* Up to this point, the boot CPU has been using .init.data
* area. Reload any changed state for the boot CPU.
-diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
-index 4fd173c..9dc1de7 100644
---- a/arch/x86/kernel/signal.c
-+++ b/arch/x86/kernel/signal.c
-@@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsigned long sp)
+diff -urNp linux-2.6.37/arch/x86/kernel/signal.c linux-2.6.37/arch/x86/kernel/signal.c
+--- linux-2.6.37/arch/x86/kernel/signal.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/signal.c 2011-01-17 02:41:01.000000000 -0500
+@@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsi
* Align the stack pointer according to the i386 ABI,
* i.e. so that on function entry ((sp + 4) & 15) == 0.
*/
@@ -14084,7 +13923,7 @@ index 4fd173c..9dc1de7 100644
#else /* !CONFIG_X86_32 */
sp = round_down(sp, 16) - 8;
#endif
-@@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
+@@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, str
* Return an always-bogus address instead so we will die with SIGSEGV.
*/
if (onsigstack && !likely(on_sig_stack(sp)))
@@ -14098,7 +13937,7 @@ index 4fd173c..9dc1de7 100644
return (void __user *)sp;
}
-@@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
+@@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigactio
}
if (current->mm->context.vdso)
@@ -14110,7 +13949,7 @@ index 4fd173c..9dc1de7 100644
if (ka->sa.sa_flags & SA_RESTORER)
restorer = ka->sa.sa_restorer;
-@@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
+@@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigactio
* reasons and because gdb uses it as a signature to notice
* signal handler stack frames.
*/
@@ -14119,7 +13958,7 @@ index 4fd173c..9dc1de7 100644
if (err)
return -EFAULT;
-@@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+@@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, str
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
/* Set up to return from userspace. */
@@ -14131,7 +13970,7 @@ index 4fd173c..9dc1de7 100644
if (ka->sa.sa_flags & SA_RESTORER)
restorer = ka->sa.sa_restorer;
put_user_ex(restorer, &frame->pretcode);
-@@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+@@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, str
* reasons and because gdb uses it as a signature to notice
* signal handler stack frames.
*/
@@ -14140,7 +13979,7 @@ index 4fd173c..9dc1de7 100644
} put_user_catch(err);
if (err)
-@@ -780,7 +783,7 @@ static void do_signal(struct pt_regs *regs)
+@@ -780,7 +783,7 @@ static void do_signal(struct pt_regs *re
* X86_32: vm86 regs switched out by assembly code before reaching
* here, so testing against kernel CS suffices.
*/
@@ -14149,10 +13988,9 @@ index 4fd173c..9dc1de7 100644
return;
if (current_thread_info()->status & TS_RESTORE_SIGMASK)
-diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
-index 821ee1b..e29b497 100644
---- a/arch/x86/kernel/smpboot.c
-+++ b/arch/x86/kernel/smpboot.c
+diff -urNp linux-2.6.37/arch/x86/kernel/smpboot.c linux-2.6.37/arch/x86/kernel/smpboot.c
+--- linux-2.6.37/arch/x86/kernel/smpboot.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/smpboot.c 2011-01-17 02:41:01.000000000 -0500
@@ -786,7 +786,11 @@ do_rest:
(unsigned long)task_stack_page(c_idle.idle) -
KERNEL_STACK_OFFSET + THREAD_SIZE;
@@ -14165,7 +14003,7 @@ index 821ee1b..e29b497 100644
initial_code = (unsigned long)start_secondary;
stack_start.sp = (void *) c_idle.idle->thread.sp;
-@@ -926,6 +930,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
+@@ -926,6 +930,12 @@ int __cpuinit native_cpu_up(unsigned int
per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
@@ -14176,13 +14014,12 @@ index 821ee1b..e29b497 100644
+#endif
+
err = do_boot_cpu(apicid, cpu);
-
if (err) {
-diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
-index 58de45e..43bc689 100644
---- a/arch/x86/kernel/step.c
-+++ b/arch/x86/kernel/step.c
-@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
+ pr_debug("do_boot_cpu failed %d\n", err);
+diff -urNp linux-2.6.37/arch/x86/kernel/step.c linux-2.6.37/arch/x86/kernel/step.c
+--- linux-2.6.37/arch/x86/kernel/step.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/step.c 2011-01-17 02:41:01.000000000 -0500
+@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
struct desc_struct *desc;
unsigned long base;
@@ -14195,7 +14032,17 @@ index 58de45e..43bc689 100644
addr = -1L; /* bogus selector, access would fault */
else {
desc = child->mm->context.ldt + seg;
-@@ -53,6 +53,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
+@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
+ addr += base;
+ }
+ mutex_unlock(&child->mm->context.lock);
+- }
++ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
++ addr = ktla_ktva(addr);
+
+ return addr;
+ }
+@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
unsigned char opcode[15];
unsigned long addr = convert_ip_to_linear(child, regs);
@@ -14205,7 +14052,7 @@ index 58de45e..43bc689 100644
copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
for (i = 0; i < copied; i++) {
switch (opcode[i]) {
-@@ -74,7 +77,7 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
+@@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct t
#ifdef CONFIG_X86_64
case 0x40 ... 0x4f:
@@ -14214,16 +14061,35 @@ index 58de45e..43bc689 100644
/* 32-bit mode: register increment */
return 0;
/* 64-bit mode: REX prefix */
-diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
-index 196552b..90850f1 100644
---- a/arch/x86/kernel/sys_i386_32.c
-+++ b/arch/x86/kernel/sys_i386_32.c
-@@ -24,6 +24,228 @@
+diff -urNp linux-2.6.37/arch/x86/kernel/syscall_table_32.S linux-2.6.37/arch/x86/kernel/syscall_table_32.S
+--- linux-2.6.37/arch/x86/kernel/syscall_table_32.S 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/syscall_table_32.S 2011-01-17 02:41:01.000000000 -0500
+@@ -1,3 +1,4 @@
++.section .rodata,"a",@progbits
+ ENTRY(sys_call_table)
+ .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
+ .long sys_exit
+diff -urNp linux-2.6.37/arch/x86/kernel/sys_i386_32.c linux-2.6.37/arch/x86/kernel/sys_i386_32.c
+--- linux-2.6.37/arch/x86/kernel/sys_i386_32.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/sys_i386_32.c 2011-01-17 02:41:01.000000000 -0500
+@@ -24,17 +24,224 @@
#include <asm/syscalls.h>
+-/*
+- * Do a system call from kernel instead of calling sys_execve so we
+- * end up with proper pt_regs.
+- */
+-int kernel_execve(const char *filename,
+- const char *const argv[],
+- const char *const envp[])
+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
-+{
+ {
+- long __res;
+- asm volatile ("int $0x80"
+- : "=a" (__res)
+- : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
+- return __res;
+ unsigned long pax_task_size = TASK_SIZE;
+
+#ifdef CONFIG_PAX_SEGMEXEC
@@ -14442,15 +14308,10 @@ index 196552b..90850f1 100644
+ mm->cached_hole_size = ~0UL;
+
+ return addr;
-+}
-+
- /*
- * Do a system call from kernel instead of calling sys_execve so we
- * end up with proper pt_regs.
-diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
-index ff14a50..a00edfd 100644
---- a/arch/x86/kernel/sys_x86_64.c
-+++ b/arch/x86/kernel/sys_x86_64.c
+ }
+diff -urNp linux-2.6.37/arch/x86/kernel/sys_x86_64.c linux-2.6.37/arch/x86/kernel/sys_x86_64.c
+--- linux-2.6.37/arch/x86/kernel/sys_x86_64.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/sys_x86_64.c 2011-01-17 02:41:01.000000000 -0500
@@ -32,8 +32,8 @@ out:
return error;
}
@@ -14462,7 +14323,7 @@ index ff14a50..a00edfd 100644
{
if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
unsigned long new_begin;
-@@ -52,7 +52,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
+@@ -52,7 +52,7 @@ static void find_start_end(unsigned long
*begin = new_begin;
}
} else {
@@ -14471,7 +14332,7 @@ index ff14a50..a00edfd 100644
*end = TASK_SIZE;
}
}
-@@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+@@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
if (flags & MAP_FIXED)
return addr;
@@ -14503,7 +14364,7 @@ index ff14a50..a00edfd 100644
/*
* Remember the place where we stopped the search:
*/
-@@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
{
struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
@@ -14512,7 +14373,7 @@ index ff14a50..a00edfd 100644
/* requested length too big for entire address space */
if (len > TASK_SIZE)
-@@ -141,12 +144,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -141,12 +144,15 @@ arch_get_unmapped_area_topdown(struct fi
if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
goto bottomup;
@@ -14530,7 +14391,7 @@ index ff14a50..a00edfd 100644
return addr;
}
-@@ -162,7 +168,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -162,7 +168,7 @@ arch_get_unmapped_area_topdown(struct fi
/* make sure it can fit in the remaining address space */
if (addr > len) {
vma = find_vma(mm, addr-len);
@@ -14539,7 +14400,7 @@ index ff14a50..a00edfd 100644
/* remember the address as a hint for next time */
return mm->free_area_cache = addr-len;
}
-@@ -179,7 +185,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -179,7 +185,7 @@ arch_get_unmapped_area_topdown(struct fi
* return with success:
*/
vma = find_vma(mm, addr);
@@ -14572,19 +14433,9 @@ index ff14a50..a00edfd 100644
mm->cached_hole_size = ~0UL;
return addr;
-diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
-index 8b37293..520368b 100644
---- a/arch/x86/kernel/syscall_table_32.S
-+++ b/arch/x86/kernel/syscall_table_32.S
-@@ -1,3 +1,4 @@
-+.section .rodata,"a",@progbits
- ENTRY(sys_call_table)
- .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
- .long sys_exit
-diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
-index fb5cc5e1..b457784 100644
---- a/arch/x86/kernel/time.c
-+++ b/arch/x86/kernel/time.c
+diff -urNp linux-2.6.37/arch/x86/kernel/time.c linux-2.6.37/arch/x86/kernel/time.c
+--- linux-2.6.37/arch/x86/kernel/time.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/time.c 2011-01-17 02:41:01.000000000 -0500
@@ -26,17 +26,13 @@
int timer_ack;
#endif
@@ -14605,7 +14456,7 @@ index fb5cc5e1..b457784 100644
#else
unsigned long *sp =
(unsigned long *)kernel_stack_pointer(regs);
-@@ -45,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
+@@ -45,11 +41,17 @@ unsigned long profile_pc(struct pt_regs
* or above a saved flags. Eflags has bits 22-31 zero,
* kernel addresses don't.
*/
@@ -14623,11 +14474,10 @@ index fb5cc5e1..b457784 100644
}
return pc;
}
-diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
-index 6bb7b85..dd853e1 100644
---- a/arch/x86/kernel/tls.c
-+++ b/arch/x86/kernel/tls.c
-@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
+diff -urNp linux-2.6.37/arch/x86/kernel/tls.c linux-2.6.37/arch/x86/kernel/tls.c
+--- linux-2.6.37/arch/x86/kernel/tls.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/tls.c 2011-01-17 02:41:01.000000000 -0500
+@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
return -EINVAL;
@@ -14639,10 +14489,9 @@ index 6bb7b85..dd853e1 100644
set_tls_desc(p, idx, &info, 1);
return 0;
-diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
-index 8508237..229b664 100644
---- a/arch/x86/kernel/trampoline_32.S
-+++ b/arch/x86/kernel/trampoline_32.S
+diff -urNp linux-2.6.37/arch/x86/kernel/trampoline_32.S linux-2.6.37/arch/x86/kernel/trampoline_32.S
+--- linux-2.6.37/arch/x86/kernel/trampoline_32.S 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/trampoline_32.S 2011-01-17 02:41:01.000000000 -0500
@@ -32,6 +32,12 @@
#include <asm/segment.h>
#include <asm/page_types.h>
@@ -14665,10 +14514,9 @@ index 8508237..229b664 100644
# These need to be in the same 64K segment as the above;
# hence we don't use the boot_gdt_descr defined in head.S
-diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
-index 3af2dff..3b2d3ad 100644
---- a/arch/x86/kernel/trampoline_64.S
-+++ b/arch/x86/kernel/trampoline_64.S
+diff -urNp linux-2.6.37/arch/x86/kernel/trampoline_64.S linux-2.6.37/arch/x86/kernel/trampoline_64.S
+--- linux-2.6.37/arch/x86/kernel/trampoline_64.S 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/trampoline_64.S 2011-01-17 02:41:01.000000000 -0500
@@ -91,7 +91,7 @@ startup_32:
movl $__KERNEL_DS, %eax # Initialize the %ds segment register
movl %eax, %ds
@@ -14687,10 +14535,9 @@ index 3af2dff..3b2d3ad 100644
.long tgdt - r_base
.short 0
.quad 0x00cf9b000000ffff # __KERNEL32_CS
-diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
-index 4d0f3ed..b680aa1 100644
---- a/arch/x86/kernel/traps.c
-+++ b/arch/x86/kernel/traps.c
+diff -urNp linux-2.6.37/arch/x86/kernel/traps.c linux-2.6.37/arch/x86/kernel/traps.c
+--- linux-2.6.37/arch/x86/kernel/traps.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/traps.c 2011-01-17 02:41:01.000000000 -0500
@@ -70,12 +70,6 @@ asmlinkage int system_call(void);
/* Do we ignore FPU interrupts ? */
@@ -14704,7 +14551,7 @@ index 4d0f3ed..b680aa1 100644
#endif
DECLARE_BITMAP(used_vectors, NR_VECTORS);
-@@ -110,13 +104,13 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
+@@ -110,13 +104,13 @@ static inline void preempt_conditional_c
}
static void __kprobes
@@ -14720,7 +14567,7 @@ index 4d0f3ed..b680aa1 100644
/*
* traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
* On nmi (interrupt 2), do_trap should not be called.
-@@ -127,7 +121,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
+@@ -127,7 +121,7 @@ do_trap(int trapnr, int signr, char *str
}
#endif
@@ -14759,7 +14606,7 @@ index 4d0f3ed..b680aa1 100644
return;
#ifdef CONFIG_X86_32
-@@ -257,14 +263,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
+@@ -257,14 +263,30 @@ do_general_protection(struct pt_regs *re
conditional_sti(regs);
#ifdef CONFIG_X86_32
@@ -14806,7 +14653,7 @@ index 4d0f3ed..b680aa1 100644
die("general protection fault", regs, error_code);
}
-@@ -565,7 +594,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
+@@ -572,7 +601,7 @@ dotraplinkage void __kprobes do_debug(st
/* It's safe to allow irq's after DR6 has been saved */
preempt_conditional_sti(regs);
@@ -14815,7 +14662,7 @@ index 4d0f3ed..b680aa1 100644
handle_vm86_trap((struct kernel_vm86_regs *) regs,
error_code, 1);
preempt_conditional_cli(regs);
-@@ -579,7 +608,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
+@@ -586,7 +615,7 @@ dotraplinkage void __kprobes do_debug(st
* We already checked v86 mode above, so we can check for kernel mode
* by just checking the CPL of CS.
*/
@@ -14824,7 +14671,7 @@ index 4d0f3ed..b680aa1 100644
tsk->thread.debugreg6 &= ~DR_STEP;
set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
regs->flags &= ~X86_EFLAGS_TF;
-@@ -608,7 +637,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
+@@ -615,7 +644,7 @@ void math_error(struct pt_regs *regs, in
return;
conditional_sti(regs);
@@ -14833,11 +14680,10 @@ index 4d0f3ed..b680aa1 100644
{
if (!fixup_exception(regs)) {
task->thread.error_code = error_code;
-diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
-index 4094ae0..8ba3527 100644
---- a/arch/x86/kernel/tsc.c
-+++ b/arch/x86/kernel/tsc.c
-@@ -833,7 +833,7 @@ static struct dmi_system_id __initdata bad_tsc_dmi_table[] = {
+diff -urNp linux-2.6.37/arch/x86/kernel/tsc.c linux-2.6.37/arch/x86/kernel/tsc.c
+--- linux-2.6.37/arch/x86/kernel/tsc.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/tsc.c 2011-01-17 02:41:01.000000000 -0500
+@@ -837,7 +837,7 @@ static struct dmi_system_id __initdata b
DMI_MATCH(DMI_BOARD_NAME, "2635FA0"),
},
},
@@ -14846,10 +14692,9 @@ index 4094ae0..8ba3527 100644
};
static void __init check_system_tsc_reliable(void)
-diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
-index 61fb985..5401452 100644
---- a/arch/x86/kernel/vm86_32.c
-+++ b/arch/x86/kernel/vm86_32.c
+diff -urNp linux-2.6.37/arch/x86/kernel/vm86_32.c linux-2.6.37/arch/x86/kernel/vm86_32.c
+--- linux-2.6.37/arch/x86/kernel/vm86_32.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/vm86_32.c 2011-01-17 02:41:01.000000000 -0500
@@ -41,6 +41,7 @@
#include <linux/ptrace.h>
#include <linux/audit.h>
@@ -14858,7 +14703,7 @@ index 61fb985..5401452 100644
#include <asm/uaccess.h>
#include <asm/io.h>
-@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
+@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
do_exit(SIGSEGV);
}
@@ -14867,7 +14712,7 @@ index 61fb985..5401452 100644
current->thread.sp0 = current->thread.saved_sp0;
current->thread.sysenter_cs = __KERNEL_CS;
load_sp0(tss, &current->thread);
-@@ -207,6 +208,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
+@@ -207,6 +208,13 @@ int sys_vm86old(struct vm86_struct __use
struct task_struct *tsk;
int tmp, ret = -EPERM;
@@ -14881,7 +14726,7 @@ index 61fb985..5401452 100644
tsk = current;
if (tsk->thread.saved_sp0)
goto out;
-@@ -237,6 +245,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
+@@ -237,6 +245,14 @@ int sys_vm86(unsigned long cmd, unsigned
int tmp, ret;
struct vm86plus_struct __user *v86;
@@ -14896,7 +14741,7 @@ index 61fb985..5401452 100644
tsk = current;
switch (cmd) {
case VM86_REQUEST_IRQ:
-@@ -323,7 +339,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
+@@ -323,7 +339,7 @@ static void do_sys_vm86(struct kernel_vm
tsk->thread.saved_fs = info->regs32->fs;
tsk->thread.saved_gs = get_user_gs(info->regs32);
@@ -14905,7 +14750,7 @@ index 61fb985..5401452 100644
tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
if (cpu_has_sep)
tsk->thread.sysenter_cs = 0;
-@@ -528,7 +544,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
+@@ -528,7 +544,7 @@ static void do_int(struct kernel_vm86_re
goto cannot_handle;
if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
goto cannot_handle;
@@ -14914,174 +14759,9 @@ index 61fb985..5401452 100644
if (get_user(segoffs, intr_ptr))
goto cannot_handle;
if ((segoffs >> 16) == BIOSSEG)
-diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
-index ce9fbac..26bc0c8 100644
---- a/arch/x86/kernel/vmi_32.c
-+++ b/arch/x86/kernel/vmi_32.c
-@@ -46,12 +46,17 @@ typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void);
- typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);
-
- #define call_vrom_func(rom,func) \
-- (((VROMFUNC *)(rom->func))())
-+ (((VROMFUNC *)(ktva_ktla(rom.func)))())
-
- #define call_vrom_long_func(rom,func,arg) \
-- (((VROMLONGFUNC *)(rom->func)) (arg))
--
--static struct vrom_header *vmi_rom;
-+({\
-+ u64 __reloc = ((VROMLONGFUNC *)(ktva_ktla(rom.func))) (arg);\
-+ struct vmi_relocation_info *const __rel = (struct vmi_relocation_info *)&__reloc;\
-+ __rel->eip = (unsigned char *)ktva_ktla((unsigned long)__rel->eip);\
-+ __reloc;\
-+})
-+
-+static struct vrom_header vmi_rom __attribute((__section__(".vmi.rom"), __aligned__(PAGE_SIZE)));
- static int disable_pge;
- static int disable_pse;
- static int disable_sep;
-@@ -78,10 +83,10 @@ static struct {
- void (*set_initial_ap_state)(int, int);
- void (*halt)(void);
- void (*set_lazy_mode)(int mode);
--} vmi_ops;
-+} vmi_ops __read_only;
-
- /* Cached VMI operations */
--struct vmi_timer_ops vmi_timer_ops;
-+struct vmi_timer_ops vmi_timer_ops __read_only;
-
- /*
- * VMI patching routines.
-@@ -96,7 +101,7 @@ struct vmi_timer_ops vmi_timer_ops;
- static inline void patch_offset(void *insnbuf,
- unsigned long ip, unsigned long dest)
- {
-- *(unsigned long *)(insnbuf+1) = dest-ip-5;
-+ *(unsigned long *)(insnbuf+1) = dest-ip-5;
- }
-
- static unsigned patch_internal(int call, unsigned len, void *insnbuf,
-@@ -104,6 +109,7 @@ static unsigned patch_internal(int call, unsigned len, void *insnbuf,
- {
- u64 reloc;
- struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
-+
- reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
- switch(rel->type) {
- case VMI_RELOCATION_CALL_REL:
-@@ -382,13 +388,13 @@ static void vmi_set_pud(pud_t *pudp, pud_t pudval)
-
- static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
- {
-- const pte_t pte = { .pte = 0 };
-+ const pte_t pte = __pte(0ULL);
- vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
- }
-
- static void vmi_pmd_clear(pmd_t *pmd)
- {
-- const pte_t pte = { .pte = 0 };
-+ const pte_t pte = __pte(0ULL);
- vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
- }
- #endif
-@@ -416,8 +422,8 @@ vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
- ap.ss = __KERNEL_DS;
- ap.esp = (unsigned long) start_esp;
-
-- ap.ds = __USER_DS;
-- ap.es = __USER_DS;
-+ ap.ds = __KERNEL_DS;
-+ ap.es = __KERNEL_DS;
- ap.fs = __KERNEL_PERCPU;
- ap.gs = __KERNEL_STACK_CANARY;
-
-@@ -464,6 +470,18 @@ static void vmi_leave_lazy_mmu(void)
- paravirt_leave_lazy_mmu();
- }
-
-+#ifdef CONFIG_PAX_KERNEXEC
-+static unsigned long vmi_pax_open_kernel(void)
-+{
-+ return 0;
-+}
-+
-+static unsigned long vmi_pax_close_kernel(void)
-+{
-+ return 0;
-+}
-+#endif
-+
- static inline int __init check_vmi_rom(struct vrom_header *rom)
- {
- struct pci_header *pci;
-@@ -476,6 +494,10 @@ static inline int __init check_vmi_rom(struct vrom_header *rom)
- return 0;
- if (rom->vrom_signature != VMI_SIGNATURE)
- return 0;
-+ if (rom->rom_length * 512 > sizeof(*rom)) {
-+ printk(KERN_WARNING "PAX: VMI: ROM size too big: %x\n", rom->rom_length * 512);
-+ return 0;
-+ }
- if (rom->api_version_maj != VMI_API_REV_MAJOR ||
- rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
- printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
-@@ -540,7 +562,7 @@ static inline int __init probe_vmi_rom(void)
- struct vrom_header *romstart;
- romstart = (struct vrom_header *)isa_bus_to_virt(base);
- if (check_vmi_rom(romstart)) {
-- vmi_rom = romstart;
-+ vmi_rom = *romstart;
- return 1;
- }
- }
-@@ -816,6 +838,11 @@ static inline int __init activate_vmi(void)
-
- para_fill(pv_irq_ops.safe_halt, Halt);
-
-+#ifdef CONFIG_PAX_KERNEXEC
-+ pv_mmu_ops.pax_open_kernel = vmi_pax_open_kernel;
-+ pv_mmu_ops.pax_close_kernel = vmi_pax_close_kernel;
-+#endif
-+
- /*
- * Alternative instruction rewriting doesn't happen soon enough
- * to convert VMI_IRET to a call instead of a jump; so we have
-@@ -833,16 +860,16 @@ static inline int __init activate_vmi(void)
-
- void __init vmi_init(void)
- {
-- if (!vmi_rom)
-+ if (!vmi_rom.rom_signature)
- probe_vmi_rom();
- else
-- check_vmi_rom(vmi_rom);
-+ check_vmi_rom(&vmi_rom);
-
- /* In case probing for or validating the ROM failed, basil */
-- if (!vmi_rom)
-+ if (!vmi_rom.rom_signature)
- return;
-
-- reserve_top_address(-vmi_rom->virtual_top);
-+ reserve_top_address(-vmi_rom.virtual_top);
-
- #ifdef CONFIG_X86_IO_APIC
- /* This is virtual hardware; timer routing is wired correctly */
-@@ -854,7 +881,7 @@ void __init vmi_activate(void)
- {
- unsigned long flags;
-
-- if (!vmi_rom)
-+ if (!vmi_rom.rom_signature)
- return;
-
- local_irq_save(flags);
-diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
-index d0bb522..fdc8dce 100644
---- a/arch/x86/kernel/vmlinux.lds.S
-+++ b/arch/x86/kernel/vmlinux.lds.S
+diff -urNp linux-2.6.37/arch/x86/kernel/vmlinux.lds.S linux-2.6.37/arch/x86/kernel/vmlinux.lds.S
+--- linux-2.6.37/arch/x86/kernel/vmlinux.lds.S 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/vmlinux.lds.S 2011-01-17 02:41:01.000000000 -0500
@@ -26,6 +26,13 @@
#include <asm/page_types.h>
#include <asm/cache.h>
@@ -15096,7 +14776,7 @@ index d0bb522..fdc8dce 100644
#undef i386 /* in case the preprocessor is a 32bit one */
-@@ -34,13 +41,13 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)
+@@ -34,11 +41,9 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONF
#ifdef CONFIG_X86_32
OUTPUT_ARCH(i386)
ENTRY(phys_startup_32)
@@ -15107,12 +14787,8 @@ index d0bb522..fdc8dce 100644
-jiffies_64 = jiffies;
#endif
-+jiffies = jiffies_64;
-+
#if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
- /*
- * On 64-bit, align RODATA to 2MB so that even with CONFIG_DEBUG_RODATA
-@@ -69,31 +76,46 @@ jiffies_64 = jiffies;
+@@ -69,31 +74,46 @@ jiffies_64 = jiffies;
PHDRS {
text PT_LOAD FLAGS(5); /* R_E */
@@ -15167,7 +14843,7 @@ index d0bb522..fdc8dce 100644
HEAD_TEXT
#ifdef CONFIG_X86_32
. = ALIGN(PAGE_SIZE);
-@@ -108,13 +130,52 @@ SECTIONS
+@@ -108,13 +128,47 @@ SECTIONS
IRQENTRY_TEXT
*(.fixup)
*(.gnu.warning)
@@ -15180,11 +14856,6 @@ index d0bb522..fdc8dce 100644
+
+#ifdef CONFIG_X86_32
+ . = ALIGN(PAGE_SIZE);
-+ .vmi.rom : AT(ADDR(.vmi.rom) - LOAD_OFFSET) {
-+ *(.vmi.rom)
-+ } :module
-+
-+ . = ALIGN(PAGE_SIZE);
+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
+
+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
@@ -15202,37 +14873,37 @@ index d0bb522..fdc8dce 100644
+ /* End of text section */
+ _etext = . - __KERNEL_TEXT_OFFSET;
+ }
-+
+
+- EXCEPTION_TABLE(16) :text = 0x9090
+#ifdef CONFIG_X86_32
+ . = ALIGN(PAGE_SIZE);
+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
+ *(.idt)
+ . = ALIGN(PAGE_SIZE);
+ *(.empty_zero_page)
-+ *(.swapper_pg_fixmap)
-+ *(.swapper_pg_pmd)
++ *(.initial_pg_fixmap)
++ *(.initial_pg_pmd)
++ *(.initial_page_table)
+ *(.swapper_pg_dir)
-+ *(.trampoline_pg_dir)
+ } :rodata
+#endif
+
+ . = ALIGN(PAGE_SIZE);
+ NOTES :rodata :note
-
-- EXCEPTION_TABLE(16) :text = 0x9090
++
+ EXCEPTION_TABLE(16) :rodata
X64_ALIGN_DEBUG_RODATA_BEGIN
RO_DATA(PAGE_SIZE)
-@@ -122,16 +183,20 @@ SECTIONS
+@@ -122,16 +176,20 @@ SECTIONS
/* Data */
.data : AT(ADDR(.data) - LOAD_OFFSET) {
+
+#ifdef CONFIG_PAX_KERNEXEC
-+ . = ALIGN(HPAGE_SIZE);
++ . = ALIGN(HPAGE_SIZE);
+#else
-+ . = ALIGN(PAGE_SIZE);
++ . = ALIGN(PAGE_SIZE);
+#endif
+
/* Start of data section */
@@ -15248,7 +14919,16 @@ index d0bb522..fdc8dce 100644
PAGE_ALIGNED_DATA(PAGE_SIZE)
-@@ -194,12 +259,6 @@ SECTIONS
+@@ -140,6 +198,8 @@ SECTIONS
+ DATA_DATA
+ CONSTRUCTORS
+
++ jiffies = jiffies_64;
++
+ /* rarely changed data like cpu maps */
+ READ_MOSTLY_DATA(INTERNODE_CACHE_BYTES)
+
+@@ -194,12 +254,6 @@ SECTIONS
}
vgetcpu_mode = VVIRT(.vgetcpu_mode);
@@ -15261,7 +14941,7 @@ index d0bb522..fdc8dce 100644
.vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
*(.vsyscall_3)
}
-@@ -215,12 +274,19 @@ SECTIONS
+@@ -215,12 +269,19 @@ SECTIONS
#endif /* CONFIG_X86_64 */
/* Init code and data - will be freed after init */
@@ -15284,7 +14964,7 @@ index d0bb522..fdc8dce 100644
/*
* percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
* output PHDR, so the next output section - .init.text - should
-@@ -229,12 +295,27 @@ SECTIONS
+@@ -229,12 +290,27 @@ SECTIONS
PERCPU_VADDR(0, :percpu)
#endif
@@ -15317,10 +14997,10 @@ index d0bb522..fdc8dce 100644
.x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
__x86_cpu_dev_start = .;
-@@ -260,19 +341,11 @@ SECTIONS
- *(.altinstr_replacement)
+@@ -288,19 +364,12 @@ SECTIONS
+ __iommu_table_end = .;
}
-
+ . = ALIGN(8);
- /*
- * .exit.text is discard at runtime, not link time, to deal with
- * references from .altinstructions and .eh_frame
@@ -15328,17 +15008,17 @@ index d0bb522..fdc8dce 100644
- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
- EXIT_TEXT
- }
--
+
.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
EXIT_DATA
}
-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
+#ifndef CONFIG_SMP
- PERCPU(PAGE_SIZE)
+ PERCPU(THREAD_SIZE)
#endif
-@@ -291,16 +364,10 @@ SECTIONS
+@@ -319,16 +388,10 @@ SECTIONS
.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
__smp_locks = .;
*(.smp_locks)
@@ -15356,7 +15036,7 @@ index d0bb522..fdc8dce 100644
/* BSS */
. = ALIGN(PAGE_SIZE);
.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
-@@ -316,6 +383,7 @@ SECTIONS
+@@ -344,6 +407,7 @@ SECTIONS
__brk_base = .;
. += 64 * 1024; /* 64k alignment slop space */
*(.brk_reservation) /* areas brk users have reserved */
@@ -15364,7 +15044,7 @@ index d0bb522..fdc8dce 100644
__brk_limit = .;
}
-@@ -342,13 +410,12 @@ SECTIONS
+@@ -370,13 +434,12 @@ SECTIONS
* for the boot processor.
*/
#define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
@@ -15379,11 +15059,10 @@ index d0bb522..fdc8dce 100644
"kernel image bigger than KERNEL_IMAGE_SIZE");
#ifdef CONFIG_SMP
-diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
-index 1c0c6ab..937756d 100644
---- a/arch/x86/kernel/vsyscall_64.c
-+++ b/arch/x86/kernel/vsyscall_64.c
-@@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
+diff -urNp linux-2.6.37/arch/x86/kernel/vsyscall_64.c linux-2.6.37/arch/x86/kernel/vsyscall_64.c
+--- linux-2.6.37/arch/x86/kernel/vsyscall_64.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/vsyscall_64.c 2011-01-17 02:41:01.000000000 -0500
+@@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wa
write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
/* copy vsyscall data */
@@ -15391,7 +15070,7 @@ index 1c0c6ab..937756d 100644
vsyscall_gtod_data.clock.vread = clock->vread;
vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
vsyscall_gtod_data.clock.mask = clock->mask;
-@@ -203,7 +204,7 @@ vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
+@@ -208,7 +209,7 @@ vgetcpu(unsigned *cpu, unsigned *node, s
We do this here because otherwise user space would do it on
its own in a likely inferior way (no access to jiffies).
If you don't like it pass NULL. */
@@ -15400,10 +15079,9 @@ index 1c0c6ab..937756d 100644
p = tcache->blob[1];
} else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
/* Load per CPU data from RDTSCP */
-diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
-index 1b950d1..ca83b2a 100644
---- a/arch/x86/kernel/x8664_ksyms_64.c
-+++ b/arch/x86/kernel/x8664_ksyms_64.c
+diff -urNp linux-2.6.37/arch/x86/kernel/x8664_ksyms_64.c linux-2.6.37/arch/x86/kernel/x8664_ksyms_64.c
+--- linux-2.6.37/arch/x86/kernel/x8664_ksyms_64.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/x8664_ksyms_64.c 2011-01-17 02:41:01.000000000 -0500
@@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
EXPORT_SYMBOL(copy_user_generic_string);
EXPORT_SYMBOL(copy_user_generic_unrolled);
@@ -15413,20 +15091,19 @@ index 1b950d1..ca83b2a 100644
EXPORT_SYMBOL(copy_page);
EXPORT_SYMBOL(clear_page);
-diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
-index 37e68fc..121259e 100644
---- a/arch/x86/kernel/xsave.c
-+++ b/arch/x86/kernel/xsave.c
-@@ -54,7 +54,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
+diff -urNp linux-2.6.37/arch/x86/kernel/xsave.c linux-2.6.37/arch/x86/kernel/xsave.c
+--- linux-2.6.37/arch/x86/kernel/xsave.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kernel/xsave.c 2011-01-17 02:41:01.000000000 -0500
+@@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_
fx_sw_user->xstate_size > fx_sw_user->extended_size)
- return -1;
+ return -EINVAL;
- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
fx_sw_user->extended_size -
FP_XSTATE_MAGIC2_SIZE));
- /*
-@@ -196,7 +196,7 @@ fx_only:
+ if (err)
+@@ -267,7 +267,7 @@ fx_only:
* the other extended state.
*/
xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
@@ -15435,7 +15112,7 @@ index 37e68fc..121259e 100644
}
/*
-@@ -228,7 +228,7 @@ int restore_i387_xstate(void __user *buf)
+@@ -299,7 +299,7 @@ int restore_i387_xstate(void __user *buf
if (use_xsave())
err = restore_user_xstate(buf);
else
@@ -15444,34 +15121,27 @@ index 37e68fc..121259e 100644
buf);
if (unlikely(err)) {
/*
-diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
-index 582c8fc..24806a4 100644
---- a/arch/x86/kvm/emulate.c
-+++ b/arch/x86/kvm/emulate.c
-@@ -88,11 +88,11 @@
- #define Src2CL (1<<29)
+diff -urNp linux-2.6.37/arch/x86/kvm/emulate.c linux-2.6.37/arch/x86/kvm/emulate.c
+--- linux-2.6.37/arch/x86/kvm/emulate.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kvm/emulate.c 2011-01-17 02:41:01.000000000 -0500
+@@ -96,7 +96,7 @@
#define Src2ImmByte (2<<29)
#define Src2One (3<<29)
--#define Src2Imm16 (4<<29)
--#define Src2Mem16 (5<<29) /* Used for Ep encoding. First argument has to be
-+#define Src2Imm16 (4U<<29)
-+#define Src2Mem16 (5U<<29) /* Used for Ep encoding. First argument has to be
- in memory and second argument is located
- immediately after the first one in memory. */
+ #define Src2Imm (4<<29)
-#define Src2Mask (7<<29)
+#define Src2Mask (7U<<29)
- enum {
- Group1_80, Group1_81, Group1_82, Group1_83,
-@@ -446,6 +446,7 @@ static u32 group2_table[] = {
+ #define X2(x...) x, x
+ #define X3(x...) X2(x), x
+@@ -197,6 +197,7 @@ struct group_dual {
- #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \
+ #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \
do { \
+ unsigned long _tmp; \
__asm__ __volatile__ ( \
_PRE_EFLAGS("0", "4", "2") \
_op _suffix " %"_x"3,%1; " \
-@@ -459,8 +460,6 @@ static u32 group2_table[] = {
+@@ -210,8 +211,6 @@ struct group_dual {
/* Raw emulation: instruction has two explicit operands. */
#define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
do { \
@@ -15479,20 +15149,19 @@ index 582c8fc..24806a4 100644
- \
switch ((_dst).bytes) { \
case 2: \
- ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
-@@ -476,7 +475,6 @@ static u32 group2_table[] = {
+ ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\
+@@ -227,7 +226,6 @@ struct group_dual {
#define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
do { \
- unsigned long _tmp; \
switch ((_dst).bytes) { \
case 1: \
- ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
-diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
-index 1eb7a4a..66513e00 100644
---- a/arch/x86/kvm/lapic.c
-+++ b/arch/x86/kvm/lapic.c
-@@ -52,7 +52,7 @@
+ ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \
+diff -urNp linux-2.6.37/arch/x86/kvm/lapic.c linux-2.6.37/arch/x86/kvm/lapic.c
+--- linux-2.6.37/arch/x86/kvm/lapic.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kvm/lapic.c 2011-01-17 02:41:01.000000000 -0500
+@@ -53,7 +53,7 @@
#define APIC_BUS_CYCLE_NS 1
/* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
@@ -15501,11 +15170,10 @@ index 1eb7a4a..66513e00 100644
#define APIC_LVT_NUM 6
/* 14 is the version for Xeon and Pentium 8.4.8*/
-diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
-index 9ca7032..36aa477 100644
---- a/arch/x86/kvm/svm.c
-+++ b/arch/x86/kvm/svm.c
-@@ -2825,7 +2825,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
+diff -urNp linux-2.6.37/arch/x86/kvm/svm.c linux-2.6.37/arch/x86/kvm/svm.c
+--- linux-2.6.37/arch/x86/kvm/svm.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kvm/svm.c 2011-01-17 02:41:01.000000000 -0500
+@@ -3023,7 +3023,11 @@ static void reload_tss(struct kvm_vcpu *
int cpu = raw_smp_processor_id();
struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
@@ -15517,7 +15185,7 @@ index 9ca7032..36aa477 100644
load_TR_desc();
}
-@@ -3370,7 +3374,7 @@ static void svm_fpu_deactivate(struct kvm_vcpu *vcpu)
+@@ -3600,7 +3604,7 @@ static void svm_fpu_deactivate(struct kv
update_cr0_intercept(svm);
}
@@ -15526,14 +15194,13 @@ index 9ca7032..36aa477 100644
.cpu_has_kvm_support = has_svm,
.disabled_by_bios = is_disabled,
.hardware_setup = svm_hardware_setup,
-diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
-index 39ac456..37d565b 100644
---- a/arch/x86/kvm/vmx.c
-+++ b/arch/x86/kvm/vmx.c
-@@ -654,7 +654,11 @@ static void reload_tss(void)
+diff -urNp linux-2.6.37/arch/x86/kvm/vmx.c linux-2.6.37/arch/x86/kvm/vmx.c
+--- linux-2.6.37/arch/x86/kvm/vmx.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kvm/vmx.c 2011-01-17 02:41:01.000000000 -0500
+@@ -705,7 +705,11 @@ static void reload_tss(void)
+ struct desc_struct *descs;
- native_store_gdt(&gdt);
- descs = (void *)gdt.address;
+ descs = (void *)gdt->address;
+
+ pax_open_kernel();
descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
@@ -15542,7 +15209,7 @@ index 39ac456..37d565b 100644
load_TR_desc();
}
-@@ -1554,8 +1558,11 @@ static __init int hardware_setup(void)
+@@ -1589,8 +1593,11 @@ static __init int hardware_setup(void)
if (!cpu_has_vmx_flexpriority())
flexpriority_enabled = 0;
@@ -15556,7 +15223,7 @@ index 39ac456..37d565b 100644
if (enable_ept && !cpu_has_vmx_ept_2m_page())
kvm_disable_largepages();
-@@ -2537,7 +2544,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
+@@ -2575,7 +2582,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
@@ -15565,7 +15232,7 @@ index 39ac456..37d565b 100644
vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
-@@ -3913,6 +3920,12 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -3959,6 +3966,12 @@ static void vmx_vcpu_run(struct kvm_vcpu
"jmp .Lkvm_vmx_return \n\t"
".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
".Lkvm_vmx_return: "
@@ -15578,7 +15245,7 @@ index 39ac456..37d565b 100644
/* Save guest registers, load host registers, keep flags */
"xchg %0, (%%"R"sp) \n\t"
"mov %%"R"ax, %c[rax](%0) \n\t"
-@@ -3959,8 +3972,13 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -4005,6 +4018,11 @@ static void vmx_vcpu_run(struct kvm_vcpu
[r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
#endif
[cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
@@ -15588,21 +15255,18 @@ index 39ac456..37d565b 100644
+#endif
+
: "cc", "memory"
-- , R"bx", R"di", R"si"
-+ , R"ax", R"bx", R"di", R"si"
+ , R"ax", R"bx", R"di", R"si"
#ifdef CONFIG_X86_64
- , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
- #endif
-@@ -3974,7 +3992,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
- if (vmx->rmode.irq.pending)
- fixup_rmode_irq(vmx);
+@@ -4018,7 +4036,7 @@ static void vmx_vcpu_run(struct kvm_vcpu
+
+ vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
vmx->launched = 1;
- vmx_complete_interrupts(vmx);
-@@ -4195,7 +4213,7 @@ static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
+ vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
+@@ -4253,7 +4271,7 @@ static void vmx_set_supported_cpuid(u32
{
}
@@ -15611,11 +15275,10 @@ index 39ac456..37d565b 100644
.cpu_has_kvm_support = cpu_has_kvm_support,
.disabled_by_bios = vmx_disabled_by_bios,
.hardware_setup = hardware_setup,
-diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
-index eee5cdd..415d3fd 100644
---- a/arch/x86/kvm/x86.c
-+++ b/arch/x86/kvm/x86.c
-@@ -86,7 +86,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu);
+diff -urNp linux-2.6.37/arch/x86/kvm/x86.c linux-2.6.37/arch/x86/kvm/x86.c
+--- linux-2.6.37/arch/x86/kvm/x86.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/kvm/x86.c 2011-01-17 02:41:01.000000000 -0500
+@@ -92,7 +92,7 @@ static void update_cr8_intercept(struct
static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
struct kvm_cpuid_entry2 __user *entries);
@@ -15624,7 +15287,7 @@ index eee5cdd..415d3fd 100644
EXPORT_SYMBOL_GPL(kvm_x86_ops);
int ignore_msrs = 0;
-@@ -112,38 +112,38 @@ static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
+@@ -118,38 +118,38 @@ static struct kvm_shared_msrs_global __r
static DEFINE_PER_CPU(struct kvm_shared_msrs, shared_msrs);
struct kvm_stats_debugfs_item debugfs_entries[] = {
@@ -15695,7 +15358,7 @@ index eee5cdd..415d3fd 100644
{ NULL }
};
-@@ -1672,6 +1672,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
+@@ -1979,6 +1979,8 @@ long kvm_arch_dev_ioctl(struct file *fil
if (n < msr_list.nmsrs)
goto out;
r = -EFAULT;
@@ -15704,7 +15367,7 @@ index eee5cdd..415d3fd 100644
if (copy_to_user(user_msr_list->indices, &msrs_to_save,
num_msrs_to_save * sizeof(u32)))
goto out;
-@@ -2103,7 +2105,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
+@@ -2446,7 +2448,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
struct kvm_interrupt *irq)
{
@@ -15713,7 +15376,7 @@ index eee5cdd..415d3fd 100644
return -EINVAL;
if (irqchip_in_kernel(vcpu->kvm))
return -ENXIO;
-@@ -4076,10 +4078,10 @@ void kvm_after_handle_nmi(struct kvm_vcpu *vcpu)
+@@ -4623,10 +4625,10 @@ void kvm_after_handle_nmi(struct kvm_vcp
}
EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
@@ -15726,10 +15389,9 @@ index eee5cdd..415d3fd 100644
if (kvm_x86_ops) {
printk(KERN_ERR "kvm: already loaded the other module\n");
-diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
-index 71e080d..9a90110 100644
---- a/arch/x86/lib/atomic64_cx8_32.S
-+++ b/arch/x86/lib/atomic64_cx8_32.S
+diff -urNp linux-2.6.37/arch/x86/lib/atomic64_cx8_32.S linux-2.6.37/arch/x86/lib/atomic64_cx8_32.S
+--- linux-2.6.37/arch/x86/lib/atomic64_cx8_32.S 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/lib/atomic64_cx8_32.S 2011-01-17 02:41:01.000000000 -0500
@@ -86,13 +86,23 @@ ENTRY(atomic64_\func\()_return_cx8)
movl %edx, %ecx
\ins\()l %esi, %ebx
@@ -15810,10 +15472,9 @@ index 71e080d..9a90110 100644
LOCK_PREFIX
cmpxchg8b (%esi)
jne 1b
-diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
-index adbccd0..d9f12e3 100644
---- a/arch/x86/lib/checksum_32.S
-+++ b/arch/x86/lib/checksum_32.S
+diff -urNp linux-2.6.37/arch/x86/lib/checksum_32.S linux-2.6.37/arch/x86/lib/checksum_32.S
+--- linux-2.6.37/arch/x86/lib/checksum_32.S 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/lib/checksum_32.S 2011-01-17 02:41:01.000000000 -0500
@@ -28,7 +28,8 @@
#include <linux/linkage.h>
#include <asm/dwarf2.h>
@@ -15824,7 +15485,7 @@ index adbccd0..d9f12e3 100644
/*
* computes a partial checksum, e.g. for TCP/UDP fragments
*/
-@@ -304,9 +305,22 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
+@@ -304,9 +305,28 @@ unsigned int csum_partial_copy_generic (
#define ARGBASE 16
#define FP 12
@@ -15833,23 +15494,29 @@ index adbccd0..d9f12e3 100644
+
+ENTRY(csum_partial_copy_generic_to_user)
CFI_STARTPROC
-+ pushl $(__USER_DS)
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ pushl %gs
+ CFI_ADJUST_CFA_OFFSET 4
+ popl %es
+ CFI_ADJUST_CFA_OFFSET -4
+ jmp csum_partial_copy_generic
++#endif
+
+ENTRY(csum_partial_copy_generic_from_user)
-+ pushl $(__USER_DS)
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ pushl %gs
+ CFI_ADJUST_CFA_OFFSET 4
+ popl %ds
+ CFI_ADJUST_CFA_OFFSET -4
++#endif
+
+ENTRY(csum_partial_copy_generic)
subl $4,%esp
CFI_ADJUST_CFA_OFFSET 4
pushl %edi
-@@ -331,7 +345,7 @@ ENTRY(csum_partial_copy_generic)
+@@ -331,7 +351,7 @@ ENTRY(csum_partial_copy_generic)
jmp 4f
SRC(1: movw (%esi), %bx )
addl $2, %esi
@@ -15858,7 +15525,7 @@ index adbccd0..d9f12e3 100644
addl $2, %edi
addw %bx, %ax
adcl $0, %eax
-@@ -343,30 +357,30 @@ DST( movw %bx, (%edi) )
+@@ -343,30 +363,30 @@ DST( movw %bx, (%edi) )
SRC(1: movl (%esi), %ebx )
SRC( movl 4(%esi), %edx )
adcl %ebx, %eax
@@ -15897,7 +15564,7 @@ index adbccd0..d9f12e3 100644
lea 32(%esi), %esi
lea 32(%edi), %edi
-@@ -380,7 +394,7 @@ DST( movl %edx, 28(%edi) )
+@@ -380,7 +400,7 @@ DST( movl %edx, 28(%edi) )
shrl $2, %edx # This clears CF
SRC(3: movl (%esi), %ebx )
adcl %ebx, %eax
@@ -15906,7 +15573,7 @@ index adbccd0..d9f12e3 100644
lea 4(%esi), %esi
lea 4(%edi), %edi
dec %edx
-@@ -392,12 +406,12 @@ DST( movl %ebx, (%edi) )
+@@ -392,12 +412,12 @@ DST( movl %ebx, (%edi) )
jb 5f
SRC( movw (%esi), %cx )
leal 2(%esi), %esi
@@ -15921,7 +15588,7 @@ index adbccd0..d9f12e3 100644
6: addl %ecx, %eax
adcl $0, %eax
7:
-@@ -408,7 +422,7 @@ DST( movb %cl, (%edi) )
+@@ -408,7 +428,7 @@ DST( movb %cl, (%edi) )
6001:
movl ARGBASE+20(%esp), %ebx # src_err_ptr
@@ -15930,7 +15597,7 @@ index adbccd0..d9f12e3 100644
# zero the complete destination - computing the rest
# is too much work
-@@ -421,11 +435,19 @@ DST( movb %cl, (%edi) )
+@@ -421,11 +441,19 @@ DST( movb %cl, (%edi) )
6002:
movl ARGBASE+24(%esp), %ebx # dst_err_ptr
@@ -15951,7 +15618,7 @@ index adbccd0..d9f12e3 100644
popl %ebx
CFI_ADJUST_CFA_OFFSET -4
CFI_RESTORE ebx
-@@ -439,26 +461,41 @@ DST( movb %cl, (%edi) )
+@@ -439,26 +467,47 @@ DST( movb %cl, (%edi) )
CFI_ADJUST_CFA_OFFSET -4
ret
CFI_ENDPROC
@@ -15982,23 +15649,29 @@ index adbccd0..d9f12e3 100644
+
+ENTRY(csum_partial_copy_generic_to_user)
CFI_STARTPROC
-+ pushl $(__USER_DS)
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ pushl %gs
+ CFI_ADJUST_CFA_OFFSET 4
+ popl %es
+ CFI_ADJUST_CFA_OFFSET -4
+ jmp csum_partial_copy_generic
++#endif
+
+ENTRY(csum_partial_copy_generic_from_user)
-+ pushl $(__USER_DS)
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ pushl %gs
+ CFI_ADJUST_CFA_OFFSET 4
+ popl %ds
+ CFI_ADJUST_CFA_OFFSET -4
++#endif
+
+ENTRY(csum_partial_copy_generic)
pushl %ebx
CFI_ADJUST_CFA_OFFSET 4
CFI_REL_OFFSET ebx, 0
-@@ -482,7 +519,7 @@ ENTRY(csum_partial_copy_generic)
+@@ -482,7 +531,7 @@ ENTRY(csum_partial_copy_generic)
subl %ebx, %edi
lea -1(%esi),%edx
andl $-32,%edx
@@ -16007,7 +15680,7 @@ index adbccd0..d9f12e3 100644
testl %esi, %esi
jmp *%ebx
1: addl $64,%esi
-@@ -503,19 +540,19 @@ ENTRY(csum_partial_copy_generic)
+@@ -503,19 +552,19 @@ ENTRY(csum_partial_copy_generic)
jb 5f
SRC( movw (%esi), %dx )
leal 2(%esi), %esi
@@ -16030,7 +15703,7 @@ index adbccd0..d9f12e3 100644
# zero the complete destination (computing the rest is too much work)
movl ARGBASE+8(%esp),%edi # dst
movl ARGBASE+12(%esp),%ecx # len
-@@ -523,10 +560,18 @@ DST( movb %dl, (%edi) )
+@@ -523,10 +572,21 @@ DST( movb %dl, (%edi) )
rep; stosb
jmp 7b
6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
@@ -16039,6 +15712,7 @@ index adbccd0..d9f12e3 100644
jmp 7b
.previous
++#ifdef CONFIG_PAX_MEMORY_UDEREF
+ pushl %ss
+ CFI_ADJUST_CFA_OFFSET 4
+ popl %ds
@@ -16047,10 +15721,12 @@ index adbccd0..d9f12e3 100644
+ CFI_ADJUST_CFA_OFFSET 4
+ popl %es
+ CFI_ADJUST_CFA_OFFSET -4
++#endif
++
popl %esi
CFI_ADJUST_CFA_OFFSET -4
CFI_RESTORE esi
-@@ -538,7 +583,7 @@ DST( movb %dl, (%edi) )
+@@ -538,7 +598,7 @@ DST( movb %dl, (%edi) )
CFI_RESTORE ebx
ret
CFI_ENDPROC
@@ -16059,10 +15735,9 @@ index adbccd0..d9f12e3 100644
#undef ROUND
#undef ROUND1
-diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
-index ebeafcc..609dc8a 100644
---- a/arch/x86/lib/clear_page_64.S
-+++ b/arch/x86/lib/clear_page_64.S
+diff -urNp linux-2.6.37/arch/x86/lib/clear_page_64.S linux-2.6.37/arch/x86/lib/clear_page_64.S
+--- linux-2.6.37/arch/x86/lib/clear_page_64.S 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/lib/clear_page_64.S 2011-01-17 02:41:01.000000000 -0500
@@ -43,7 +43,7 @@ ENDPROC(clear_page)
#include <asm/cpufeature.h>
@@ -16072,10 +15747,9 @@ index ebeafcc..609dc8a 100644
1: .byte 0xeb /* jmp <disp8> */
.byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
2:
-diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
-index 727a5d4..fd61ff4 100644
---- a/arch/x86/lib/copy_page_64.S
-+++ b/arch/x86/lib/copy_page_64.S
+diff -urNp linux-2.6.37/arch/x86/lib/copy_page_64.S linux-2.6.37/arch/x86/lib/copy_page_64.S
+--- linux-2.6.37/arch/x86/lib/copy_page_64.S 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/lib/copy_page_64.S 2011-01-17 02:41:01.000000000 -0500
@@ -104,7 +104,7 @@ ENDPROC(copy_page)
#include <asm/cpufeature.h>
@@ -16085,10 +15759,9 @@ index 727a5d4..fd61ff4 100644
1: .byte 0xeb /* jmp <disp8> */
.byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
2:
-diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
-index 71100c9..31abdb0 100644
---- a/arch/x86/lib/copy_user_64.S
-+++ b/arch/x86/lib/copy_user_64.S
+diff -urNp linux-2.6.37/arch/x86/lib/copy_user_64.S linux-2.6.37/arch/x86/lib/copy_user_64.S
+--- linux-2.6.37/arch/x86/lib/copy_user_64.S 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/lib/copy_user_64.S 2011-01-17 02:41:01.000000000 -0500
@@ -15,13 +15,14 @@
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
@@ -16145,10 +15818,9 @@ index 71100c9..31abdb0 100644
movl %edx,%ecx
xorl %eax,%eax
rep
-diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
-index cb0c112..39e53b9 100644
---- a/arch/x86/lib/copy_user_nocache_64.S
-+++ b/arch/x86/lib/copy_user_nocache_64.S
+diff -urNp linux-2.6.37/arch/x86/lib/copy_user_nocache_64.S linux-2.6.37/arch/x86/lib/copy_user_nocache_64.S
+--- linux-2.6.37/arch/x86/lib/copy_user_nocache_64.S 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/lib/copy_user_nocache_64.S 2011-01-17 02:41:01.000000000 -0500
@@ -14,6 +14,7 @@
#include <asm/current.h>
#include <asm/asm-offsets.h>
@@ -16173,11 +15845,10 @@ index cb0c112..39e53b9 100644
cmpl $8,%edx
jb 20f /* less then 8 bytes, go to byte copy loop */
ALIGN_DESTINATION
-diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
-index 459b58a..bb5720e 100644
---- a/arch/x86/lib/csum-wrappers_64.c
-+++ b/arch/x86/lib/csum-wrappers_64.c
-@@ -52,6 +52,8 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
+diff -urNp linux-2.6.37/arch/x86/lib/csum-wrappers_64.c linux-2.6.37/arch/x86/lib/csum-wrappers_64.c
+--- linux-2.6.37/arch/x86/lib/csum-wrappers_64.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/lib/csum-wrappers_64.c 2011-01-17 02:41:01.000000000 -0500
+@@ -52,6 +52,8 @@ csum_partial_copy_from_user(const void _
len -= 2;
}
}
@@ -16186,7 +15857,7 @@ index 459b58a..bb5720e 100644
isum = csum_partial_copy_generic((__force const void *)src,
dst, len, isum, errp, NULL);
if (unlikely(*errp))
-@@ -105,6 +107,8 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
+@@ -105,6 +107,8 @@ csum_partial_copy_to_user(const void *sr
}
*errp = 0;
@@ -16195,28 +15866,31 @@ index 459b58a..bb5720e 100644
return csum_partial_copy_generic(src, (void __force *)dst,
len, isum, NULL, errp);
}
-diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
-index 51f1504..2f490c0 100644
---- a/arch/x86/lib/getuser.S
-+++ b/arch/x86/lib/getuser.S
-@@ -33,14 +33,38 @@
+diff -urNp linux-2.6.37/arch/x86/lib/getuser.S linux-2.6.37/arch/x86/lib/getuser.S
+--- linux-2.6.37/arch/x86/lib/getuser.S 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/lib/getuser.S 2011-01-17 02:41:01.000000000 -0500
+@@ -33,14 +33,35 @@
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/asm.h>
+#include <asm/segment.h>
+#include <asm/pgtable.h>
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#define __copyuser_seg %gs:
++#else
++#define __copyuser_seg
++#endif
.text
ENTRY(__get_user_1)
CFI_STARTPROC
+
-+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
-+ pushl $(__USER_DS)
-+ popl %ds
-+#else
++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
GET_THREAD_INFO(%_ASM_DX)
cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
jae bad_get_user
+-1: movzb (%_ASM_AX),%edx
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
@@ -16228,29 +15902,21 @@ index 51f1504..2f490c0 100644
+
+#endif
+
- 1: movzb (%_ASM_AX),%edx
-+
-+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
-+ pushl %ss
-+ pop %ds
-+#endif
-+
++1: movzb __copyuser_seg (%_ASM_AX),%edx
xor %eax,%eax
ret
CFI_ENDPROC
-@@ -49,11 +73,33 @@ ENDPROC(__get_user_1)
+@@ -49,11 +70,24 @@ ENDPROC(__get_user_1)
ENTRY(__get_user_2)
CFI_STARTPROC
add $1,%_ASM_AX
+
-+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
-+ pushl $(__USER_DS)
-+ popl %ds
-+#else
++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
jc bad_get_user
GET_THREAD_INFO(%_ASM_DX)
cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
jae bad_get_user
+-2: movzwl -1(%_ASM_AX),%edx
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
@@ -16262,29 +15928,21 @@ index 51f1504..2f490c0 100644
+
+#endif
+
- 2: movzwl -1(%_ASM_AX),%edx
-+
-+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
-+ pushl %ss
-+ pop %ds
-+#endif
-+
++2: movzwl __copyuser_seg -1(%_ASM_AX),%edx
xor %eax,%eax
ret
CFI_ENDPROC
-@@ -62,11 +108,33 @@ ENDPROC(__get_user_2)
+@@ -62,11 +96,24 @@ ENDPROC(__get_user_2)
ENTRY(__get_user_4)
CFI_STARTPROC
add $3,%_ASM_AX
+
-+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
-+ pushl $(__USER_DS)
-+ popl %ds
-+#else
++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
jc bad_get_user
GET_THREAD_INFO(%_ASM_DX)
cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
jae bad_get_user
+-3: mov -3(%_ASM_AX),%edx
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
@@ -16296,17 +15954,11 @@ index 51f1504..2f490c0 100644
+
+#endif
+
- 3: mov -3(%_ASM_AX),%edx
-+
-+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
-+ pushl %ss
-+ pop %ds
-+#endif
-+
++3: mov __copyuser_seg -3(%_ASM_AX),%edx
xor %eax,%eax
ret
CFI_ENDPROC
-@@ -80,6 +148,15 @@ ENTRY(__get_user_8)
+@@ -80,6 +127,15 @@ ENTRY(__get_user_8)
GET_THREAD_INFO(%_ASM_DX)
cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
jae bad_get_user
@@ -16322,32 +15974,22 @@ index 51f1504..2f490c0 100644
4: movq -7(%_ASM_AX),%_ASM_DX
xor %eax,%eax
ret
-@@ -89,6 +166,12 @@ ENDPROC(__get_user_8)
-
- bad_get_user:
- CFI_STARTPROC
-+
-+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
-+ pushl %ss
-+ pop %ds
-+#endif
-+
- xor %edx,%edx
- mov $(-EFAULT),%_ASM_AX
- ret
-diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
-index 9f33b98..e3a194d 100644
---- a/arch/x86/lib/insn.c
-+++ b/arch/x86/lib/insn.c
-@@ -21,6 +21,7 @@
+diff -urNp linux-2.6.37/arch/x86/lib/insn.c linux-2.6.37/arch/x86/lib/insn.c
+--- linux-2.6.37/arch/x86/lib/insn.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/lib/insn.c 2011-01-24 18:04:15.000000000 -0500
+@@ -21,6 +21,11 @@
#include <linux/string.h>
#include <asm/inat.h>
#include <asm/insn.h>
++#ifdef __KERNEL__
+#include <asm/pgtable_types.h>
++#else
++#define ktla_ktva(addr) addr
++#endif
#define get_next(t, insn) \
({t r; r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
-@@ -40,8 +41,8 @@
+@@ -40,8 +45,8 @@
void insn_init(struct insn *insn, const void *kaddr, int x86_64)
{
memset(insn, 0, sizeof(*insn));
@@ -16358,11 +16000,10 @@ index 9f33b98..e3a194d 100644
insn->x86_64 = x86_64 ? 1 : 0;
insn->opnd_bytes = 4;
if (x86_64)
-diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
-index c9f2d9b..e7fd2c0 100644
---- a/arch/x86/lib/mmx_32.c
-+++ b/arch/x86/lib/mmx_32.c
-@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
+diff -urNp linux-2.6.37/arch/x86/lib/mmx_32.c linux-2.6.37/arch/x86/lib/mmx_32.c
+--- linux-2.6.37/arch/x86/lib/mmx_32.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/lib/mmx_32.c 2011-01-17 02:41:01.000000000 -0500
+@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
{
void *p;
int i;
@@ -16370,7 +16011,7 @@ index c9f2d9b..e7fd2c0 100644
if (unlikely(in_interrupt()))
return __memcpy(to, from, len);
-@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
+@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
kernel_fpu_begin();
__asm__ __volatile__ (
@@ -16477,7 +16118,7 @@ index c9f2d9b..e7fd2c0 100644
kernel_fpu_begin();
-@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
+@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
* but that is for later. -AV
*/
__asm__ __volatile__(
@@ -16677,10 +16318,9 @@ index c9f2d9b..e7fd2c0 100644
from += 64;
to += 64;
-diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
-index 36b0d15..4c35b79 100644
---- a/arch/x86/lib/putuser.S
-+++ b/arch/x86/lib/putuser.S
+diff -urNp linux-2.6.37/arch/x86/lib/putuser.S linux-2.6.37/arch/x86/lib/putuser.S
+--- linux-2.6.37/arch/x86/lib/putuser.S 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/lib/putuser.S 2011-01-17 02:41:01.000000000 -0500
@@ -15,7 +15,8 @@
#include <asm/thread_info.h>
#include <asm/errno.h>
@@ -16691,7 +16331,7 @@ index 36b0d15..4c35b79 100644
/*
* __put_user_X
-@@ -29,59 +30,162 @@
+@@ -29,52 +30,119 @@
* as they get called from within inline assembly.
*/
@@ -16707,14 +16347,17 @@ index 36b0d15..4c35b79 100644
+#define _DEST %_ASM_CX
+#endif
+
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#define __copyuser_seg %gs:
++#else
++#define __copyuser_seg
++#endif
++
.text
ENTRY(__put_user_1)
ENTER
+
-+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
-+ pushl $(__USER_DS)
-+ popl %ds
-+#else
++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
+ GET_THREAD_INFO(%_ASM_BX)
cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
jae bad_put_user
@@ -16730,13 +16373,7 @@ index 36b0d15..4c35b79 100644
+
+#endif
+
-+1: movb %al,(_DEST)
-+
-+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
-+ pushl %ss
-+ popl %ds
-+#endif
-+
++1: movb %al,__copyuser_seg (_DEST)
xor %eax,%eax
EXIT
ENDPROC(__put_user_1)
@@ -16744,10 +16381,7 @@ index 36b0d15..4c35b79 100644
ENTRY(__put_user_2)
ENTER
+
-+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
-+ pushl $(__USER_DS)
-+ popl %ds
-+#else
++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
+ GET_THREAD_INFO(%_ASM_BX)
mov TI_addr_limit(%_ASM_BX),%_ASM_BX
sub $1,%_ASM_BX
@@ -16765,13 +16399,7 @@ index 36b0d15..4c35b79 100644
+
+#endif
+
-+2: movw %ax,(_DEST)
-+
-+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
-+ pushl %ss
-+ popl %ds
-+#endif
-+
++2: movw %ax,__copyuser_seg (_DEST)
xor %eax,%eax
EXIT
ENDPROC(__put_user_2)
@@ -16779,10 +16407,7 @@ index 36b0d15..4c35b79 100644
ENTRY(__put_user_4)
ENTER
+
-+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
-+ pushl $(__USER_DS)
-+ popl %ds
-+#else
++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
+ GET_THREAD_INFO(%_ASM_BX)
mov TI_addr_limit(%_ASM_BX),%_ASM_BX
sub $3,%_ASM_BX
@@ -16800,13 +16425,7 @@ index 36b0d15..4c35b79 100644
+
+#endif
+
-+3: movl %eax,(_DEST)
-+
-+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
-+ pushl %ss
-+ popl %ds
-+#endif
-+
++3: movl %eax,__copyuser_seg (_DEST)
xor %eax,%eax
EXIT
ENDPROC(__put_user_4)
@@ -16814,10 +16433,7 @@ index 36b0d15..4c35b79 100644
ENTRY(__put_user_8)
ENTER
+
-+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
-+ pushl $(__USER_DS)
-+ popl %ds
-+#else
++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
+ GET_THREAD_INFO(%_ASM_BX)
mov TI_addr_limit(%_ASM_BX),%_ASM_BX
sub $7,%_ASM_BX
@@ -16835,217 +16451,55 @@ index 36b0d15..4c35b79 100644
+
+#endif
+
-+4: mov %_ASM_AX,(_DEST)
++4: mov %_ASM_AX,__copyuser_seg (_DEST)
#ifdef CONFIG_X86_32
-5: movl %edx,4(%_ASM_CX)
-+5: movl %edx,4(_DEST)
++5: movl %edx,__copyuser_seg 4(_DEST)
#endif
-+
-+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
-+ pushl %ss
-+ popl %ds
-+#endif
-+
xor %eax,%eax
EXIT
- ENDPROC(__put_user_8)
-
- bad_put_user:
- CFI_STARTPROC
-+
-+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
-+ pushl %ss
-+ popl %ds
-+#endif
-+
- movl $-EFAULT,%eax
- EXIT
- END(bad_put_user)
-diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
-index e218d5d..5f0615c 100644
---- a/arch/x86/lib/usercopy_32.c
-+++ b/arch/x86/lib/usercopy_32.c
-@@ -36,31 +36,38 @@ static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned lon
- * Copy a null terminated string from userspace.
- */
-
--#define __do_strncpy_from_user(dst, src, count, res) \
--do { \
-- int __d0, __d1, __d2; \
-- might_fault(); \
-- __asm__ __volatile__( \
-- " testl %1,%1\n" \
-- " jz 2f\n" \
+diff -urNp linux-2.6.37/arch/x86/lib/usercopy_32.c linux-2.6.37/arch/x86/lib/usercopy_32.c
+--- linux-2.6.37/arch/x86/lib/usercopy_32.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/lib/usercopy_32.c 2011-01-17 02:41:01.000000000 -0500
+@@ -43,7 +43,7 @@ do { \
+ __asm__ __volatile__( \
+ " testl %1,%1\n" \
+ " jz 2f\n" \
- "0: lodsb\n" \
-- " stosb\n" \
-- " testb %%al,%%al\n" \
-- " jz 1f\n" \
-- " decl %1\n" \
-- " jnz 0b\n" \
-- "1: subl %1,%0\n" \
-- "2:\n" \
-- ".section .fixup,\"ax\"\n" \
-- "3: movl %5,%0\n" \
-- " jmp 2b\n" \
-- ".previous\n" \
-- _ASM_EXTABLE(0b,3b) \
-- : "=&d"(res), "=&c"(count), "=&a" (__d0), "=&S" (__d1), \
-- "=&D" (__d2) \
-- : "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \
-- : "memory"); \
--} while (0)
-+static long __do_strncpy_from_user(char *dst, const char __user *src, long count)
-+{
-+ int __d0, __d1, __d2;
-+ long res = -EFAULT;
-+
-+ might_fault();
-+ __asm__ __volatile__(
-+ " movw %w10,%%ds\n"
-+ " testl %1,%1\n"
-+ " jz 2f\n"
-+ "0: lodsb\n"
-+ " stosb\n"
-+ " testb %%al,%%al\n"
-+ " jz 1f\n"
-+ " decl %1\n"
-+ " jnz 0b\n"
-+ "1: subl %1,%0\n"
-+ "2:\n"
-+ " pushl %%ss\n"
-+ " popl %%ds\n"
-+ ".section .fixup,\"ax\"\n"
-+ "3: movl %5,%0\n"
-+ " jmp 2b\n"
-+ ".previous\n"
-+ _ASM_EXTABLE(0b,3b)
-+ : "=&d"(res), "=&c"(count), "=&a" (__d0), "=&S" (__d1),
-+ "=&D" (__d2)
-+ : "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst),
-+ "r"(__USER_DS)
-+ : "memory");
-+ return res;
-+}
-
- /**
- * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
-@@ -85,9 +92,7 @@ do { \
- long
- __strncpy_from_user(char *dst, const char __user *src, long count)
- {
-- long res;
-- __do_strncpy_from_user(dst, src, count, res);
-- return res;
-+ return __do_strncpy_from_user(dst, src, count);
- }
- EXPORT_SYMBOL(__strncpy_from_user);
-
-@@ -114,7 +119,7 @@ strncpy_from_user(char *dst, const char __user *src, long count)
- {
- long res = -EFAULT;
- if (access_ok(VERIFY_READ, src, 1))
-- __do_strncpy_from_user(dst, src, count, res);
-+ res = __do_strncpy_from_user(dst, src, count);
- return res;
- }
- EXPORT_SYMBOL(strncpy_from_user);
-@@ -123,24 +128,30 @@ EXPORT_SYMBOL(strncpy_from_user);
- * Zero Userspace
- */
-
--#define __do_clear_user(addr,size) \
--do { \
-- int __d0; \
-- might_fault(); \
-- __asm__ __volatile__( \
-- "0: rep; stosl\n" \
-- " movl %2,%0\n" \
-- "1: rep; stosb\n" \
-- "2:\n" \
-- ".section .fixup,\"ax\"\n" \
-- "3: lea 0(%2,%0,4),%0\n" \
-- " jmp 2b\n" \
-- ".previous\n" \
-- _ASM_EXTABLE(0b,3b) \
-- _ASM_EXTABLE(1b,2b) \
-- : "=&c"(size), "=&D" (__d0) \
-- : "r"(size & 3), "0"(size / 4), "1"(addr), "a"(0)); \
--} while (0)
-+static unsigned long __do_clear_user(void __user *addr, unsigned long size)
-+{
-+ int __d0;
-+
-+ might_fault();
-+ __asm__ __volatile__(
-+ " movw %w6,%%es\n"
-+ "0: rep; stosl\n"
-+ " movl %2,%0\n"
-+ "1: rep; stosb\n"
-+ "2:\n"
-+ " pushl %%ss\n"
-+ " popl %%es\n"
-+ ".section .fixup,\"ax\"\n"
-+ "3: lea 0(%2,%0,4),%0\n"
-+ " jmp 2b\n"
-+ ".previous\n"
-+ _ASM_EXTABLE(0b,3b)
-+ _ASM_EXTABLE(1b,2b)
-+ : "=&c"(size), "=&D" (__d0)
-+ : "r"(size & 3), "0"(size / 4), "1"(addr), "a"(0),
-+ "r"(__USER_DS));
-+ return size;
-+}
-
- /**
- * clear_user: - Zero a block of memory in user space.
-@@ -157,7 +168,7 @@ clear_user(void __user *to, unsigned long n)
- {
- might_fault();
- if (access_ok(VERIFY_WRITE, to, n))
-- __do_clear_user(to, n);
-+ n = __do_clear_user(to, n);
- return n;
- }
- EXPORT_SYMBOL(clear_user);
-@@ -176,8 +187,7 @@ EXPORT_SYMBOL(clear_user);
- unsigned long
- __clear_user(void __user *to, unsigned long n)
- {
-- __do_clear_user(to, n);
-- return n;
-+ return __do_clear_user(to, n);
- }
- EXPORT_SYMBOL(__clear_user);
-
-@@ -200,14 +210,17 @@ long strnlen_user(const char __user *s, long n)
++ "0: lodsb " __copyuser_seg" (%%esi)\n" \
+ " stosb\n" \
+ " testb %%al,%%al\n" \
+ " jz 1f\n" \
+@@ -128,10 +128,12 @@ do { \
+ int __d0; \
+ might_fault(); \
+ __asm__ __volatile__( \
++ __COPYUSER_SET_ES \
+ "0: rep; stosl\n" \
+ " movl %2,%0\n" \
+ "1: rep; stosb\n" \
+ "2:\n" \
++ __COPYUSER_RESTORE_ES \
+ ".section .fixup,\"ax\"\n" \
+ "3: lea 0(%2,%0,4),%0\n" \
+ " jmp 2b\n" \
+@@ -200,6 +202,7 @@ long strnlen_user(const char __user *s,
might_fault();
__asm__ __volatile__(
-+ " movw %w8,%%es\n"
++ __COPYUSER_SET_ES
" testl %0, %0\n"
" jz 3f\n"
-- " andl %0,%%ecx\n"
-+ " movl %0,%%ecx\n"
- "0: repne; scasb\n"
- " setne %%al\n"
+ " andl %0,%%ecx\n"
+@@ -208,6 +211,7 @@ long strnlen_user(const char __user *s,
" subl %%ecx,%0\n"
" addl %0,%%eax\n"
"1:\n"
-+ " pushl %%ss\n"
-+ " popl %%es\n"
++ __COPYUSER_RESTORE_ES
".section .fixup,\"ax\"\n"
"2: xorl %%eax,%%eax\n"
" jmp 1b\n"
-@@ -219,7 +232,7 @@ long strnlen_user(const char __user *s, long n)
- " .long 0b,2b\n"
- ".previous"
- :"=&r" (n), "=&D" (s), "=&a" (res), "=&c" (tmp)
-- :"0" (n), "1" (s), "2" (0), "3" (mask)
-+ :"0" (n), "1" (s), "2" (0), "3" (mask), "r" (__USER_DS)
- :"cc");
- return res & mask;
- }
-@@ -227,10 +240,11 @@ EXPORT_SYMBOL(strnlen_user);
+@@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
#ifdef CONFIG_X86_INTEL_USERCOPY
static unsigned long
@@ -17054,141 +16508,69 @@ index e218d5d..5f0615c 100644
{
int d0, d1;
__asm__ __volatile__(
-+ " movw %w6, %%es\n"
- " .align 2,0x90\n"
- "1: movl 32(%4), %%eax\n"
- " cmpl $67, %0\n"
-@@ -239,36 +253,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
+@@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const
" .align 2,0x90\n"
"3: movl 0(%4), %%eax\n"
"4: movl 4(%4), %%edx\n"
- "5: movl %%eax, 0(%3)\n"
- "6: movl %%edx, 4(%3)\n"
-+ "5: movl %%eax, %%es:0(%3)\n"
-+ "6: movl %%edx, %%es:4(%3)\n"
++ "5: movl %%eax, "__copyuser_seg" 0(%3)\n"
++ "6: movl %%edx, "__copyuser_seg" 4(%3)\n"
"7: movl 8(%4), %%eax\n"
"8: movl 12(%4),%%edx\n"
- "9: movl %%eax, 8(%3)\n"
- "10: movl %%edx, 12(%3)\n"
-+ "9: movl %%eax, %%es:8(%3)\n"
-+ "10: movl %%edx, %%es:12(%3)\n"
++ "9: movl %%eax, "__copyuser_seg" 8(%3)\n"
++ "10: movl %%edx, "__copyuser_seg" 12(%3)\n"
"11: movl 16(%4), %%eax\n"
"12: movl 20(%4), %%edx\n"
- "13: movl %%eax, 16(%3)\n"
- "14: movl %%edx, 20(%3)\n"
-+ "13: movl %%eax, %%es:16(%3)\n"
-+ "14: movl %%edx, %%es:20(%3)\n"
++ "13: movl %%eax, "__copyuser_seg" 16(%3)\n"
++ "14: movl %%edx, "__copyuser_seg" 20(%3)\n"
"15: movl 24(%4), %%eax\n"
"16: movl 28(%4), %%edx\n"
- "17: movl %%eax, 24(%3)\n"
- "18: movl %%edx, 28(%3)\n"
-+ "17: movl %%eax, %%es:24(%3)\n"
-+ "18: movl %%edx, %%es:28(%3)\n"
++ "17: movl %%eax, "__copyuser_seg" 24(%3)\n"
++ "18: movl %%edx, "__copyuser_seg" 28(%3)\n"
"19: movl 32(%4), %%eax\n"
"20: movl 36(%4), %%edx\n"
- "21: movl %%eax, 32(%3)\n"
- "22: movl %%edx, 36(%3)\n"
-+ "21: movl %%eax, %%es:32(%3)\n"
-+ "22: movl %%edx, %%es:36(%3)\n"
++ "21: movl %%eax, "__copyuser_seg" 32(%3)\n"
++ "22: movl %%edx, "__copyuser_seg" 36(%3)\n"
"23: movl 40(%4), %%eax\n"
"24: movl 44(%4), %%edx\n"
- "25: movl %%eax, 40(%3)\n"
- "26: movl %%edx, 44(%3)\n"
-+ "25: movl %%eax, %%es:40(%3)\n"
-+ "26: movl %%edx, %%es:44(%3)\n"
++ "25: movl %%eax, "__copyuser_seg" 40(%3)\n"
++ "26: movl %%edx, "__copyuser_seg" 44(%3)\n"
"27: movl 48(%4), %%eax\n"
"28: movl 52(%4), %%edx\n"
- "29: movl %%eax, 48(%3)\n"
- "30: movl %%edx, 52(%3)\n"
-+ "29: movl %%eax, %%es:48(%3)\n"
-+ "30: movl %%edx, %%es:52(%3)\n"
++ "29: movl %%eax, "__copyuser_seg" 48(%3)\n"
++ "30: movl %%edx, "__copyuser_seg" 52(%3)\n"
"31: movl 56(%4), %%eax\n"
"32: movl 60(%4), %%edx\n"
- "33: movl %%eax, 56(%3)\n"
- "34: movl %%edx, 60(%3)\n"
-+ "33: movl %%eax, %%es:56(%3)\n"
-+ "34: movl %%edx, %%es:60(%3)\n"
++ "33: movl %%eax, "__copyuser_seg" 56(%3)\n"
++ "34: movl %%edx, "__copyuser_seg" 60(%3)\n"
" addl $-64, %0\n"
" addl $64, %4\n"
" addl $64, %3\n"
-@@ -282,6 +296,8 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
+@@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const
+ " shrl $2, %0\n"
+ " andl $3, %%eax\n"
+ " cld\n"
++ __COPYUSER_SET_ES
+ "99: rep; movsl\n"
"36: movl %%eax, %0\n"
"37: rep; movsb\n"
"100:\n"
-+ " pushl %%ss\n"
-+ " popl %%es\n"
- ".section .fixup,\"ax\"\n"
- "101: lea 0(%%eax,%0,4),%0\n"
- " jmp 100b\n"
-@@ -328,7 +344,117 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
- " .long 99b,101b\n"
- ".previous"
- : "=&c"(size), "=&D" (d0), "=&S" (d1)
-- : "1"(to), "2"(from), "0"(size)
-+ : "1"(to), "2"(from), "0"(size), "r"(__USER_DS)
-+ : "eax", "edx", "memory");
-+ return size;
-+}
-+
-+static unsigned long
-+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
-+{
-+ int d0, d1;
-+ __asm__ __volatile__(
-+ " movw %w6, %%ds\n"
-+ " .align 2,0x90\n"
-+ "1: movl 32(%4), %%eax\n"
-+ " cmpl $67, %0\n"
-+ " jbe 3f\n"
-+ "2: movl 64(%4), %%eax\n"
-+ " .align 2,0x90\n"
-+ "3: movl 0(%4), %%eax\n"
-+ "4: movl 4(%4), %%edx\n"
-+ "5: movl %%eax, %%es:0(%3)\n"
-+ "6: movl %%edx, %%es:4(%3)\n"
-+ "7: movl 8(%4), %%eax\n"
-+ "8: movl 12(%4),%%edx\n"
-+ "9: movl %%eax, %%es:8(%3)\n"
-+ "10: movl %%edx, %%es:12(%3)\n"
-+ "11: movl 16(%4), %%eax\n"
-+ "12: movl 20(%4), %%edx\n"
-+ "13: movl %%eax, %%es:16(%3)\n"
-+ "14: movl %%edx, %%es:20(%3)\n"
-+ "15: movl 24(%4), %%eax\n"
-+ "16: movl 28(%4), %%edx\n"
-+ "17: movl %%eax, %%es:24(%3)\n"
-+ "18: movl %%edx, %%es:28(%3)\n"
-+ "19: movl 32(%4), %%eax\n"
-+ "20: movl 36(%4), %%edx\n"
-+ "21: movl %%eax, %%es:32(%3)\n"
-+ "22: movl %%edx, %%es:36(%3)\n"
-+ "23: movl 40(%4), %%eax\n"
-+ "24: movl 44(%4), %%edx\n"
-+ "25: movl %%eax, %%es:40(%3)\n"
-+ "26: movl %%edx, %%es:44(%3)\n"
-+ "27: movl 48(%4), %%eax\n"
-+ "28: movl 52(%4), %%edx\n"
-+ "29: movl %%eax, %%es:48(%3)\n"
-+ "30: movl %%edx, %%es:52(%3)\n"
-+ "31: movl 56(%4), %%eax\n"
-+ "32: movl 60(%4), %%edx\n"
-+ "33: movl %%eax, %%es:56(%3)\n"
-+ "34: movl %%edx, %%es:60(%3)\n"
-+ " addl $-64, %0\n"
-+ " addl $64, %4\n"
-+ " addl $64, %3\n"
-+ " cmpl $63, %0\n"
-+ " ja 1b\n"
-+ "35: movl %0, %%eax\n"
-+ " shrl $2, %0\n"
-+ " andl $3, %%eax\n"
-+ " cld\n"
-+ "99: rep; movsl\n"
-+ "36: movl %%eax, %0\n"
-+ "37: rep; movsb\n"
-+ "100:\n"
-+ " pushl %%ss\n"
-+ " popl %%ds\n"
++ __COPYUSER_RESTORE_ES
+ ".section .fixup,\"ax\"\n"
+ "101: lea 0(%%eax,%0,4),%0\n"
+ " jmp 100b\n"
@@ -17235,248 +16617,287 @@ index e218d5d..5f0615c 100644
+ " .long 99b,101b\n"
+ ".previous"
+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
-+ : "1"(to), "2"(from), "0"(size), "r"(__USER_DS)
- : "eax", "edx", "memory");
- return size;
- }
-@@ -338,6 +464,7 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
- {
++ : "1"(to), "2"(from), "0"(size)
++ : "eax", "edx", "memory");
++ return size;
++}
++
++static unsigned long
++__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
++{
++ int d0, d1;
++ __asm__ __volatile__(
++ " .align 2,0x90\n"
++ "1: movl "__copyuser_seg" 32(%4), %%eax\n"
++ " cmpl $67, %0\n"
++ " jbe 3f\n"
++ "2: movl "__copyuser_seg" 64(%4), %%eax\n"
++ " .align 2,0x90\n"
++ "3: movl "__copyuser_seg" 0(%4), %%eax\n"
++ "4: movl "__copyuser_seg" 4(%4), %%edx\n"
++ "5: movl %%eax, 0(%3)\n"
++ "6: movl %%edx, 4(%3)\n"
++ "7: movl "__copyuser_seg" 8(%4), %%eax\n"
++ "8: movl "__copyuser_seg" 12(%4),%%edx\n"
++ "9: movl %%eax, 8(%3)\n"
++ "10: movl %%edx, 12(%3)\n"
++ "11: movl "__copyuser_seg" 16(%4), %%eax\n"
++ "12: movl "__copyuser_seg" 20(%4), %%edx\n"
++ "13: movl %%eax, 16(%3)\n"
++ "14: movl %%edx, 20(%3)\n"
++ "15: movl "__copyuser_seg" 24(%4), %%eax\n"
++ "16: movl "__copyuser_seg" 28(%4), %%edx\n"
++ "17: movl %%eax, 24(%3)\n"
++ "18: movl %%edx, 28(%3)\n"
++ "19: movl "__copyuser_seg" 32(%4), %%eax\n"
++ "20: movl "__copyuser_seg" 36(%4), %%edx\n"
++ "21: movl %%eax, 32(%3)\n"
++ "22: movl %%edx, 36(%3)\n"
++ "23: movl "__copyuser_seg" 40(%4), %%eax\n"
++ "24: movl "__copyuser_seg" 44(%4), %%edx\n"
++ "25: movl %%eax, 40(%3)\n"
++ "26: movl %%edx, 44(%3)\n"
++ "27: movl "__copyuser_seg" 48(%4), %%eax\n"
++ "28: movl "__copyuser_seg" 52(%4), %%edx\n"
++ "29: movl %%eax, 48(%3)\n"
++ "30: movl %%edx, 52(%3)\n"
++ "31: movl "__copyuser_seg" 56(%4), %%eax\n"
++ "32: movl "__copyuser_seg" 60(%4), %%edx\n"
++ "33: movl %%eax, 56(%3)\n"
++ "34: movl %%edx, 60(%3)\n"
++ " addl $-64, %0\n"
++ " addl $64, %4\n"
++ " addl $64, %3\n"
++ " cmpl $63, %0\n"
++ " ja 1b\n"
++ "35: movl %0, %%eax\n"
++ " shrl $2, %0\n"
++ " andl $3, %%eax\n"
++ " cld\n"
++ "99: rep; movsl "__copyuser_seg" (%%esi), (%%edi)\n"
++ "36: movl %%eax, %0\n"
++ "37: rep; movsb "__copyuser_seg" (%%esi), (%%edi)\n"
++ "100:\n"
+ ".section .fixup,\"ax\"\n"
+ "101: lea 0(%%eax,%0,4),%0\n"
+ " jmp 100b\n"
+@@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, cons
int d0, d1;
__asm__ __volatile__(
-+ " movw %w6, %%ds\n"
" .align 2,0x90\n"
- "0: movl 32(%4), %%eax\n"
+- "0: movl 32(%4), %%eax\n"
++ "0: movl "__copyuser_seg" 32(%4), %%eax\n"
" cmpl $67, %0\n"
-@@ -346,36 +473,36 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
+ " jbe 2f\n"
+- "1: movl 64(%4), %%eax\n"
++ "1: movl "__copyuser_seg" 64(%4), %%eax\n"
" .align 2,0x90\n"
- "2: movl 0(%4), %%eax\n"
- "21: movl 4(%4), %%edx\n"
-- " movl %%eax, 0(%3)\n"
-- " movl %%edx, 4(%3)\n"
-+ " movl %%eax, %%es:0(%3)\n"
-+ " movl %%edx, %%es:4(%3)\n"
- "3: movl 8(%4), %%eax\n"
- "31: movl 12(%4),%%edx\n"
-- " movl %%eax, 8(%3)\n"
-- " movl %%edx, 12(%3)\n"
-+ " movl %%eax, %%es:8(%3)\n"
-+ " movl %%edx, %%es:12(%3)\n"
- "4: movl 16(%4), %%eax\n"
- "41: movl 20(%4), %%edx\n"
-- " movl %%eax, 16(%3)\n"
-- " movl %%edx, 20(%3)\n"
-+ " movl %%eax, %%es:16(%3)\n"
-+ " movl %%edx, %%es:20(%3)\n"
- "10: movl 24(%4), %%eax\n"
- "51: movl 28(%4), %%edx\n"
-- " movl %%eax, 24(%3)\n"
-- " movl %%edx, 28(%3)\n"
-+ " movl %%eax, %%es:24(%3)\n"
-+ " movl %%edx, %%es:28(%3)\n"
- "11: movl 32(%4), %%eax\n"
- "61: movl 36(%4), %%edx\n"
-- " movl %%eax, 32(%3)\n"
-- " movl %%edx, 36(%3)\n"
-+ " movl %%eax, %%es:32(%3)\n"
-+ " movl %%edx, %%es:36(%3)\n"
- "12: movl 40(%4), %%eax\n"
- "71: movl 44(%4), %%edx\n"
-- " movl %%eax, 40(%3)\n"
-- " movl %%edx, 44(%3)\n"
-+ " movl %%eax, %%es:40(%3)\n"
-+ " movl %%edx, %%es:44(%3)\n"
- "13: movl 48(%4), %%eax\n"
- "81: movl 52(%4), %%edx\n"
-- " movl %%eax, 48(%3)\n"
-- " movl %%edx, 52(%3)\n"
-+ " movl %%eax, %%es:48(%3)\n"
-+ " movl %%edx, %%es:52(%3)\n"
- "14: movl 56(%4), %%eax\n"
- "91: movl 60(%4), %%edx\n"
-- " movl %%eax, 56(%3)\n"
-- " movl %%edx, 60(%3)\n"
-+ " movl %%eax, %%es:56(%3)\n"
-+ " movl %%edx, %%es:60(%3)\n"
+- "2: movl 0(%4), %%eax\n"
+- "21: movl 4(%4), %%edx\n"
++ "2: movl "__copyuser_seg" 0(%4), %%eax\n"
++ "21: movl "__copyuser_seg" 4(%4), %%edx\n"
+ " movl %%eax, 0(%3)\n"
+ " movl %%edx, 4(%3)\n"
+- "3: movl 8(%4), %%eax\n"
+- "31: movl 12(%4),%%edx\n"
++ "3: movl "__copyuser_seg" 8(%4), %%eax\n"
++ "31: movl "__copyuser_seg" 12(%4),%%edx\n"
+ " movl %%eax, 8(%3)\n"
+ " movl %%edx, 12(%3)\n"
+- "4: movl 16(%4), %%eax\n"
+- "41: movl 20(%4), %%edx\n"
++ "4: movl "__copyuser_seg" 16(%4), %%eax\n"
++ "41: movl "__copyuser_seg" 20(%4), %%edx\n"
+ " movl %%eax, 16(%3)\n"
+ " movl %%edx, 20(%3)\n"
+- "10: movl 24(%4), %%eax\n"
+- "51: movl 28(%4), %%edx\n"
++ "10: movl "__copyuser_seg" 24(%4), %%eax\n"
++ "51: movl "__copyuser_seg" 28(%4), %%edx\n"
+ " movl %%eax, 24(%3)\n"
+ " movl %%edx, 28(%3)\n"
+- "11: movl 32(%4), %%eax\n"
+- "61: movl 36(%4), %%edx\n"
++ "11: movl "__copyuser_seg" 32(%4), %%eax\n"
++ "61: movl "__copyuser_seg" 36(%4), %%edx\n"
+ " movl %%eax, 32(%3)\n"
+ " movl %%edx, 36(%3)\n"
+- "12: movl 40(%4), %%eax\n"
+- "71: movl 44(%4), %%edx\n"
++ "12: movl "__copyuser_seg" 40(%4), %%eax\n"
++ "71: movl "__copyuser_seg" 44(%4), %%edx\n"
+ " movl %%eax, 40(%3)\n"
+ " movl %%edx, 44(%3)\n"
+- "13: movl 48(%4), %%eax\n"
+- "81: movl 52(%4), %%edx\n"
++ "13: movl "__copyuser_seg" 48(%4), %%eax\n"
++ "81: movl "__copyuser_seg" 52(%4), %%edx\n"
+ " movl %%eax, 48(%3)\n"
+ " movl %%edx, 52(%3)\n"
+- "14: movl 56(%4), %%eax\n"
+- "91: movl 60(%4), %%edx\n"
++ "14: movl "__copyuser_seg" 56(%4), %%eax\n"
++ "91: movl "__copyuser_seg" 60(%4), %%edx\n"
+ " movl %%eax, 56(%3)\n"
+ " movl %%edx, 60(%3)\n"
" addl $-64, %0\n"
- " addl $64, %4\n"
- " addl $64, %3\n"
-@@ -389,6 +516,8 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
+@@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, cons
+ " shrl $2, %0\n"
+ " andl $3, %%eax\n"
+ " cld\n"
+- "6: rep; movsl\n"
++ "6: rep; movsl "__copyuser_seg" (%%esi), (%%edi)\n"
" movl %%eax,%0\n"
- "7: rep; movsb\n"
+- "7: rep; movsb\n"
++ "7: rep; movsb "__copyuser_seg" (%%esi), (%%edi)\n"
"8:\n"
-+ " pushl %%ss\n"
-+ " popl %%ds\n"
".section .fixup,\"ax\"\n"
"9: lea 0(%%eax,%0,4),%0\n"
- "16: pushl %0\n"
-@@ -423,7 +552,7 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
- " .long 7b,16b\n"
- ".previous"
- : "=&c"(size), "=&D" (d0), "=&S" (d1)
-- : "1"(to), "2"(from), "0"(size)
-+ : "1"(to), "2"(from), "0"(size), "r"(__USER_DS)
- : "eax", "edx", "memory");
- return size;
- }
-@@ -439,6 +568,7 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
- int d0, d1;
+@@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing
__asm__ __volatile__(
-+ " movw %w6, %%ds\n"
" .align 2,0x90\n"
- "0: movl 32(%4), %%eax\n"
+- "0: movl 32(%4), %%eax\n"
++ "0: movl "__copyuser_seg" 32(%4), %%eax\n"
" cmpl $67, %0\n"
-@@ -447,36 +577,36 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
+ " jbe 2f\n"
+- "1: movl 64(%4), %%eax\n"
++ "1: movl "__copyuser_seg" 64(%4), %%eax\n"
" .align 2,0x90\n"
- "2: movl 0(%4), %%eax\n"
- "21: movl 4(%4), %%edx\n"
-- " movnti %%eax, 0(%3)\n"
-- " movnti %%edx, 4(%3)\n"
-+ " movnti %%eax, %%es:0(%3)\n"
-+ " movnti %%edx, %%es:4(%3)\n"
- "3: movl 8(%4), %%eax\n"
- "31: movl 12(%4),%%edx\n"
-- " movnti %%eax, 8(%3)\n"
-- " movnti %%edx, 12(%3)\n"
-+ " movnti %%eax, %%es:8(%3)\n"
-+ " movnti %%edx, %%es:12(%3)\n"
- "4: movl 16(%4), %%eax\n"
- "41: movl 20(%4), %%edx\n"
-- " movnti %%eax, 16(%3)\n"
-- " movnti %%edx, 20(%3)\n"
-+ " movnti %%eax, %%es:16(%3)\n"
-+ " movnti %%edx, %%es:20(%3)\n"
- "10: movl 24(%4), %%eax\n"
- "51: movl 28(%4), %%edx\n"
-- " movnti %%eax, 24(%3)\n"
-- " movnti %%edx, 28(%3)\n"
-+ " movnti %%eax, %%es:24(%3)\n"
-+ " movnti %%edx, %%es:28(%3)\n"
- "11: movl 32(%4), %%eax\n"
- "61: movl 36(%4), %%edx\n"
-- " movnti %%eax, 32(%3)\n"
-- " movnti %%edx, 36(%3)\n"
-+ " movnti %%eax, %%es:32(%3)\n"
-+ " movnti %%edx, %%es:36(%3)\n"
- "12: movl 40(%4), %%eax\n"
- "71: movl 44(%4), %%edx\n"
-- " movnti %%eax, 40(%3)\n"
-- " movnti %%edx, 44(%3)\n"
-+ " movnti %%eax, %%es:40(%3)\n"
-+ " movnti %%edx, %%es:44(%3)\n"
- "13: movl 48(%4), %%eax\n"
- "81: movl 52(%4), %%edx\n"
-- " movnti %%eax, 48(%3)\n"
-- " movnti %%edx, 52(%3)\n"
-+ " movnti %%eax, %%es:48(%3)\n"
-+ " movnti %%edx, %%es:52(%3)\n"
- "14: movl 56(%4), %%eax\n"
- "91: movl 60(%4), %%edx\n"
-- " movnti %%eax, 56(%3)\n"
-- " movnti %%edx, 60(%3)\n"
-+ " movnti %%eax, %%es:56(%3)\n"
-+ " movnti %%edx, %%es:60(%3)\n"
+- "2: movl 0(%4), %%eax\n"
+- "21: movl 4(%4), %%edx\n"
++ "2: movl "__copyuser_seg" 0(%4), %%eax\n"
++ "21: movl "__copyuser_seg" 4(%4), %%edx\n"
+ " movnti %%eax, 0(%3)\n"
+ " movnti %%edx, 4(%3)\n"
+- "3: movl 8(%4), %%eax\n"
+- "31: movl 12(%4),%%edx\n"
++ "3: movl "__copyuser_seg" 8(%4), %%eax\n"
++ "31: movl "__copyuser_seg" 12(%4),%%edx\n"
+ " movnti %%eax, 8(%3)\n"
+ " movnti %%edx, 12(%3)\n"
+- "4: movl 16(%4), %%eax\n"
+- "41: movl 20(%4), %%edx\n"
++ "4: movl "__copyuser_seg" 16(%4), %%eax\n"
++ "41: movl "__copyuser_seg" 20(%4), %%edx\n"
+ " movnti %%eax, 16(%3)\n"
+ " movnti %%edx, 20(%3)\n"
+- "10: movl 24(%4), %%eax\n"
+- "51: movl 28(%4), %%edx\n"
++ "10: movl "__copyuser_seg" 24(%4), %%eax\n"
++ "51: movl "__copyuser_seg" 28(%4), %%edx\n"
+ " movnti %%eax, 24(%3)\n"
+ " movnti %%edx, 28(%3)\n"
+- "11: movl 32(%4), %%eax\n"
+- "61: movl 36(%4), %%edx\n"
++ "11: movl "__copyuser_seg" 32(%4), %%eax\n"
++ "61: movl "__copyuser_seg" 36(%4), %%edx\n"
+ " movnti %%eax, 32(%3)\n"
+ " movnti %%edx, 36(%3)\n"
+- "12: movl 40(%4), %%eax\n"
+- "71: movl 44(%4), %%edx\n"
++ "12: movl "__copyuser_seg" 40(%4), %%eax\n"
++ "71: movl "__copyuser_seg" 44(%4), %%edx\n"
+ " movnti %%eax, 40(%3)\n"
+ " movnti %%edx, 44(%3)\n"
+- "13: movl 48(%4), %%eax\n"
+- "81: movl 52(%4), %%edx\n"
++ "13: movl "__copyuser_seg" 48(%4), %%eax\n"
++ "81: movl "__copyuser_seg" 52(%4), %%edx\n"
+ " movnti %%eax, 48(%3)\n"
+ " movnti %%edx, 52(%3)\n"
+- "14: movl 56(%4), %%eax\n"
+- "91: movl 60(%4), %%edx\n"
++ "14: movl "__copyuser_seg" 56(%4), %%eax\n"
++ "91: movl "__copyuser_seg" 60(%4), %%edx\n"
+ " movnti %%eax, 56(%3)\n"
+ " movnti %%edx, 60(%3)\n"
" addl $-64, %0\n"
- " addl $64, %4\n"
- " addl $64, %3\n"
-@@ -491,6 +621,8 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
+@@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing
+ " shrl $2, %0\n"
+ " andl $3, %%eax\n"
+ " cld\n"
+- "6: rep; movsl\n"
++ "6: rep; movsl "__copyuser_seg" (%%esi), (%%edi)\n"
" movl %%eax,%0\n"
- "7: rep; movsb\n"
+- "7: rep; movsb\n"
++ "7: rep; movsb "__copyuser_seg" (%%esi), (%%edi)\n"
"8:\n"
-+ " pushl %%ss\n"
-+ " popl %%ds\n"
".section .fixup,\"ax\"\n"
"9: lea 0(%%eax,%0,4),%0\n"
- "16: pushl %0\n"
-@@ -525,7 +657,7 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
- " .long 7b,16b\n"
- ".previous"
- : "=&c"(size), "=&D" (d0), "=&S" (d1)
-- : "1"(to), "2"(from), "0"(size)
-+ : "1"(to), "2"(from), "0"(size), "r"(__USER_DS)
- : "eax", "edx", "memory");
- return size;
- }
-@@ -536,6 +668,7 @@ static unsigned long __copy_user_intel_nocache(void *to,
- int d0, d1;
+@@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_n
__asm__ __volatile__(
-+ " movw %w6, %%ds\n"
" .align 2,0x90\n"
- "0: movl 32(%4), %%eax\n"
+- "0: movl 32(%4), %%eax\n"
++ "0: movl "__copyuser_seg" 32(%4), %%eax\n"
" cmpl $67, %0\n"
-@@ -544,36 +677,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
+ " jbe 2f\n"
+- "1: movl 64(%4), %%eax\n"
++ "1: movl "__copyuser_seg" 64(%4), %%eax\n"
" .align 2,0x90\n"
- "2: movl 0(%4), %%eax\n"
- "21: movl 4(%4), %%edx\n"
-- " movnti %%eax, 0(%3)\n"
-- " movnti %%edx, 4(%3)\n"
-+ " movnti %%eax, %%es:0(%3)\n"
-+ " movnti %%edx, %%es:4(%3)\n"
- "3: movl 8(%4), %%eax\n"
- "31: movl 12(%4),%%edx\n"
-- " movnti %%eax, 8(%3)\n"
-- " movnti %%edx, 12(%3)\n"
-+ " movnti %%eax, %%es:8(%3)\n"
-+ " movnti %%edx, %%es:12(%3)\n"
- "4: movl 16(%4), %%eax\n"
- "41: movl 20(%4), %%edx\n"
-- " movnti %%eax, 16(%3)\n"
-- " movnti %%edx, 20(%3)\n"
-+ " movnti %%eax, %%es:16(%3)\n"
-+ " movnti %%edx, %%es:20(%3)\n"
- "10: movl 24(%4), %%eax\n"
- "51: movl 28(%4), %%edx\n"
-- " movnti %%eax, 24(%3)\n"
-- " movnti %%edx, 28(%3)\n"
-+ " movnti %%eax, %%es:24(%3)\n"
-+ " movnti %%edx, %%es:28(%3)\n"
- "11: movl 32(%4), %%eax\n"
- "61: movl 36(%4), %%edx\n"
-- " movnti %%eax, 32(%3)\n"
-- " movnti %%edx, 36(%3)\n"
-+ " movnti %%eax, %%es:32(%3)\n"
-+ " movnti %%edx, %%es:36(%3)\n"
- "12: movl 40(%4), %%eax\n"
- "71: movl 44(%4), %%edx\n"
-- " movnti %%eax, 40(%3)\n"
-- " movnti %%edx, 44(%3)\n"
-+ " movnti %%eax, %%es:40(%3)\n"
-+ " movnti %%edx, %%es:44(%3)\n"
- "13: movl 48(%4), %%eax\n"
- "81: movl 52(%4), %%edx\n"
-- " movnti %%eax, 48(%3)\n"
-- " movnti %%edx, 52(%3)\n"
-+ " movnti %%eax, %%es:48(%3)\n"
-+ " movnti %%edx, %%es:52(%3)\n"
- "14: movl 56(%4), %%eax\n"
- "91: movl 60(%4), %%edx\n"
-- " movnti %%eax, 56(%3)\n"
-- " movnti %%edx, 60(%3)\n"
-+ " movnti %%eax, %%es:56(%3)\n"
-+ " movnti %%edx, %%es:60(%3)\n"
+- "2: movl 0(%4), %%eax\n"
+- "21: movl 4(%4), %%edx\n"
++ "2: movl "__copyuser_seg" 0(%4), %%eax\n"
++ "21: movl "__copyuser_seg" 4(%4), %%edx\n"
+ " movnti %%eax, 0(%3)\n"
+ " movnti %%edx, 4(%3)\n"
+- "3: movl 8(%4), %%eax\n"
+- "31: movl 12(%4),%%edx\n"
++ "3: movl "__copyuser_seg" 8(%4), %%eax\n"
++ "31: movl "__copyuser_seg" 12(%4),%%edx\n"
+ " movnti %%eax, 8(%3)\n"
+ " movnti %%edx, 12(%3)\n"
+- "4: movl 16(%4), %%eax\n"
+- "41: movl 20(%4), %%edx\n"
++ "4: movl "__copyuser_seg" 16(%4), %%eax\n"
++ "41: movl "__copyuser_seg" 20(%4), %%edx\n"
+ " movnti %%eax, 16(%3)\n"
+ " movnti %%edx, 20(%3)\n"
+- "10: movl 24(%4), %%eax\n"
+- "51: movl 28(%4), %%edx\n"
++ "10: movl "__copyuser_seg" 24(%4), %%eax\n"
++ "51: movl "__copyuser_seg" 28(%4), %%edx\n"
+ " movnti %%eax, 24(%3)\n"
+ " movnti %%edx, 28(%3)\n"
+- "11: movl 32(%4), %%eax\n"
+- "61: movl 36(%4), %%edx\n"
++ "11: movl "__copyuser_seg" 32(%4), %%eax\n"
++ "61: movl "__copyuser_seg" 36(%4), %%edx\n"
+ " movnti %%eax, 32(%3)\n"
+ " movnti %%edx, 36(%3)\n"
+- "12: movl 40(%4), %%eax\n"
+- "71: movl 44(%4), %%edx\n"
++ "12: movl "__copyuser_seg" 40(%4), %%eax\n"
++ "71: movl "__copyuser_seg" 44(%4), %%edx\n"
+ " movnti %%eax, 40(%3)\n"
+ " movnti %%edx, 44(%3)\n"
+- "13: movl 48(%4), %%eax\n"
+- "81: movl 52(%4), %%edx\n"
++ "13: movl "__copyuser_seg" 48(%4), %%eax\n"
++ "81: movl "__copyuser_seg" 52(%4), %%edx\n"
+ " movnti %%eax, 48(%3)\n"
+ " movnti %%edx, 52(%3)\n"
+- "14: movl 56(%4), %%eax\n"
+- "91: movl 60(%4), %%edx\n"
++ "14: movl "__copyuser_seg" 56(%4), %%eax\n"
++ "91: movl "__copyuser_seg" 60(%4), %%edx\n"
+ " movnti %%eax, 56(%3)\n"
+ " movnti %%edx, 60(%3)\n"
" addl $-64, %0\n"
- " addl $64, %4\n"
- " addl $64, %3\n"
-@@ -588,6 +721,8 @@ static unsigned long __copy_user_intel_nocache(void *to,
+@@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_n
+ " shrl $2, %0\n"
+ " andl $3, %%eax\n"
+ " cld\n"
+- "6: rep; movsl\n"
++ "6: rep; movsl "__copyuser_seg" (%%esi), (%%edi)\n"
" movl %%eax,%0\n"
- "7: rep; movsb\n"
+- "7: rep; movsb\n"
++ "7: rep; movsb "__copyuser_seg" (%%esi), (%%edi)\n"
"8:\n"
-+ " pushl %%ss\n"
-+ " popl %%ds\n"
".section .fixup,\"ax\"\n"
"9: lea 0(%%eax,%0,4),%0\n"
- "16: jmp 8b\n"
-@@ -616,7 +751,7 @@ static unsigned long __copy_user_intel_nocache(void *to,
- " .long 7b,16b\n"
- ".previous"
- : "=&c"(size), "=&D" (d0), "=&S" (d1)
-- : "1"(to), "2"(from), "0"(size)
-+ : "1"(to), "2"(from), "0"(size), "r"(__USER_DS)
- : "eax", "edx", "memory");
- return size;
- }
-@@ -629,90 +764,146 @@ static unsigned long __copy_user_intel_nocache(void *to,
+@@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_n
*/
unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
unsigned long size);
@@ -17491,243 +16912,69 @@ index e218d5d..5f0615c 100644
/* Generic arbitrary sized copy. */
-#define __copy_user(to, from, size) \
--do { \
-- int __d0, __d1, __d2; \
-- __asm__ __volatile__( \
-- " cmp $7,%0\n" \
-- " jbe 1f\n" \
-- " movl %1,%0\n" \
-- " negl %0\n" \
-- " andl $7,%0\n" \
-- " subl %0,%3\n" \
++#define __copy_user(to, from, size, prefix, set, restore) \
+ do { \
+ int __d0, __d1, __d2; \
+ __asm__ __volatile__( \
++ set \
+ " cmp $7,%0\n" \
+ " jbe 1f\n" \
+ " movl %1,%0\n" \
+ " negl %0\n" \
+ " andl $7,%0\n" \
+ " subl %0,%3\n" \
- "4: rep; movsb\n" \
-- " movl %3,%0\n" \
-- " shrl $2,%0\n" \
-- " andl $3,%3\n" \
-- " .align 2,0x90\n" \
++ "4: rep; movsb "prefix" (%%esi), (%%edi)\n" \
+ " movl %3,%0\n" \
+ " shrl $2,%0\n" \
+ " andl $3,%3\n" \
+ " .align 2,0x90\n" \
- "0: rep; movsl\n" \
-- " movl %3,%0\n" \
++ "0: rep; movsl "prefix" (%%esi), (%%edi)\n" \
+ " movl %3,%0\n" \
- "1: rep; movsb\n" \
-- "2:\n" \
-- ".section .fixup,\"ax\"\n" \
-- "5: addl %3,%0\n" \
-- " jmp 2b\n" \
-- "3: lea 0(%3,%0,4),%0\n" \
-- " jmp 2b\n" \
-- ".previous\n" \
-- ".section __ex_table,\"a\"\n" \
-- " .align 4\n" \
-- " .long 4b,5b\n" \
-- " .long 0b,3b\n" \
-- " .long 1b,2b\n" \
-- ".previous" \
-- : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \
-- : "3"(size), "0"(size), "1"(to), "2"(from) \
-- : "memory"); \
--} while (0)
--
--#define __copy_user_zeroing(to, from, size) \
--do { \
-- int __d0, __d1, __d2; \
-- __asm__ __volatile__( \
-- " cmp $7,%0\n" \
-- " jbe 1f\n" \
-- " movl %1,%0\n" \
-- " negl %0\n" \
-- " andl $7,%0\n" \
-- " subl %0,%3\n" \
++ "1: rep; movsb "prefix" (%%esi), (%%edi)\n" \
+ "2:\n" \
++ restore \
+ ".section .fixup,\"ax\"\n" \
+ "5: addl %3,%0\n" \
+ " jmp 2b\n" \
+@@ -682,14 +799,14 @@ do { \
+ " negl %0\n" \
+ " andl $7,%0\n" \
+ " subl %0,%3\n" \
- "4: rep; movsb\n" \
-- " movl %3,%0\n" \
-- " shrl $2,%0\n" \
-- " andl $3,%3\n" \
-- " .align 2,0x90\n" \
++ "4: rep; movsb "__copyuser_seg" (%%esi), (%%edi)\n" \
+ " movl %3,%0\n" \
+ " shrl $2,%0\n" \
+ " andl $3,%3\n" \
+ " .align 2,0x90\n" \
- "0: rep; movsl\n" \
-- " movl %3,%0\n" \
++ "0: rep; movsl "__copyuser_seg" (%%esi), (%%edi)\n" \
+ " movl %3,%0\n" \
- "1: rep; movsb\n" \
-- "2:\n" \
-- ".section .fixup,\"ax\"\n" \
-- "5: addl %3,%0\n" \
-- " jmp 6f\n" \
-- "3: lea 0(%3,%0,4),%0\n" \
-- "6: pushl %0\n" \
-- " pushl %%eax\n" \
-- " xorl %%eax,%%eax\n" \
-- " rep; stosb\n" \
-- " popl %%eax\n" \
-- " popl %0\n" \
-- " jmp 2b\n" \
-- ".previous\n" \
-- ".section __ex_table,\"a\"\n" \
-- " .align 4\n" \
-- " .long 4b,5b\n" \
-- " .long 0b,3b\n" \
-- " .long 1b,6b\n" \
-- ".previous" \
-- : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \
-- : "3"(size), "0"(size), "1"(to), "2"(from) \
-- : "memory"); \
--} while (0)
-+static unsigned long
-+__generic_copy_to_user(void __user *to, const void *from, unsigned long size)
-+{
-+ int __d0, __d1, __d2;
-+
-+ __asm__ __volatile__(
-+ " movw %w8,%%es\n"
-+ " cmp $7,%0\n"
-+ " jbe 1f\n"
-+ " movl %1,%0\n"
-+ " negl %0\n"
-+ " andl $7,%0\n"
-+ " subl %0,%3\n"
-+ "4: rep; movsb\n"
-+ " movl %3,%0\n"
-+ " shrl $2,%0\n"
-+ " andl $3,%3\n"
-+ " .align 2,0x90\n"
-+ "0: rep; movsl\n"
-+ " movl %3,%0\n"
-+ "1: rep; movsb\n"
-+ "2:\n"
-+ " pushl %%ss\n"
-+ " popl %%es\n"
-+ ".section .fixup,\"ax\"\n"
-+ "5: addl %3,%0\n"
-+ " jmp 2b\n"
-+ "3: lea 0(%3,%0,4),%0\n"
-+ " jmp 2b\n"
-+ ".previous\n"
-+ ".section __ex_table,\"a\"\n"
-+ " .align 4\n"
-+ " .long 4b,5b\n"
-+ " .long 0b,3b\n"
-+ " .long 1b,2b\n"
-+ ".previous"
-+ : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2)
-+ : "3"(size), "0"(size), "1"(to), "2"(from), "r"(__USER_DS)
-+ : "memory");
-+ return size;
-+}
-+
-+static unsigned long
-+__generic_copy_from_user(void *to, const void __user *from, unsigned long size)
-+{
-+ int __d0, __d1, __d2;
-+
-+ __asm__ __volatile__(
-+ " movw %w8,%%ds\n"
-+ " cmp $7,%0\n"
-+ " jbe 1f\n"
-+ " movl %1,%0\n"
-+ " negl %0\n"
-+ " andl $7,%0\n"
-+ " subl %0,%3\n"
-+ "4: rep; movsb\n"
-+ " movl %3,%0\n"
-+ " shrl $2,%0\n"
-+ " andl $3,%3\n"
-+ " .align 2,0x90\n"
-+ "0: rep; movsl\n"
-+ " movl %3,%0\n"
-+ "1: rep; movsb\n"
-+ "2:\n"
-+ " pushl %%ss\n"
-+ " popl %%ds\n"
-+ ".section .fixup,\"ax\"\n"
-+ "5: addl %3,%0\n"
-+ " jmp 2b\n"
-+ "3: lea 0(%3,%0,4),%0\n"
-+ " jmp 2b\n"
-+ ".previous\n"
-+ ".section __ex_table,\"a\"\n"
-+ " .align 4\n"
-+ " .long 4b,5b\n"
-+ " .long 0b,3b\n"
-+ " .long 1b,2b\n"
-+ ".previous"
-+ : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2)
-+ : "3"(size), "0"(size), "1"(to), "2"(from), "r"(__USER_DS)
-+ : "memory");
-+ return size;
-+}
-+
-+static unsigned long
-+__copy_user_zeroing(void *to, const void __user *from, unsigned long size)
-+{
-+ int __d0, __d1, __d2;
-+
-+ __asm__ __volatile__(
-+ " movw %w8,%%ds\n"
-+ " cmp $7,%0\n"
-+ " jbe 1f\n"
-+ " movl %1,%0\n"
-+ " negl %0\n"
-+ " andl $7,%0\n"
-+ " subl %0,%3\n"
-+ "4: rep; movsb\n"
-+ " movl %3,%0\n"
-+ " shrl $2,%0\n"
-+ " andl $3,%3\n"
-+ " .align 2,0x90\n"
-+ "0: rep; movsl\n"
-+ " movl %3,%0\n"
-+ "1: rep; movsb\n"
-+ "2:\n"
-+ " pushl %%ss\n"
-+ " popl %%ds\n"
-+ ".section .fixup,\"ax\"\n"
-+ "5: addl %3,%0\n"
-+ " jmp 6f\n"
-+ "3: lea 0(%3,%0,4),%0\n"
-+ "6: pushl %0\n"
-+ " pushl %%eax\n"
-+ " xorl %%eax,%%eax\n"
-+ " rep; stosb\n"
-+ " popl %%eax\n"
-+ " popl %0\n"
-+ " jmp 2b\n"
-+ ".previous\n"
-+ ".section __ex_table,\"a\"\n"
-+ " .align 4\n"
-+ " .long 4b,5b\n"
-+ " .long 0b,3b\n"
-+ " .long 1b,6b\n"
-+ ".previous"
-+ : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2)
-+ : "3"(size), "0"(size), "1"(to), "2"(from), "r"(__USER_DS)
-+ : "memory");
-+ return size;
-+}
-
- unsigned long __copy_to_user_ll(void __user *to, const void *from,
- unsigned long n)
-@@ -775,9 +966,9 @@ survive:
++ "1: rep; movsb "__copyuser_seg" (%%esi), (%%edi)\n" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "5: addl %3,%0\n" \
+@@ -775,9 +892,9 @@ survive:
}
#endif
if (movsl_is_ok(to, from, n))
- __copy_user(to, from, n);
-+ n = __generic_copy_to_user(to, from, n);
++ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
else
- n = __copy_user_intel(to, from, n);
+ n = __generic_copy_to_user_intel(to, from, n);
return n;
}
EXPORT_SYMBOL(__copy_to_user_ll);
-@@ -786,7 +977,7 @@ unsigned long __copy_from_user_ll(void *to, const void __user *from,
- unsigned long n)
- {
- if (movsl_is_ok(to, from, n))
-- __copy_user_zeroing(to, from, n);
-+ n = __copy_user_zeroing(to, from, n);
- else
- n = __copy_user_zeroing_intel(to, from, n);
- return n;
-@@ -797,10 +988,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
+@@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero
unsigned long n)
{
if (movsl_is_ok(to, from, n))
- __copy_user(to, from, n);
-+ n = __generic_copy_from_user(to, from, n);
++ __copy_user(to, from, n, __copyuser_seg, "", "");
else
- n = __copy_user_intel((void __user *)to,
- (const void *)from, n);
@@ -17735,27 +16982,15 @@ index e218d5d..5f0615c 100644
return n;
}
EXPORT_SYMBOL(__copy_from_user_ll_nozero);
-@@ -812,9 +1002,9 @@ unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from,
- if (n > 64 && cpu_has_xmm2)
- n = __copy_user_zeroing_intel_nocache(to, from, n);
- else
-- __copy_user_zeroing(to, from, n);
-+ n = __copy_user_zeroing(to, from, n);
- #else
-- __copy_user_zeroing(to, from, n);
-+ n = __copy_user_zeroing(to, from, n);
- #endif
- return n;
- }
-@@ -827,65 +1017,53 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
+@@ -827,65 +943,49 @@ unsigned long __copy_from_user_ll_nocach
if (n > 64 && cpu_has_xmm2)
n = __copy_user_intel_nocache(to, from, n);
else
- __copy_user(to, from, n);
-+ n = __generic_copy_from_user(to, from, n);
++ __copy_user(to, from, n, __copyuser_seg, "", "");
#else
- __copy_user(to, from, n);
-+ n = __generic_copy_from_user(to, from, n);
++ __copy_user(to, from, n, __copyuser_seg, "", "");
#endif
return n;
}
@@ -17818,41 +17053,36 @@ index e218d5d..5f0615c 100644
-void copy_from_user_overflow(void)
+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+void __set_fs(mm_segment_t x, int cpu)
++void __set_fs(mm_segment_t x)
{
- WARN(1, "Buffer overflow detected!\n");
-+ unsigned long limit = x.seg;
-+ struct desc_struct d;
-+
-+ current_thread_info()->addr_limit = x;
-+ if (unlikely(paravirt_enabled()))
-+ return;
-+
-+ if (likely(limit))
-+ limit = (limit - 1UL) >> PAGE_SHIFT;
-+ pack_descriptor(&d, 0UL, limit, 0xF3, 0xC);
-+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_DS, &d, DESCTYPE_S);
++ switch (x.seg) {
++ case 0:
++ loadsegment(gs, 0);
++ break;
++ case TASK_SIZE_MAX:
++ loadsegment(gs, __USER_DS);
++ break;
++ case -1UL:
++ loadsegment(gs, __KERNEL_DS);
++ break;
++ default:
++ BUG();
++ }
++ return;
}
-EXPORT_SYMBOL(copy_from_user_overflow);
+
+void set_fs(mm_segment_t x)
+{
-+ __set_fs(x, get_cpu());
-+ put_cpu();
-+}
-+EXPORT_SYMBOL(copy_from_user);
-+#else
-+void set_fs(mm_segment_t x)
-+{
+ current_thread_info()->addr_limit = x;
++ __set_fs(x);
+}
-+#endif
-+
+EXPORT_SYMBOL(set_fs);
-diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
-index b7c2849..7d0bb03 100644
---- a/arch/x86/lib/usercopy_64.c
-+++ b/arch/x86/lib/usercopy_64.c
++#endif
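
The UDEREF variant of set_fs() above records the new limit in thread_info and reloads %gs to match it (no userland access, __USER_DS, or __KERNEL_DS). The limit it stores is what access_ok()-style range checks compare user pointers against. Below is a minimal userspace sketch of that kind of limit check; range_ok(), USER_LIMIT and KERNEL_LIMIT are illustrative names and values, not the kernel's.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins for USER_DS/KERNEL_DS style limits. */
    #define USER_LIMIT   0x00007ffffffff000ULL  /* roughly a 64-bit TASK_SIZE_MAX */
    #define KERNEL_LIMIT 0xffffffffffffffffULL  /* -1UL: unrestricted */

    static uint64_t addr_limit = USER_LIMIT;    /* what set_fs() would record */

    /* Accept [addr, addr+size) only if it fits below the current limit,
     * guarding against addr + size wrapping around. */
    static int range_ok(uint64_t addr, uint64_t size)
    {
        return size <= addr_limit && addr <= addr_limit - size;
    }

    int main(void)
    {
        printf("user range:   %d\n", range_ok(0x400000, 4096));           /* 1 */
        printf("kernel range: %d\n", range_ok(0xffff880000000000ULL, 8)); /* 0 */
        addr_limit = KERNEL_LIMIT;            /* what set_fs(KERNEL_DS) enables */
        printf("after set_fs(KERNEL_DS): %d\n",
               range_ok(0xffff880000000000ULL, 8));                       /* 1 */
        return 0;
    }

The segment reload in the hunk adds a hardware-enforced layer on top of this purely software check, which is the point of UDEREF on i386.
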
+diff -urNp linux-2.6.37/arch/x86/lib/usercopy_64.c linux-2.6.37/arch/x86/lib/usercopy_64.c
+--- linux-2.6.37/arch/x86/lib/usercopy_64.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/lib/usercopy_64.c 2011-01-17 02:41:01.000000000 -0500
@@ -42,6 +42,8 @@ long
__strncpy_from_user(char *dst, const char __user *src, long count)
{
@@ -17862,7 +17092,7 @@ index b7c2849..7d0bb03 100644
__do_strncpy_from_user(dst, src, count, res);
return res;
}
-@@ -65,6 +67,8 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
+@@ -65,6 +67,8 @@ unsigned long __clear_user(void __user *
{
long __d0;
might_fault();
@@ -17889,10 +17119,25 @@ index b7c2849..7d0bb03 100644
}
EXPORT_SYMBOL(copy_in_user);
-diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
-index d0474ad..360f761 100644
---- a/arch/x86/mm/extable.c
-+++ b/arch/x86/mm/extable.c
+diff -urNp linux-2.6.37/arch/x86/Makefile linux-2.6.37/arch/x86/Makefile
+--- linux-2.6.37/arch/x86/Makefile 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/Makefile 2011-01-17 02:41:01.000000000 -0500
+@@ -195,3 +195,12 @@ define archhelp
+ echo ' FDARGS="..." arguments for the booted kernel'
+ echo ' FDINITRD=file initrd for the booted kernel'
+ endef
++
++define OLD_LD
++
++*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
++*** Please upgrade your binutils to 2.18 or newer
++endef
++
++archprepare:
++ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
+diff -urNp linux-2.6.37/arch/x86/mm/extable.c linux-2.6.37/arch/x86/mm/extable.c
+--- linux-2.6.37/arch/x86/mm/extable.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/mm/extable.c 2011-01-17 02:41:01.000000000 -0500
@@ -1,14 +1,71 @@
#include <linux/module.h>
#include <linux/spinlock.h>
@@ -17966,14 +17211,13 @@ index d0474ad..360f761 100644
extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
extern u32 pnp_bios_is_utter_crap;
pnp_bios_is_utter_crap = 1;
-diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
-index 4c4508e..8752b7e 100644
---- a/arch/x86/mm/fault.c
-+++ b/arch/x86/mm/fault.c
-@@ -11,10 +11,19 @@
- #include <linux/kprobes.h> /* __kprobes, ... */
+diff -urNp linux-2.6.37/arch/x86/mm/fault.c linux-2.6.37/arch/x86/mm/fault.c
+--- linux-2.6.37/arch/x86/mm/fault.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/mm/fault.c 2011-01-17 02:41:01.000000000 -0500
+@@ -12,10 +12,18 @@
#include <linux/mmiotrace.h> /* kmmio_handler, ... */
#include <linux/perf_event.h> /* perf_sw_event */
+ #include <linux/hugetlb.h> /* hstate_index_to_shift */
+#include <linux/unistd.h>
+#include <linux/compiler.h>
@@ -17985,12 +17229,11 @@ index 4c4508e..8752b7e 100644
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+#include <asm/stacktrace.h>
-+#include "../kernel/dumpstack.h"
+#endif
/*
* Page fault error code bits:
-@@ -52,7 +61,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
+@@ -53,7 +61,7 @@ static inline int __kprobes notify_page_
int ret = 0;
/* kprobe_running() needs smp_processor_id() */
@@ -17999,7 +17242,31 @@ index 4c4508e..8752b7e 100644
preempt_disable();
if (kprobe_running() && kprobe_fault_handler(regs, 14))
ret = 1;
-@@ -173,6 +182,30 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
+@@ -114,7 +122,10 @@ check_prefetch_opcode(struct pt_regs *re
+ return !instr_lo || (instr_lo>>1) == 1;
+ case 0x00:
+ /* Prefetch instruction is 0x0F0D or 0x0F18 */
+- if (probe_kernel_address(instr, opcode))
++ if (user_mode(regs)) {
++ if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
++ return 0;
++ } else if (probe_kernel_address(instr, opcode))
+ return 0;
+
+ *prefetch = (instr_lo == 0xF) &&
+@@ -148,7 +159,10 @@ is_prefetch(struct pt_regs *regs, unsign
+ while (instr < max_instr) {
+ unsigned char opcode;
+
+- if (probe_kernel_address(instr, opcode))
++ if (user_mode(regs)) {
++ if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
++ break;
++ } else if (probe_kernel_address(instr, opcode))
+ break;
+
+ instr++;
+@@ -179,6 +193,30 @@ force_sig_info_fault(int si_signo, int s
force_sig_info(si_signo, &info, tsk);
}
@@ -18030,7 +17297,7 @@ index 4c4508e..8752b7e 100644
DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list);
-@@ -225,11 +258,24 @@ void vmalloc_sync_all(void)
+@@ -231,18 +269,35 @@ void vmalloc_sync_all(void)
address += PMD_SIZE) {
unsigned long flags;
@@ -18046,17 +17313,28 @@ index 4c4508e..8752b7e 100644
+#ifdef CONFIG_PAX_PER_CPU_PGD
+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
+ pgd_t *pgd = get_cpu_pgd(cpu);
++ pmd_t *ret;
+#else
list_for_each_entry(page, &pgd_list, lru) {
-- if (!vmalloc_sync_one(page_address(page), address))
+ pgd_t *pgd = page_address(page);
+ spinlock_t *pgt_lock;
+ pmd_t *ret;
+
+ pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
+
+ spin_lock(pgt_lock);
+- ret = vmalloc_sync_one(page_address(page), address);
+#endif
+
-+ if (!vmalloc_sync_one(pgd, address))
++ ret = vmalloc_sync_one(pgd, address);
++
++#ifndef CONFIG_PAX_PER_CPU_PGD
+ spin_unlock(pgt_lock);
++#endif
+
+ if (!ret)
break;
- }
- spin_unlock_irqrestore(&pgd_lock, flags);
-@@ -259,6 +305,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
+@@ -276,6 +331,11 @@ static noinline __kprobes int vmalloc_fa
* an interrupt in the middle of a task switch..
*/
pgd_paddr = read_cr3();
@@ -18068,35 +17346,7 @@ index 4c4508e..8752b7e 100644
pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
if (!pmd_k)
return -1;
-@@ -333,15 +384,27 @@ void vmalloc_sync_all(void)
-
- const pgd_t *pgd_ref = pgd_offset_k(address);
- unsigned long flags;
-+
-+#ifdef CONFIG_PAX_PER_CPU_PGD
-+ unsigned long cpu;
-+#else
- struct page *page;
-+#endif
-
- if (pgd_none(*pgd_ref))
- continue;
-
- spin_lock_irqsave(&pgd_lock, flags);
-+
-+#ifdef CONFIG_PAX_PER_CPU_PGD
-+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
-+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
-+#else
- list_for_each_entry(page, &pgd_list, lru) {
- pgd_t *pgd;
- pgd = (pgd_t *)page_address(page) + pgd_index(address);
-+#endif
-+
- if (pgd_none(*pgd))
- set_pgd(pgd, *pgd_ref);
- else
-@@ -374,7 +437,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
+@@ -371,7 +431,14 @@ static noinline __kprobes int vmalloc_fa
* happen within a race in page table update. In the later
* case just flush:
*/
@@ -18111,7 +17361,7 @@ index 4c4508e..8752b7e 100644
pgd_ref = pgd_offset_k(address);
if (pgd_none(*pgd_ref))
return -1;
-@@ -536,7 +606,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
+@@ -533,7 +600,7 @@ static int is_errata93(struct pt_regs *r
static int is_errata100(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
@@ -18120,7 +17370,7 @@ index 4c4508e..8752b7e 100644
return 1;
#endif
return 0;
-@@ -563,7 +633,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
+@@ -560,7 +627,7 @@ static int is_f00f_bug(struct pt_regs *r
}
static const char nx_warning[] = KERN_CRIT
@@ -18129,7 +17379,7 @@ index 4c4508e..8752b7e 100644
static void
show_fault_oops(struct pt_regs *regs, unsigned long error_code,
-@@ -572,15 +642,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
+@@ -569,15 +636,26 @@ show_fault_oops(struct pt_regs *regs, un
if (!oops_may_print())
return;
@@ -18158,7 +17408,7 @@ index 4c4508e..8752b7e 100644
printk(KERN_ALERT "BUG: unable to handle kernel ");
if (address < PAGE_SIZE)
printk(KERN_CONT "NULL pointer dereference");
-@@ -705,6 +786,68 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
+@@ -702,6 +780,68 @@ __bad_area_nosemaphore(struct pt_regs *r
unsigned long address, int si_code)
{
struct task_struct *tsk = current;
@@ -18227,7 +17477,7 @@ index 4c4508e..8752b7e 100644
/* User mode accesses just cause a SIGSEGV */
if (error_code & PF_USER) {
-@@ -851,6 +994,106 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
+@@ -849,6 +989,99 @@ static int spurious_fault_check(unsigned
return 1;
}
@@ -18296,9 +17546,6 @@ index 4c4508e..8752b7e 100644
+ * PaX: fill DTLB with user rights and retry
+ */
+ __asm__ __volatile__ (
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ "movw %w4,%%es\n"
-+#endif
+ "orb %2,(%1)\n"
+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
+/*
@@ -18316,14 +17563,10 @@ index 4c4508e..8752b7e 100644
+ */
+ "invlpg (%0)\n"
+#endif
-+ "testb $0,%%es:(%0)\n"
++ "testb $0,"__copyuser_seg"(%0)\n"
+ "xorb %3,(%1)\n"
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ "pushl %%ss\n"
-+ "popl %%es\n"
-+#endif
+ :
-+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER), "r" (__USER_DS)
++ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
+ : "memory", "cc");
+ pte_unmap_unlock(pte, ptl);
+ up_read(&mm->mmap_sem);
@@ -18334,24 +17577,26 @@ index 4c4508e..8752b7e 100644
/*
* Handle a spurious fault caused by a stale TLB entry.
*
-@@ -917,6 +1160,9 @@ int show_unhandled_signals = 1;
+@@ -921,6 +1154,9 @@ int show_unhandled_signals = 1;
static inline int
- access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
+ access_error(unsigned long error_code, struct vm_area_struct *vma)
{
+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
+ return 1;
+
- if (write) {
+ if (error_code & PF_WRITE) {
/* write, present and write, not present: */
if (unlikely(!(vma->vm_flags & VM_WRITE)))
-@@ -950,17 +1196,31 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
+@@ -954,19 +1190,33 @@ do_page_fault(struct pt_regs *regs, unsi
{
struct vm_area_struct *vma;
struct task_struct *tsk;
- unsigned long address;
struct mm_struct *mm;
- int write;
int fault;
+ int write = error_code & PF_WRITE;
+ unsigned int flags = FAULT_FLAG_ALLOW_RETRY |
+ (write ? FAULT_FLAG_WRITE : 0);
+ /* Get the faulting address: */
+ unsigned long address = read_cr2();
@@ -18364,7 +17609,7 @@ index 4c4508e..8752b7e 100644
+ }
+ if (address < PAX_USER_SHADOW_BASE) {
+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
-+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
++ printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
+ } else
+ address -= PAX_USER_SHADOW_BASE;
@@ -18380,7 +17625,7 @@ index 4c4508e..8752b7e 100644
/*
* Detect and handle instructions that would cause a page fault for
* both a tracked kernel page and a userspace page.
-@@ -1020,7 +1280,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
+@@ -1026,7 +1276,7 @@ do_page_fault(struct pt_regs *regs, unsi
* User-mode registers count as a user access even for any
* potential system fault or CPU buglet:
*/
@@ -18389,7 +17634,7 @@ index 4c4508e..8752b7e 100644
local_irq_enable();
error_code |= PF_USER;
} else {
-@@ -1074,6 +1334,11 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
+@@ -1081,6 +1331,11 @@ retry:
might_sleep();
}
@@ -18401,7 +17646,7 @@ index 4c4508e..8752b7e 100644
vma = find_vma(mm, address);
if (unlikely(!vma)) {
bad_area(regs, error_code, address);
-@@ -1085,18 +1350,24 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
+@@ -1092,18 +1347,24 @@ retry:
bad_area(regs, error_code, address);
return;
}
@@ -18437,7 +17682,7 @@ index 4c4508e..8752b7e 100644
if (unlikely(expand_stack(vma, address))) {
bad_area(regs, error_code, address);
return;
-@@ -1140,3 +1411,199 @@ good_area:
+@@ -1158,3 +1419,199 @@ good_area:
up_read(&mm->mmap_sem);
}
@@ -18637,11 +17882,10 @@ index 4c4508e..8752b7e 100644
+
+ return ret ? -EFAULT : 0;
+}
-diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
-index 738e659..ca82c82 100644
---- a/arch/x86/mm/gup.c
-+++ b/arch/x86/mm/gup.c
-@@ -237,7 +237,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
+diff -urNp linux-2.6.37/arch/x86/mm/gup.c linux-2.6.37/arch/x86/mm/gup.c
+--- linux-2.6.37/arch/x86/mm/gup.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/mm/gup.c 2011-01-17 02:41:01.000000000 -0500
+@@ -237,7 +237,7 @@ int __get_user_pages_fast(unsigned long
addr = start;
len = (unsigned long) nr_pages << PAGE_SHIFT;
end = start + len;
@@ -18650,11 +17894,10 @@ index 738e659..ca82c82 100644
(void __user *)start, len)))
return 0;
-diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
-index 63a6ba6..79abd7a 100644
---- a/arch/x86/mm/highmem_32.c
-+++ b/arch/x86/mm/highmem_32.c
-@@ -43,7 +43,10 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
+diff -urNp linux-2.6.37/arch/x86/mm/highmem_32.c linux-2.6.37/arch/x86/mm/highmem_32.c
+--- linux-2.6.37/arch/x86/mm/highmem_32.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/mm/highmem_32.c 2011-01-17 02:41:01.000000000 -0500
+@@ -44,7 +44,10 @@ void *kmap_atomic_prot(struct page *page
idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
BUG_ON(!pte_none(*(kmap_pte-idx)));
@@ -18665,11 +17908,10 @@ index 63a6ba6..79abd7a 100644
return (void *)vaddr;
}
-diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
-index 069ce7c..d318f90 100644
---- a/arch/x86/mm/hugetlbpage.c
-+++ b/arch/x86/mm/hugetlbpage.c
-@@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
+diff -urNp linux-2.6.37/arch/x86/mm/hugetlbpage.c linux-2.6.37/arch/x86/mm/hugetlbpage.c
+--- linux-2.6.37/arch/x86/mm/hugetlbpage.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/mm/hugetlbpage.c 2011-01-17 02:41:01.000000000 -0500
+@@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmappe
struct hstate *h = hstate_file(file);
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
@@ -18729,7 +17971,7 @@ index 069ce7c..d318f90 100644
}
static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
-@@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
+@@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmappe
{
struct hstate *h = hstate_file(file);
struct mm_struct *mm = current->mm;
@@ -18742,7 +17984,7 @@ index 069ce7c..d318f90 100644
/* don't allow allocations above current base */
if (mm->free_area_cache > base)
-@@ -321,7 +328,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
+@@ -321,7 +328,7 @@ static unsigned long hugetlb_get_unmappe
largest_hole = 0;
mm->free_area_cache = base;
}
@@ -18841,7 +18083,7 @@ index 069ce7c..d318f90 100644
mm->free_area_cache = base;
mm->cached_hole_size = ~0UL;
-@@ -399,10 +405,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+@@ -399,10 +405,19 @@ hugetlb_get_unmapped_area(struct file *f
struct hstate *h = hstate_file(file);
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
@@ -18862,7 +18104,7 @@ index 069ce7c..d318f90 100644
return -ENOMEM;
if (flags & MAP_FIXED) {
-@@ -414,8 +429,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+@@ -414,8 +429,7 @@ hugetlb_get_unmapped_area(struct file *f
if (addr) {
addr = ALIGN(addr, huge_page_size(h));
vma = find_vma(mm, addr);
@@ -18872,132 +18114,10 @@ index 069ce7c..d318f90 100644
return addr;
}
if (mm->get_unmapped_area == arch_get_unmapped_area)
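
The hugetlb hunks above swap the bare TASK_SIZE checks for a pax_task_size that SEGMEXEC shrinks, and replace the plain "addr + len <= vma->vm_start" test with check_heap_stack_gap(). The sketch below captures only the shape of that acceptance test; the halved task size, the gap constant and request_ok() itself are illustrative assumptions, and the real check_heap_stack_gap() policy is more involved.

    #include <stdint.h>
    #include <stdio.h>

    #define FULL_TASK_SIZE 0xC0000000UL   /* illustrative 32-bit split */
    #define GAP            0x10000UL      /* illustrative guard gap    */

    /* Accept a fixed-address request only if it fits below the effective
     * task size and leaves a gap before the next mapping (0 = none). */
    static int request_ok(uint64_t addr, uint64_t len, int segmexec,
                          uint64_t next_vma_start)
    {
        uint64_t task_size = segmexec ? FULL_TASK_SIZE / 2 : FULL_TASK_SIZE;

        if (len > task_size || addr > task_size - len)
            return 0;
        if (next_vma_start && addr + len + GAP > next_vma_start)
            return 0;
        return 1;
    }

    int main(void)
    {
        printf("%d\n", request_ok(0x10000000, 0x200000, 0, 0x40000000)); /* 1 */
        printf("%d\n", request_ok(0x70000000, 0x200000, 1, 0));          /* 0 */
        return 0;
    }
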
-diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
-index b278535..9654c83 100644
---- a/arch/x86/mm/init.c
-+++ b/arch/x86/mm/init.c
-@@ -70,11 +70,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
- * cause a hotspot and fill up ZONE_DMA. The page tables
- * need roughly 0.5KB per GB.
- */
--#ifdef CONFIG_X86_32
-- start = 0x7000;
--#else
-- start = 0x8000;
--#endif
-+ start = 0x100000;
- e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
- tables, PAGE_SIZE);
- if (e820_table_start == -1UL)
-@@ -321,7 +317,13 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
- */
- int devmem_is_allowed(unsigned long pagenr)
- {
-- if (pagenr <= 256)
-+ if (!pagenr)
-+ return 1;
-+#ifdef CONFIG_VM86
-+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
-+ return 1;
-+#endif
-+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
- return 1;
- if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
- return 0;
-@@ -380,6 +382,88 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
-
- void free_initmem(void)
- {
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+#ifdef CONFIG_X86_32
-+ /* PaX: limit KERNEL_CS to actual size */
-+ unsigned long addr, limit;
-+ struct desc_struct d;
-+ int cpu;
-+
-+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
-+ limit = (limit - 1UL) >> PAGE_SHIFT;
-+
-+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
-+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
-+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
-+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
-+ }
-+
-+ /* PaX: make KERNEL_CS read-only */
-+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
-+ if (!paravirt_enabled())
-+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
-+/*
-+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
-+ pgd = pgd_offset_k(addr);
-+ pud = pud_offset(pgd, addr);
-+ pmd = pmd_offset(pud, addr);
-+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
-+ }
-+*/
-+#ifdef CONFIG_X86_PAE
-+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
-+/*
-+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
-+ pgd = pgd_offset_k(addr);
-+ pud = pud_offset(pgd, addr);
-+ pmd = pmd_offset(pud, addr);
-+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
-+ }
-+*/
-+#endif
-+
-+#ifdef CONFIG_MODULES
-+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
-+#endif
-+
-+#else
-+ pgd_t *pgd;
-+ pud_t *pud;
-+ pmd_t *pmd;
-+ unsigned long addr, end;
-+
-+ /* PaX: make kernel code/rodata read-only, rest non-executable */
-+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
-+ pgd = pgd_offset_k(addr);
-+ pud = pud_offset(pgd, addr);
-+ pmd = pmd_offset(pud, addr);
-+ if (!pmd_present(*pmd))
-+ continue;
-+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
-+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
-+ else
-+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
-+ }
-+
-+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
-+ end = addr + KERNEL_IMAGE_SIZE;
-+ for (; addr < end; addr += PMD_SIZE) {
-+ pgd = pgd_offset_k(addr);
-+ pud = pud_offset(pgd, addr);
-+ pmd = pmd_offset(pud, addr);
-+ if (!pmd_present(*pmd))
-+ continue;
-+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
-+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
-+ else
-+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
-+ }
-+#endif
-+
-+ flush_tlb_all();
-+#endif
-+
- free_init_pages("unused kernel memory",
- (unsigned long)(&__init_begin),
- (unsigned long)(&__init_end));
-diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
-index bca7909..313e4cb 100644
---- a/arch/x86/mm/init_32.c
-+++ b/arch/x86/mm/init_32.c
-@@ -72,36 +72,6 @@ static __init void *alloc_low_page(void)
+diff -urNp linux-2.6.37/arch/x86/mm/init_32.c linux-2.6.37/arch/x86/mm/init_32.c
+--- linux-2.6.37/arch/x86/mm/init_32.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/mm/init_32.c 2011-01-17 02:41:01.000000000 -0500
+@@ -73,36 +73,6 @@ static __init void *alloc_low_page(void)
}
/*
@@ -19034,7 +18154,7 @@ index bca7909..313e4cb 100644
* Create a page table and place a pointer to it in a middle page
* directory entry:
*/
-@@ -121,13 +91,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
+@@ -122,13 +92,28 @@ static pte_t * __init one_page_table_ini
page_table = (pte_t *)alloc_low_page();
paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
@@ -19063,7 +18183,7 @@ index bca7909..313e4cb 100644
pmd_t * __init populate_extra_pmd(unsigned long vaddr)
{
int pgd_idx = pgd_index(vaddr);
-@@ -201,6 +186,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
+@@ -202,6 +187,7 @@ page_table_range_init(unsigned long star
int pgd_idx, pmd_idx;
unsigned long vaddr;
pgd_t *pgd;
@@ -19071,7 +18191,7 @@ index bca7909..313e4cb 100644
pmd_t *pmd;
pte_t *pte = NULL;
-@@ -210,8 +196,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
+@@ -211,8 +197,13 @@ page_table_range_init(unsigned long star
pgd = pgd_base + pgd_idx;
for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
@@ -19087,7 +18207,7 @@ index bca7909..313e4cb 100644
for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
pmd++, pmd_idx++) {
pte = page_table_kmap_check(one_page_table_init(pmd),
-@@ -223,11 +214,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
+@@ -224,11 +215,20 @@ page_table_range_init(unsigned long star
}
}
@@ -19112,7 +18232,7 @@ index bca7909..313e4cb 100644
}
/*
-@@ -244,9 +244,10 @@ kernel_physical_mapping_init(unsigned long start,
+@@ -245,9 +245,10 @@ kernel_physical_mapping_init(unsigned lo
unsigned long last_map_addr = end;
unsigned long start_pfn, end_pfn;
pgd_t *pgd_base = swapper_pg_dir;
@@ -19124,7 +18244,7 @@ index bca7909..313e4cb 100644
pmd_t *pmd;
pte_t *pte;
unsigned pages_2m, pages_4k;
-@@ -279,8 +280,13 @@ repeat:
+@@ -280,8 +281,13 @@ repeat:
pfn = start_pfn;
pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
pgd = pgd_base + pgd_idx;
@@ -19140,7 +18260,7 @@ index bca7909..313e4cb 100644
if (pfn >= end_pfn)
continue;
-@@ -292,14 +298,13 @@ repeat:
+@@ -293,14 +299,13 @@ repeat:
#endif
for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
pmd++, pmd_idx++) {
@@ -19156,7 +18276,7 @@ index bca7909..313e4cb 100644
pgprot_t prot = PAGE_KERNEL_LARGE;
/*
* first pass will use the same initial
-@@ -309,11 +314,7 @@ repeat:
+@@ -310,11 +315,7 @@ repeat:
__pgprot(PTE_IDENT_ATTR |
_PAGE_PSE);
@@ -19169,7 +18289,7 @@ index bca7909..313e4cb 100644
prot = PAGE_KERNEL_LARGE_EXEC;
pages_2m++;
-@@ -330,7 +331,7 @@ repeat:
+@@ -331,7 +332,7 @@ repeat:
pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
pte += pte_ofs;
for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
@@ -19178,7 +18298,7 @@ index bca7909..313e4cb 100644
pgprot_t prot = PAGE_KERNEL;
/*
* first pass will use the same initial
-@@ -338,7 +339,7 @@ repeat:
+@@ -339,7 +340,7 @@ repeat:
*/
pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
@@ -19187,7 +18307,7 @@ index bca7909..313e4cb 100644
prot = PAGE_KERNEL_EXEC;
pages_4k++;
-@@ -491,7 +492,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
+@@ -471,7 +472,7 @@ void __init native_pagetable_setup_start
pud = pud_offset(pgd, va);
pmd = pmd_offset(pud, va);
@@ -19196,7 +18316,7 @@ index bca7909..313e4cb 100644
break;
pte = pte_offset_kernel(pmd, va);
-@@ -543,9 +544,7 @@ void __init early_ioremap_page_table_range_init(void)
+@@ -523,12 +524,10 @@ void __init early_ioremap_page_table_ran
static void __init pagetable_init(void)
{
@@ -19206,41 +18326,12 @@ index bca7909..313e4cb 100644
+ permanent_kmaps_init(swapper_pg_dir);
}
- #ifdef CONFIG_ACPI_SLEEP
-@@ -553,12 +552,12 @@ static void __init pagetable_init(void)
- * ACPI suspend needs this for resume, because things like the intel-agp
- * driver might have split up a kernel 4MB mapping.
- */
--char swsusp_pg_dir[PAGE_SIZE]
-+pgd_t swsusp_pg_dir[PTRS_PER_PGD]
- __attribute__ ((aligned(PAGE_SIZE)));
-
- static inline void save_pg_dir(void)
- {
-- memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
-+ clone_pgd_range(swsusp_pg_dir, swapper_pg_dir, PTRS_PER_PGD);
- }
- #else /* !CONFIG_ACPI_SLEEP */
- static inline void save_pg_dir(void)
-@@ -590,7 +589,7 @@ void zap_low_mappings(bool early)
- flush_tlb_all();
- }
-
-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
EXPORT_SYMBOL_GPL(__supported_pte_mask);
/* user-defined highmem size */
-@@ -781,7 +780,7 @@ void __init setup_bootmem_allocator(void)
- * Initialize the boot-time allocator (with low memory only):
- */
- bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
-- bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
-+ bootmap = find_e820_area(0x100000, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
- PAGE_SIZE);
- if (bootmap == -1L)
- panic("Cannot find bootmem map of size %ld\n", bootmap_size);
-@@ -871,6 +870,12 @@ void __init mem_init(void)
+@@ -753,6 +752,12 @@ void __init mem_init(void)
pci_iommu_alloc();
@@ -19253,7 +18344,7 @@ index bca7909..313e4cb 100644
#ifdef CONFIG_FLATMEM
BUG_ON(!mem_map);
#endif
-@@ -888,7 +893,7 @@ void __init mem_init(void)
+@@ -770,7 +775,7 @@ void __init mem_init(void)
set_highmem_pages_init();
codesize = (unsigned long) &_etext - (unsigned long) &_text;
@@ -19262,7 +18353,7 @@ index bca7909..313e4cb 100644
initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
-@@ -929,10 +934,10 @@ void __init mem_init(void)
+@@ -811,10 +816,10 @@ void __init mem_init(void)
((unsigned long)&__init_end -
(unsigned long)&__init_begin) >> 10,
@@ -19276,7 +18367,7 @@ index bca7909..313e4cb 100644
((unsigned long)&_etext - (unsigned long)&_text) >> 10);
/*
-@@ -1013,6 +1018,7 @@ void set_kernel_text_rw(void)
+@@ -892,6 +897,7 @@ void set_kernel_text_rw(void)
if (!kernel_set_to_readonly)
return;
@@ -19284,7 +18375,7 @@ index bca7909..313e4cb 100644
pr_debug("Set kernel text: %lx - %lx for read write\n",
start, start+size);
-@@ -1027,6 +1033,7 @@ void set_kernel_text_ro(void)
+@@ -906,6 +912,7 @@ void set_kernel_text_ro(void)
if (!kernel_set_to_readonly)
return;
@@ -19292,7 +18383,7 @@ index bca7909..313e4cb 100644
pr_debug("Set kernel text: %lx - %lx for read only\n",
start, start+size);
-@@ -1038,6 +1045,7 @@ void mark_rodata_ro(void)
+@@ -917,6 +924,7 @@ void mark_rodata_ro(void)
unsigned long start = PFN_ALIGN(_text);
unsigned long size = PFN_ALIGN(_etext) - start;
@@ -19300,19 +18391,10 @@ index bca7909..313e4cb 100644
set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
printk(KERN_INFO "Write protecting the kernel text: %luk\n",
size >> 10);
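
In the init_32.c mapping loops above, executable page protections are granted only when is_kernel_text() reports that the range being mapped overlaps the kernel's code image; everything else stays non-executable, and free_initmem() later tightens the text to read-only under KERNEXEC. A toy version of that decision, with made-up section boundaries standing in for the _text/_etext symbols:

    #include <stdio.h>

    /* Illustrative stand-ins for the _text/_etext section symbols. */
    #define TEXT_START 0xc1000000UL
    #define TEXT_END   0xc1600000UL

    /* Map a range executable only if it overlaps the kernel text image;
     * everything else is left non-executable. */
    static const char *prot_for(unsigned long start, unsigned long end)
    {
        return (start < TEXT_END && end > TEXT_START) ? "exec" : "noexec";
    }

    int main(void)
    {
        printf("%s\n", prot_for(0xc1200000UL, 0xc1400000UL)); /* kernel code  */
        printf("%s\n", prot_for(0xc2000000UL, 0xc2400000UL)); /* ordinary RAM */
        return 0;
    }
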
-diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
-index ee41bba..94a8b29 100644
---- a/arch/x86/mm/init_64.c
-+++ b/arch/x86/mm/init_64.c
-@@ -50,7 +50,6 @@
- #include <asm/numa.h>
- #include <asm/cacheflush.h>
- #include <asm/init.h>
--#include <linux/bootmem.h>
-
- static unsigned long dma_reserve __initdata;
-
-@@ -74,7 +73,7 @@ early_param("gbpages", parse_direct_gbpages_on);
+diff -urNp linux-2.6.37/arch/x86/mm/init_64.c linux-2.6.37/arch/x86/mm/init_64.c
+--- linux-2.6.37/arch/x86/mm/init_64.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/mm/init_64.c 2011-01-17 02:41:01.000000000 -0500
+@@ -72,7 +72,7 @@ early_param("gbpages", parse_direct_gbpa
* around without checking the pgd every time.
*/
@@ -19321,7 +18403,49 @@ index ee41bba..94a8b29 100644
EXPORT_SYMBOL_GPL(__supported_pte_mask);
int force_personality32;
-@@ -165,7 +164,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
+@@ -106,12 +106,22 @@ void sync_global_pgds(unsigned long star
+ for (address = start; address <= end; address += PGDIR_SIZE) {
+ const pgd_t *pgd_ref = pgd_offset_k(address);
+ unsigned long flags;
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ unsigned long cpu;
++#else
+ struct page *page;
++#endif
+
+ if (pgd_none(*pgd_ref))
+ continue;
+
+ spin_lock_irqsave(&pgd_lock, flags);
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
++ pgd_t *pgd = pgd_offset_cpu(cpu, address);
++#else
+ list_for_each_entry(page, &pgd_list, lru) {
+ pgd_t *pgd;
+ spinlock_t *pgt_lock;
+@@ -119,6 +129,7 @@ void sync_global_pgds(unsigned long star
+ pgd = (pgd_t *)page_address(page) + pgd_index(address);
+ pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
+ spin_lock(pgt_lock);
++#endif
+
+ if (pgd_none(*pgd))
+ set_pgd(pgd, *pgd_ref);
+@@ -126,7 +137,10 @@ void sync_global_pgds(unsigned long star
+ BUG_ON(pgd_page_vaddr(*pgd)
+ != pgd_page_vaddr(*pgd_ref));
+
++#ifndef CONFIG_PAX_PER_CPU_PGD
+ spin_unlock(pgt_lock);
++#endif
++
+ }
+ spin_unlock_irqrestore(&pgd_lock, flags);
+ }
+@@ -200,7 +214,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
pmd = fill_pmd(pud, vaddr);
pte = fill_pte(pmd, vaddr);
@@ -19331,7 +18455,7 @@ index ee41bba..94a8b29 100644
/*
* It's enough to flush this one mapping.
-@@ -224,14 +225,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
+@@ -259,14 +275,12 @@ static void __init __init_extra_mapping(
pgd = pgd_offset_k((unsigned long)__va(phys));
if (pgd_none(*pgd)) {
pud = (pud_t *) spp_getpage();
@@ -19348,7 +18472,7 @@ index ee41bba..94a8b29 100644
}
pmd = pmd_offset(pud, phys);
BUG_ON(!pmd_none(*pmd));
-@@ -680,6 +679,12 @@ void __init mem_init(void)
+@@ -706,6 +720,12 @@ void __init mem_init(void)
pci_iommu_alloc();
@@ -19361,7 +18485,7 @@ index ee41bba..94a8b29 100644
/* clear_bss() already clear the empty_zero_page */
reservedpages = 0;
-@@ -886,8 +891,8 @@ int kern_addr_valid(unsigned long addr)
+@@ -866,8 +886,8 @@ int kern_addr_valid(unsigned long addr)
static struct vm_area_struct gate_vma = {
.vm_start = VSYSCALL_START,
.vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
@@ -19372,7 +18496,7 @@ index ee41bba..94a8b29 100644
};
struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
-@@ -921,7 +926,7 @@ int in_gate_area_no_task(unsigned long addr)
+@@ -901,7 +921,7 @@ int in_gate_area_no_task(unsigned long a
const char *arch_vma_name(struct vm_area_struct *vma)
{
@@ -19381,12 +18505,129 @@ index ee41bba..94a8b29 100644
return "[vdso]";
if (vma == &gate_vma)
return "[vsyscall]";
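
With CONFIG_PAX_PER_CPU_PGD every CPU carries its own top-level page table, so the sync_global_pgds() hunk above walks each CPU's pgd instead of the shared pgd_list when a newly created kernel-space entry has to be propagated. A toy model of that propagation over plain arrays; NR_CPUS, PGD_ENTRIES and the entry encoding are stand-ins.

    #include <stdint.h>
    #include <stdio.h>

    #define NR_CPUS     4
    #define PGD_ENTRIES 8

    static uint64_t ref_pgd[PGD_ENTRIES];           /* reference kernel entries */
    static uint64_t cpu_pgd[NR_CPUS][PGD_ENTRIES];  /* one top level per CPU    */

    /* Copy a newly populated kernel entry into every per-CPU copy that
     * does not have it yet (the for-each-cpu loop in the hunk). */
    static void sync_entry(int idx)
    {
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
            if (cpu_pgd[cpu][idx] == 0)
                cpu_pgd[cpu][idx] = ref_pgd[idx];
    }

    int main(void)
    {
        ref_pgd[3] = 0x1234000ULL | 0x63;   /* pretend entry 3 was just created */
        sync_entry(3);
        printf("cpu0[3]=%#llx cpu3[3]=%#llx\n",
               (unsigned long long)cpu_pgd[0][3],
               (unsigned long long)cpu_pgd[3][3]);
        return 0;
    }

Note that the hunk also skips the per-pagetable lock in this configuration, as the #ifndef CONFIG_PAX_PER_CPU_PGD around spin_unlock() shows.
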
-diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
-index 84e236c..69bd3f6 100644
---- a/arch/x86/mm/iomap_32.c
-+++ b/arch/x86/mm/iomap_32.c
-@@ -65,7 +65,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
- debug_kmap_atomic(type);
+diff -urNp linux-2.6.37/arch/x86/mm/init.c linux-2.6.37/arch/x86/mm/init.c
+--- linux-2.6.37/arch/x86/mm/init.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/mm/init.c 2011-01-17 02:41:01.000000000 -0500
+@@ -72,11 +72,7 @@ static void __init find_early_table_spac
+ * cause a hotspot and fill up ZONE_DMA. The page tables
+ * need roughly 0.5KB per GB.
+ */
+-#ifdef CONFIG_X86_32
+- start = 0x7000;
+-#else
+- start = 0x8000;
+-#endif
++ start = 0x100000;
+ base = memblock_find_in_range(start, max_pfn_mapped<<PAGE_SHIFT,
+ tables, PAGE_SIZE);
+ if (base == MEMBLOCK_ERROR)
+@@ -323,7 +319,13 @@ unsigned long __init_refok init_memory_m
+ */
+ int devmem_is_allowed(unsigned long pagenr)
+ {
+- if (pagenr <= 256)
++ if (!pagenr)
++ return 1;
++#ifdef CONFIG_VM86
++ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
++ return 1;
++#endif
++ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
+ return 1;
+ if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
+ return 0;
+@@ -382,6 +384,86 @@ void free_init_pages(char *what, unsigne
+
+ void free_initmem(void)
+ {
++
++#ifdef CONFIG_PAX_KERNEXEC
++#ifdef CONFIG_X86_32
++ /* PaX: limit KERNEL_CS to actual size */
++ unsigned long addr, limit;
++ struct desc_struct d;
++ int cpu;
++
++ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
++ limit = (limit - 1UL) >> PAGE_SHIFT;
++
++ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
++ for (cpu = 0; cpu < NR_CPUS; cpu++) {
++ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
++ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
++ }
++
++ /* PaX: make KERNEL_CS read-only */
++ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
++ if (!paravirt_enabled())
++ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
++/*
++ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
++ pgd = pgd_offset_k(addr);
++ pud = pud_offset(pgd, addr);
++ pmd = pmd_offset(pud, addr);
++ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
++ }
++*/
++#ifdef CONFIG_X86_PAE
++ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
++/*
++ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
++ pgd = pgd_offset_k(addr);
++ pud = pud_offset(pgd, addr);
++ pmd = pmd_offset(pud, addr);
++ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
++ }
++*/
++#endif
++
++#ifdef CONFIG_MODULES
++ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
++#endif
++
++#else
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ unsigned long addr, end;
++
++ /* PaX: make kernel code/rodata read-only, rest non-executable */
++ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
++ pgd = pgd_offset_k(addr);
++ pud = pud_offset(pgd, addr);
++ pmd = pmd_offset(pud, addr);
++ if (!pmd_present(*pmd))
++ continue;
++ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
++ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
++ else
++ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
++ }
++
++ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
++ end = addr + KERNEL_IMAGE_SIZE;
++ for (; addr < end; addr += PMD_SIZE) {
++ pgd = pgd_offset_k(addr);
++ pud = pud_offset(pgd, addr);
++ pmd = pmd_offset(pud, addr);
++ if (!pmd_present(*pmd))
++ continue;
++ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
++ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
++ }
++#endif
++
++ flush_tlb_all();
++#endif
++
+ free_init_pages("unused kernel memory",
+ (unsigned long)(&__init_begin),
+ (unsigned long)(&__init_end));
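
The devmem_is_allowed() change above stops treating the whole first megabyte as fair game for /dev/mem: only page zero, the sub-ISA pages when VM86 is configured, and the ISA hole itself pass the early check. A userspace restatement of just that prefix (the remaining iomem/RAM checks are unchanged in the patch and omitted here; the ISA window constants are the conventional 0xa0000-0x100000 range, assumed rather than taken from the headers):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define ISA_START  0xa0000UL   /* conventional ISA window, assumed */
    #define ISA_END    0x100000UL

    /* Mirrors the patched early checks: 1 = allowed outright,
     * 0 = left to the later iomem/RAM tests (not modelled here). */
    static int devmem_prefix_allowed(unsigned long pagenr, int vm86)
    {
        if (!pagenr)
            return 1;
        if (vm86 && pagenr < (ISA_START >> PAGE_SHIFT))
            return 1;
        if ((ISA_START >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END >> PAGE_SHIFT))
            return 1;
        return 0;
    }

    int main(void)
    {
        printf("page 0x00:  %d\n", devmem_prefix_allowed(0x00, 0));  /* 1 */
        printf("page 0x50:  %d\n", devmem_prefix_allowed(0x50, 0));  /* 0 without VM86 */
        printf("page 0xa1:  %d\n", devmem_prefix_allowed(0xa1, 0));  /* 1, ISA hole    */
        printf("page 0x200: %d\n", devmem_prefix_allowed(0x200, 0)); /* 0              */
        return 0;
    }
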
+diff -urNp linux-2.6.37/arch/x86/mm/iomap_32.c linux-2.6.37/arch/x86/mm/iomap_32.c
+--- linux-2.6.37/arch/x86/mm/iomap_32.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/mm/iomap_32.c 2011-01-17 02:41:01.000000000 -0500
+@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long
+ type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR * smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+
@@ -19397,19 +18638,11 @@ index 84e236c..69bd3f6 100644
arch_flush_lazy_mmu_mode();
return (void *)vaddr;
-diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
-index 12e4d2d..66f0373 100644
---- a/arch/x86/mm/ioremap.c
-+++ b/arch/x86/mm/ioremap.c
-@@ -100,13 +100,10 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
- /*
- * Don't allow anybody to remap normal RAM that we're using..
- */
-- for (pfn = phys_addr >> PAGE_SHIFT;
-- (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
-- pfn++) {
--
-+ for (pfn = phys_addr >> PAGE_SHIFT; ((resource_size_t)pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK); pfn++) {
+diff -urNp linux-2.6.37/arch/x86/mm/ioremap.c linux-2.6.37/arch/x86/mm/ioremap.c
+--- linux-2.6.37/arch/x86/mm/ioremap.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/mm/ioremap.c 2011-01-17 02:41:01.000000000 -0500
+@@ -104,7 +104,7 @@ static void __iomem *__ioremap_caller(re
+ for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
int is_ram = page_is_ram(pfn);
- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
@@ -19417,7 +18650,7 @@ index 12e4d2d..66f0373 100644
return NULL;
WARN_ON_ONCE(is_ram);
}
-@@ -346,7 +343,7 @@ static int __init early_ioremap_debug_setup(char *str)
+@@ -344,7 +344,7 @@ static int __init early_ioremap_debug_se
early_param("early_ioremap_debug", early_ioremap_debug_setup);
static __initdata int after_paging_init;
@@ -19426,7 +18659,7 @@ index 12e4d2d..66f0373 100644
static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
-@@ -378,8 +375,7 @@ void __init early_ioremap_init(void)
+@@ -381,8 +381,7 @@ void __init early_ioremap_init(void)
slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
@@ -19436,11 +18669,10 @@ index 12e4d2d..66f0373 100644
/*
* The boot-ioremap range spans multiple pmds, for which
-diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
-index b3b531a..b65b190 100644
---- a/arch/x86/mm/kmemcheck/kmemcheck.c
-+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
-@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
+diff -urNp linux-2.6.37/arch/x86/mm/kmemcheck/kmemcheck.c linux-2.6.37/arch/x86/mm/kmemcheck/kmemcheck.c
+--- linux-2.6.37/arch/x86/mm/kmemcheck/kmemcheck.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/mm/kmemcheck/kmemcheck.c 2011-01-17 02:41:01.000000000 -0500
+@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
* memory (e.g. tracked pages)? For now, we need this to avoid
* invoking kmemcheck for PnP BIOS calls.
*/
@@ -19452,11 +18684,10 @@ index b3b531a..b65b190 100644
return false;
pte = kmemcheck_pte_lookup(address);
-diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
-index 1dab519..60a7e5f 100644
---- a/arch/x86/mm/mmap.c
-+++ b/arch/x86/mm/mmap.c
-@@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size(void)
+diff -urNp linux-2.6.37/arch/x86/mm/mmap.c linux-2.6.37/arch/x86/mm/mmap.c
+--- linux-2.6.37/arch/x86/mm/mmap.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/mm/mmap.c 2011-01-17 02:41:01.000000000 -0500
+@@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
* Leave an at least ~128 MB hole with possible stack randomization.
*/
#define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
@@ -19511,7 +18742,7 @@ index 1dab519..60a7e5f 100644
return TASK_UNMAPPED_BASE + mmap_rnd();
}
-@@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(void)
+@@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
void arch_pick_mmap_layout(struct mm_struct *mm)
{
if (mmap_is_legacy()) {
@@ -19537,11 +18768,10 @@ index 1dab519..60a7e5f 100644
mm->get_unmapped_area = arch_get_unmapped_area_topdown;
mm->unmap_area = arch_unmap_area_topdown;
}
-diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
-index 809baaa..e3892a3 100644
---- a/arch/x86/mm/numa_32.c
-+++ b/arch/x86/mm/numa_32.c
-@@ -98,7 +98,6 @@ unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn,
+diff -urNp linux-2.6.37/arch/x86/mm/numa_32.c linux-2.6.37/arch/x86/mm/numa_32.c
+--- linux-2.6.37/arch/x86/mm/numa_32.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/mm/numa_32.c 2011-01-17 02:41:01.000000000 -0500
+@@ -99,7 +99,6 @@ unsigned long node_memmap_size_bytes(int
}
#endif
@@ -19549,24 +18779,10 @@ index 809baaa..e3892a3 100644
extern unsigned long highend_pfn, highstart_pfn;
#define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
-diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
-index e1d1069..2251ff3 100644
---- a/arch/x86/mm/pageattr-test.c
-+++ b/arch/x86/mm/pageattr-test.c
-@@ -36,7 +36,7 @@ enum {
-
- static int pte_testbit(pte_t pte)
- {
-- return pte_flags(pte) & _PAGE_UNUSED1;
-+ return pte_flags(pte) & _PAGE_CPA_TEST;
- }
-
- struct split_state {
-diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
-index 532e793..8cddbe9 100644
---- a/arch/x86/mm/pageattr.c
-+++ b/arch/x86/mm/pageattr.c
-@@ -261,16 +261,17 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
+diff -urNp linux-2.6.37/arch/x86/mm/pageattr.c linux-2.6.37/arch/x86/mm/pageattr.c
+--- linux-2.6.37/arch/x86/mm/pageattr.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/mm/pageattr.c 2011-01-17 02:41:01.000000000 -0500
+@@ -261,16 +261,17 @@ static inline pgprot_t static_protection
* PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
*/
if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
@@ -19587,7 +18803,7 @@ index 532e793..8cddbe9 100644
/*
* The .rodata section needs to be read-only. Using the pfn
* catches all aliases.
-@@ -278,6 +279,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
+@@ -278,6 +279,7 @@ static inline pgprot_t static_protection
if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
__pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
pgprot_val(forbidden) |= _PAGE_RW;
@@ -19595,7 +18811,7 @@ index 532e793..8cddbe9 100644
#if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
/*
-@@ -316,6 +318,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
+@@ -316,6 +318,13 @@ static inline pgprot_t static_protection
}
#endif
@@ -19649,10 +18865,21 @@ index 532e793..8cddbe9 100644
}
static int
-diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
-index 64121a1..bfb36ea 100644
---- a/arch/x86/mm/pat.c
-+++ b/arch/x86/mm/pat.c
+diff -urNp linux-2.6.37/arch/x86/mm/pageattr-test.c linux-2.6.37/arch/x86/mm/pageattr-test.c
+--- linux-2.6.37/arch/x86/mm/pageattr-test.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/mm/pageattr-test.c 2011-01-17 02:41:01.000000000 -0500
+@@ -36,7 +36,7 @@ enum {
+
+ static int pte_testbit(pte_t pte)
+ {
+- return pte_flags(pte) & _PAGE_UNUSED1;
++ return pte_flags(pte) & _PAGE_CPA_TEST;
+ }
+
+ struct split_state {
+diff -urNp linux-2.6.37/arch/x86/mm/pat.c linux-2.6.37/arch/x86/mm/pat.c
+--- linux-2.6.37/arch/x86/mm/pat.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/mm/pat.c 2011-01-17 02:41:01.000000000 -0500
@@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
if (!entry) {
@@ -19662,7 +18889,7 @@ index 64121a1..bfb36ea 100644
return -EINVAL;
}
-@@ -492,8 +492,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
+@@ -492,8 +492,8 @@ static inline int range_is_allowed(unsig
while (cursor < to) {
if (!devmem_is_allowed(pfn)) {
printk(KERN_INFO
@@ -19673,7 +18900,7 @@ index 64121a1..bfb36ea 100644
return 0;
}
cursor += PAGE_SIZE;
-@@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
+@@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, un
printk(KERN_INFO
"%s:%d ioremap_change_attr failed %s "
"for %Lx-%Lx\n",
@@ -19682,7 +18909,7 @@ index 64121a1..bfb36ea 100644
cattr_name(flags),
base, (unsigned long long)(base + size));
return -EINVAL;
-@@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
+@@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr,
if (want_flags != flags) {
printk(KERN_WARNING
"%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
@@ -19691,7 +18918,7 @@ index 64121a1..bfb36ea 100644
cattr_name(want_flags),
(unsigned long long)paddr,
(unsigned long long)(paddr + size),
-@@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
+@@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr,
free_memtype(paddr, paddr + size);
printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
" for %Lx-%Lx, got %s\n",
@@ -19700,11 +18927,27 @@ index 64121a1..bfb36ea 100644
cattr_name(want_flags),
(unsigned long long)paddr,
(unsigned long long)(paddr + size),
-diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
-index 5c4ee42..9ff06a5 100644
---- a/arch/x86/mm/pgtable.c
-+++ b/arch/x86/mm/pgtable.c
-@@ -84,8 +84,58 @@ static inline void pgd_list_del(pgd_t *pgd)
+diff -urNp linux-2.6.37/arch/x86/mm/pgtable_32.c linux-2.6.37/arch/x86/mm/pgtable_32.c
+--- linux-2.6.37/arch/x86/mm/pgtable_32.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/mm/pgtable_32.c 2011-01-17 02:41:01.000000000 -0500
+@@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr,
+ return;
+ }
+ pte = pte_offset_kernel(pmd, vaddr);
++
++ pax_open_kernel();
+ if (pte_val(pteval))
+ set_pte_at(&init_mm, vaddr, pte, pteval);
+ else
+ pte_clear(&init_mm, vaddr, pte);
++ pax_close_kernel();
+
+ /*
+ * It's enough to flush this one mapping.
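
pax_open_kernel()/pax_close_kernel() above bracket the page-table update so that structures KERNEXEC keeps read-only can be written for the duration of one legitimate store. The userspace analogue below only illustrates that open/write/close pattern with mprotect(); the kernel-side mechanism in the patch is entirely different, and nothing here is taken from it.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    /* Data that is normally read-only is made briefly writable around a
     * single intended update, then locked down again. */
    int main(void)
    {
        long pagesz = sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return 1;

        strcpy(p, "initial");
        mprotect(p, pagesz, PROT_READ);              /* default state: read-only */

        mprotect(p, pagesz, PROT_READ | PROT_WRITE); /* "open": allow the update */
        strcpy(p, "updated");
        mprotect(p, pagesz, PROT_READ);              /* "close": read-only again */

        printf("%s\n", p);
        munmap(p, pagesz);
        return 0;
    }
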
+diff -urNp linux-2.6.37/arch/x86/mm/pgtable.c linux-2.6.37/arch/x86/mm/pgtable.c
+--- linux-2.6.37/arch/x86/mm/pgtable.c 2011-01-04 19:50:19.000000000 -0500
++++ linux-2.6.37/arch/x86/mm/pgtable.c 2011-01-17 02:41:01.000000000 -0500
+@@ -84,9 +84,58 @@ static inline void pgd_list_del(pgd_t *p
list_del(&page->lru);
}
@@ -19712,11 +18955,11 @@ index 5c4ee42..9ff06a5 100644
- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
-+
+
+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
+{
+ while (count--)
-+ *dst++ = __pgd((pgd_val(*src++) | _PAGE_NX) & ~_PAGE_USER);
++ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
+}
+#endif
+
@@ -19735,7 +18978,7 @@ index 5c4ee42..9ff06a5 100644
+#endif
+
+#ifdef CONFIG_PAX_PER_CPU_PGD
-+static inline void pgd_ctor(pgd_t *pgd) {}
++static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
+static inline void pgd_dtor(pgd_t *pgd) {}
+#ifdef CONFIG_X86_64
+#define pxd_t pud_t
@@ -19763,9 +19006,9 @@ index 5c4ee42..9ff06a5 100644
+#define pyd_offset(mm ,address) pud_offset((mm), (address))
+#define PYD_SIZE PUD_SIZE
- static void pgd_ctor(pgd_t *pgd)
+ static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
{
-@@ -120,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd)
+@@ -130,6 +179,7 @@ static void pgd_dtor(pgd_t *pgd)
pgd_list_del(pgd);
spin_unlock_irqrestore(&pgd_lock, flags);
}
@@ -19773,7 +19016,7 @@ index 5c4ee42..9ff06a5 100644
/*
* List of all pgd's needed for non-PAE so it can invalidate entries
-@@ -132,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
+@@ -142,7 +192,7 @@ static void pgd_dtor(pgd_t *pgd)
* -- wli
*/
@@ -19782,7 +19025,7 @@ index 5c4ee42..9ff06a5 100644
/*
* In PAE mode, we need to do a cr3 reload (=tlb flush) when
* updating the top-level pagetable entries to guarantee the
-@@ -144,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
+@@ -154,7 +204,7 @@ static void pgd_dtor(pgd_t *pgd)
* not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
* and initialize the kernel pmds here.
*/
@@ -19791,7 +19034,7 @@ index 5c4ee42..9ff06a5 100644
void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
-@@ -163,36 +214,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
+@@ -173,36 +223,38 @@ void pud_populate(struct mm_struct *mm,
if (mm == current->active_mm)
write_cr3(read_cr3());
}
@@ -19841,7 +19084,7 @@ index 5c4ee42..9ff06a5 100644
return -ENOMEM;
}
-@@ -205,51 +258,56 @@ static int preallocate_pmds(pmd_t *pmds[])
+@@ -215,51 +267,56 @@ static int preallocate_pmds(pmd_t *pmds[
* preallocate which never got a corresponding vma will need to be
* freed manually.
*/
@@ -19915,7 +19158,7 @@ index 5c4ee42..9ff06a5 100644
unsigned long flags;
pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
-@@ -259,11 +317,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
+@@ -269,11 +326,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
mm->pgd = pgd;
@@ -19929,10 +19172,10 @@ index 5c4ee42..9ff06a5 100644
/*
* Make sure that pre-populating the pmds is atomic with
-@@ -273,14 +331,14 @@ pgd_t *pgd_alloc(str