author     Natanael Copa <ncopa@alpinelinux.org>  2013-08-06 06:39:23 +0000
committer  Natanael Copa <ncopa@alpinelinux.org>  2013-08-06 06:39:23 +0000
commit     2f916b4a5031fccc91681f906ac28f8b7e12c639
tree       5baf5cc333754d2f9fb65ccf949984f8d1cabb0b
parent     fdf39794195592b4da31471a1485250fc132f143
main/linux-grsec: upgrade to 3.10.5
-rw-r--r--  main/linux-grsec/APKBUILD                                                                                                    |  16
-rw-r--r--  main/linux-grsec/grsecurity-2.9.1-3.10.5-201308052154.patch (renamed from main/linux-grsec/grsecurity-2.9.1-3.10.4-201308030031.patch) | 487
2 files changed, 235 insertions, 268 deletions
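(For context, not part of the commit: a minimal sketch of how a kernel bump like this is typically produced in aports, assuming the standard abuild workflow. The commands are illustrative only.)

    cd aports/main/linux-grsec
    # edit APKBUILD: bump pkgver and swap in the new grsecurity patch filename,
    # then regenerate the md5sums/sha256sums/sha512sums blocks from the sources:
    abuild checksum
    # rebuild to confirm the new patch set applies and the kernel builds:
    abuild -r
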
diff --git a/main/linux-grsec/APKBUILD b/main/linux-grsec/APKBUILD
index 00e9958e2e6..7f8f3ff6b2b 100644
--- a/main/linux-grsec/APKBUILD
+++ b/main/linux-grsec/APKBUILD
@@ -2,7 +2,7 @@
_flavor=grsec
pkgname=linux-${_flavor}
-pkgver=3.10.4
+pkgver=3.10.5
case $pkgver in
*.*.*) _kernver=${pkgver%.*};;
*.*) _kernver=${pkgver};;
@@ -17,7 +17,7 @@ _config=${config:-kernelconfig.${CARCH}}
install=
source="http://ftp.kernel.org/pub/linux/kernel/v3.x/linux-$_kernver.tar.xz
http://ftp.kernel.org/pub/linux/kernel/v3.x/patch-$pkgver.xz
- grsecurity-2.9.1-3.10.4-201308030031.patch
+ grsecurity-2.9.1-3.10.5-201308052154.patch
0001-net-inform-NETDEV_CHANGE-callbacks-which-flags-were-.patch
0002-arp-flush-arp-cache-on-IFF_NOARP-change.patch
@@ -149,8 +149,8 @@ dev() {
}
md5sums="4f25cd5bec5f8d5a7d935b3f2ccb8481 linux-3.10.tar.xz
-2e46ab138670b3171b52b849568cb42f patch-3.10.4.xz
-62bb9b34f874425600ada2817ddf23b6 grsecurity-2.9.1-3.10.4-201308030031.patch
+6366a8d4b0429ab6836c296ba298fb0e patch-3.10.5.xz
+e214ec80b95e11df16f1b8d6a9e617fc grsecurity-2.9.1-3.10.5-201308052154.patch
a16f11b12381efb3bec79b9bfb329836 0001-net-inform-NETDEV_CHANGE-callbacks-which-flags-were-.patch
656ae7b10dd2f18dbfa1011041d08d60 0002-arp-flush-arp-cache-on-IFF_NOARP-change.patch
aa454ffb96428586447775c21449e284 0003-ipv4-properly-refresh-rtable-entries-on-pmtu-redirec.patch
@@ -160,8 +160,8 @@ aa454ffb96428586447775c21449e284 0003-ipv4-properly-refresh-rtable-entries-on-p
1a111abaeb381bf47d9e979a85fba2ee kernelconfig.x86
1312267644d0c729bd7c7af979b29c8d kernelconfig.x86_64"
sha256sums="df27fa92d27a9c410bfe6c4a89f141638500d7eadcca5cce578954efc2ad3544 linux-3.10.tar.xz
-d8ef39930663cc916e57e06b308a1654f2a03903a3c5a0d3a5503c6d58e2b2b8 patch-3.10.4.xz
-3acf56b31b85298c56cee026fd01f3b0643c44743bd61476db85f15d9209c72b grsecurity-2.9.1-3.10.4-201308030031.patch
+c96b69a10ef5ade798dcaa1867df156ccc9e173225d5aa427d00c6e89246e035 patch-3.10.5.xz
+0fce4515e69d73d580134e8e9ac19b80e0e603315ae259b1954a62f3f444883a grsecurity-2.9.1-3.10.5-201308052154.patch
6af3757ac36a6cd3cda7b0a71b08143726383b19261294a569ad7f4042c72df3 0001-net-inform-NETDEV_CHANGE-callbacks-which-flags-were-.patch
dc8e82108615657f1fb9d641efd42255a5761c06edde1b00a41ae0d314d548f0 0002-arp-flush-arp-cache-on-IFF_NOARP-change.patch
0985caa0f3ee8ed0959aeaa4214f5f8057ae8e61d50dcae39194912d31e14892 0003-ipv4-properly-refresh-rtable-entries-on-pmtu-redirec.patch
@@ -171,8 +171,8 @@ fc613ac466610b866b721c41836fd5bfb2d4b75bceb67972dc6369d7f62ff47e 0006-ipv4-use-
1ef74cf3703dd26201970a2d9f043fed7e03ad2540a20f810cec8add93f81ccd kernelconfig.x86
1c4b4a74d982fdc8d3baddcdaa674ae4b4a3390daba024fca55e85604af74507 kernelconfig.x86_64"
sha512sums="5fb109fcbd59bf3dffc911b853894f0a84afa75151368f783a1252c5ff60c7a1504de216c0012be446df983e2dea400ad8eeed3ce04f24dc61d0ef76c174dc35 linux-3.10.tar.xz
-382adb3faf7feda6c5dd8f401c0ad0a2dbbc62e33d5f85d4181a56567abdec3be5b5279d35829a3a9a53a54949ea10de3f31c4256eba1f132fda79197af46819 patch-3.10.4.xz
-e39d89f2c5e20e642488234a7a0967f8f46239e267eb73a1ab2375b4cf58528ebb239e4a47a102b674e0deb7b0aa713a1d037f97a47a77f87dcca8217ffe00ad grsecurity-2.9.1-3.10.4-201308030031.patch
+583c1301ae362a2eee26253b477d78d472d7db1ff736491dcaf67a76a8badcfe103c0cfdde8cd2a0c2becb2017a11d522f417a4754f8838ed88d6f4a42dab738 patch-3.10.5.xz
+e56d207163b8c17bd63564ebbe916458ebcc892016216f98f395f3e208229d6533c2cfbe1463400526cde9eed3beb153725ac98ee6dfe27b46ef28679de0a24f grsecurity-2.9.1-3.10.5-201308052154.patch
81e78593288e8b0fd2c03ea9fc1450323887707f087e911f172450a122bc9b591ee83394836789730d951aeec13d0b75a64e1c05f04364abf8f80d883ddc4a02 0001-net-inform-NETDEV_CHANGE-callbacks-which-flags-were-.patch
51ecb15b669f6a82940a13a38939116e003bf5dfd24496771c8279e907b72adcc63d607f0340a2940d757e12ddadb7d45c7af78ae311d284935a6296dbcac00c 0002-arp-flush-arp-cache-on-IFF_NOARP-change.patch
57d0a8bd35d19cf657ded58efe24517d2252aec6984040713ba173a34edb5887ececaa2985076bc6a149eaa57639fd98a042c1c2d226ed4ad8dd5ed0e230717e 0003-ipv4-properly-refresh-rtable-entries-on-pmtu-redirec.patch
diff --git a/main/linux-grsec/grsecurity-2.9.1-3.10.4-201308030031.patch b/main/linux-grsec/grsecurity-2.9.1-3.10.5-201308052154.patch
index 9cf40260398..f2633c140b2 100644
--- a/main/linux-grsec/grsecurity-2.9.1-3.10.4-201308030031.patch
+++ b/main/linux-grsec/grsecurity-2.9.1-3.10.5-201308052154.patch
@@ -267,7 +267,7 @@ index 2fe6e76..df58221 100644
pcd. [PARIDE]
diff --git a/Makefile b/Makefile
-index b4df9b2..256e7cc 100644
+index f8349d0..563a504 100644
--- a/Makefile
+++ b/Makefile
@@ -241,8 +241,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -16647,10 +16647,10 @@ index 230c8ea..f915130 100644
* HP laptops which use a DSDT reporting as HP/SB400/10000,
* which includes some code which overrides all temperature
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
-index b44577b..27d8443 100644
+index ec94e11..7fbbec0 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
-@@ -74,8 +74,12 @@ int acpi_suspend_lowlevel(void)
+@@ -88,8 +88,12 @@ int acpi_suspend_lowlevel(void)
#else /* CONFIG_64BIT */
#ifdef CONFIG_SMP
stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
@@ -17650,7 +17650,7 @@ index e9a701a..35317d6 100644
wmb();
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
-index 726bf96..81f0526 100644
+index ca22b73..9987afe 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
@@ -20620,7 +20620,7 @@ index 73afd11..d1670f5 100644
+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
+ .endr
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
-index 321d65e..863089b 100644
+index a836860..bdeb7a5 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -20,6 +20,8 @@
@@ -20862,25 +20862,23 @@ index 321d65e..863089b 100644
#include "../../x86/xen/xen-head.S"
-
- .section .bss, "aw", @nobits
-- .align L1_CACHE_BYTES
--ENTRY(idt_table)
++
++ .section .rodata,"a",@progbits
++NEXT_PAGE(empty_zero_page)
++ .skip PAGE_SIZE
++
+ .align PAGE_SIZE
+ ENTRY(idt_table)
- .skip IDT_ENTRIES * 16
++ .fill 512,8,0
-- .align L1_CACHE_BYTES
--ENTRY(nmi_idt_table)
+ .align L1_CACHE_BYTES
+ ENTRY(nmi_idt_table)
- .skip IDT_ENTRIES * 16
-
- __PAGE_ALIGNED_BSS
-+ .section .rodata,"a",@progbits
- NEXT_PAGE(empty_zero_page)
- .skip PAGE_SIZE
-+
-+ .align L1_CACHE_BYTES
-+ENTRY(idt_table)
-+ .fill 512,8,0
-+
-+ .align L1_CACHE_BYTES
-+ENTRY(nmi_idt_table)
+-NEXT_PAGE(empty_zero_page)
+- .skip PAGE_SIZE
+ .fill 512,8,0
diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
index 0fa6912..37fce70 100644
@@ -35636,7 +35634,7 @@ index e913d32..4d9b351 100644
if (IS_GEN6(dev) || IS_GEN7(dev)) {
seq_printf(m,
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
-index 3b315ba..aac280f 100644
+index f968590..19115e35 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1259,7 +1259,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
@@ -35649,10 +35647,10 @@ index 3b315ba..aac280f 100644
return can_switch;
}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
-index 9669a0b..bb65176 100644
+index 47d8b68..52f5d8d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
-@@ -915,7 +915,7 @@ typedef struct drm_i915_private {
+@@ -916,7 +916,7 @@ typedef struct drm_i915_private {
drm_dma_handle_t *status_page_dmah;
struct resource mch_res;
@@ -35661,7 +35659,7 @@ index 9669a0b..bb65176 100644
/* protects the irq masks */
spinlock_t irq_lock;
-@@ -1811,7 +1811,7 @@ extern struct i2c_adapter *intel_gmbus_get_adapter(
+@@ -1813,7 +1813,7 @@ extern struct i2c_adapter *intel_gmbus_get_adapter(
struct drm_i915_private *dev_priv, unsigned port);
extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
@@ -35825,10 +35823,10 @@ index e5e32869..1678f36 100644
iir = I915_READ(IIR);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
-index 56746dc..b5a214f 100644
+index e1f4e6e..c94a4b3 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
-@@ -8919,13 +8919,13 @@ struct intel_quirk {
+@@ -8933,13 +8933,13 @@ struct intel_quirk {
int subsystem_vendor;
int subsystem_device;
void (*hook)(struct drm_device *dev);
@@ -35844,7 +35842,7 @@ index 56746dc..b5a214f 100644
static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
{
-@@ -8933,18 +8933,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
+@@ -8947,18 +8947,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
return 1;
}
@@ -38401,10 +38399,10 @@ index 5a2c754..0fa55db 100644
seq_printf(seq, "\n");
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
-index aa04f02..2a1309e 100644
+index 81a79b7..87a0f73 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
-@@ -1694,7 +1694,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
+@@ -1697,7 +1697,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
cmd == DM_LIST_VERSIONS_CMD)
return 0;
@@ -38567,7 +38565,7 @@ index 60bce43..9b997d0 100644
pmd->bl_info.value_type.inc = data_block_inc;
pmd->bl_info.value_type.dec = data_block_dec;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
-index d5370a9..8761bbc 100644
+index 33f2010..23fb84c 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -169,9 +169,9 @@ struct mapped_device {
@@ -38582,7 +38580,7 @@ index d5370a9..8761bbc 100644
struct list_head uevent_list;
spinlock_t uevent_lock; /* Protect access to uevent_list */
-@@ -1877,8 +1877,8 @@ static struct mapped_device *alloc_dev(int minor)
+@@ -1884,8 +1884,8 @@ static struct mapped_device *alloc_dev(int minor)
rwlock_init(&md->map_lock);
atomic_set(&md->holders, 1);
atomic_set(&md->open_count, 0);
@@ -38593,7 +38591,7 @@ index d5370a9..8761bbc 100644
INIT_LIST_HEAD(&md->uevent_list);
spin_lock_init(&md->uevent_lock);
-@@ -2026,7 +2026,7 @@ static void event_callback(void *context)
+@@ -2033,7 +2033,7 @@ static void event_callback(void *context)
dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
@@ -38602,7 +38600,7 @@ index d5370a9..8761bbc 100644
wake_up(&md->eventq);
}
-@@ -2683,18 +2683,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
+@@ -2690,18 +2690,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
uint32_t dm_next_uevent_seq(struct mapped_device *md)
{
@@ -38625,7 +38623,7 @@ index d5370a9..8761bbc 100644
void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
diff --git a/drivers/md/md.c b/drivers/md/md.c
-index 9b82377..6b6922d 100644
+index 51f0345..c77810e 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -234,10 +234,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
@@ -38775,7 +38773,7 @@ index 3e6d115..ffecdeb 100644
/*----------------------------------------------------------------*/
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
-index 6e17f81..140f717 100644
+index 6f48244..7d29145 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1822,7 +1822,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
@@ -38787,7 +38785,7 @@ index 6e17f81..140f717 100644
}
sectors -= s;
sect += s;
-@@ -2042,7 +2042,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
+@@ -2049,7 +2049,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
test_bit(In_sync, &rdev->flags)) {
if (r1_sync_page_io(rdev, sect, s,
conf->tmppage, READ)) {
@@ -38797,7 +38795,7 @@ index 6e17f81..140f717 100644
"md/raid1:%s: read error corrected "
"(%d sectors at %llu on %s)\n",
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
-index d61eb7e..adfd00a 100644
+index 081bb33..3c4b287 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1940,7 +1940,7 @@ static void end_sync_read(struct bio *bio, int error)
@@ -38809,7 +38807,7 @@ index d61eb7e..adfd00a 100644
&conf->mirrors[d].rdev->corrected_errors);
/* for reconstruct, we always reschedule after a read.
-@@ -2292,7 +2292,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
+@@ -2298,7 +2298,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
{
struct timespec cur_time_mon;
unsigned long hours_since_last;
@@ -38818,7 +38816,7 @@ index d61eb7e..adfd00a 100644
ktime_get_ts(&cur_time_mon);
-@@ -2314,9 +2314,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
+@@ -2320,9 +2320,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
* overflowing the shift of read_errors by hours_since_last.
*/
if (hours_since_last >= 8 * sizeof(read_errors))
@@ -38830,7 +38828,7 @@ index d61eb7e..adfd00a 100644
}
static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
-@@ -2370,8 +2370,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
+@@ -2376,8 +2376,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
return;
check_decay_read_errors(mddev, rdev);
@@ -38841,7 +38839,7 @@ index d61eb7e..adfd00a 100644
char b[BDEVNAME_SIZE];
bdevname(rdev->bdev, b);
-@@ -2379,7 +2379,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
+@@ -2385,7 +2385,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
"md/raid10:%s: %s: Raid device exceeded "
"read_error threshold [cur %d:max %d]\n",
mdname(mddev), b,
@@ -38850,7 +38848,7 @@ index d61eb7e..adfd00a 100644
printk(KERN_NOTICE
"md/raid10:%s: %s: Failing raid device\n",
mdname(mddev), b);
-@@ -2534,7 +2534,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
+@@ -2540,7 +2540,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
sect +
choose_data_offset(r10_bio, rdev)),
bdevname(rdev->bdev, b));
@@ -38860,7 +38858,7 @@ index d61eb7e..adfd00a 100644
rdev_dec_pending(rdev, mddev);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
-index 05e4a10..48fbe37 100644
+index a35b846..e295c6d 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1764,21 +1764,21 @@ static void raid5_end_read_request(struct bio * bi, int error)
@@ -40348,6 +40346,37 @@ index b0c3de9..fc5857e 100644
} else {
return -EIO;
}
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+index 6acf82b..14b097e 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+@@ -206,10 +206,10 @@ int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *adapter)
+ if (err) {
+ dev_info(&adapter->pdev->dev,
+ "Failed to set driver version in firmware\n");
+- return -EIO;
++ err = -EIO;
+ }
+-
+- return 0;
++ qlcnic_free_mbx_args(&cmd);
++ return err;
+ }
+
+ int
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+index d3f8797..82a03d3 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+@@ -262,7 +262,7 @@ void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
+
+ mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
+ mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
+- memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN);
++ memcpy(mac_req->mac_addr, uaddr, ETH_ALEN);
+
+ vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
+ vlan_req->vlan_id = cpu_to_le16(vlan_id);
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 393f961..d343034 100644
--- a/drivers/net/ethernet/realtek/r8169.c
@@ -43062,10 +43091,10 @@ index f379c7f..e8fc69c 100644
transport_setup_device(&rport->dev);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
-index 1b1125e..31a2019 100644
+index 610417e..1544fa9 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
-@@ -2936,7 +2936,7 @@ static int sd_probe(struct device *dev)
+@@ -2928,7 +2928,7 @@ static int sd_probe(struct device *dev)
sdkp->disk = gd;
sdkp->index = index;
atomic_set(&sdkp->openers, 0);
@@ -44412,7 +44441,7 @@ index 1afe192..73d2c20 100644
kfree(ld);
raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
-index 121aeb9..0d2c4b9 100644
+index f597e88..b7f68ed 100644
--- a/drivers/tty/tty_port.c
+++ b/drivers/tty/tty_port.c
@@ -232,7 +232,7 @@ void tty_port_hangup(struct tty_port *port)
@@ -44424,7 +44453,7 @@ index 121aeb9..0d2c4b9 100644
port->flags &= ~ASYNC_NORMAL_ACTIVE;
tty = port->tty;
if (tty)
-@@ -391,7 +391,7 @@ int tty_port_block_til_ready(struct tty_port *port,
+@@ -390,7 +390,7 @@ int tty_port_block_til_ready(struct tty_port *port,
/* The port lock protects the port counts */
spin_lock_irqsave(&port->lock, flags);
if (!tty_hung_up_p(filp))
@@ -44433,7 +44462,7 @@ index 121aeb9..0d2c4b9 100644
port->blocked_open++;
spin_unlock_irqrestore(&port->lock, flags);
-@@ -433,7 +433,7 @@ int tty_port_block_til_ready(struct tty_port *port,
+@@ -432,7 +432,7 @@ int tty_port_block_til_ready(struct tty_port *port,
we must not mess that up further */
spin_lock_irqsave(&port->lock, flags);
if (!tty_hung_up_p(filp))
@@ -44442,7 +44471,7 @@ index 121aeb9..0d2c4b9 100644
port->blocked_open--;
if (retval == 0)
port->flags |= ASYNC_NORMAL_ACTIVE;
-@@ -467,19 +467,19 @@ int tty_port_close_start(struct tty_port *port,
+@@ -466,19 +466,19 @@ int tty_port_close_start(struct tty_port *port,
return 0;
}
@@ -44469,7 +44498,7 @@ index 121aeb9..0d2c4b9 100644
spin_unlock_irqrestore(&port->lock, flags);
if (port->ops->drop)
port->ops->drop(port);
-@@ -565,7 +565,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
+@@ -564,7 +564,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
{
spin_lock_irq(&port->lock);
if (!tty_hung_up_p(filp))
@@ -48683,7 +48712,7 @@ index bce8769..7fc7544 100644
fd_offset + ex.a_text);
if (error != N_DATADDR(ex)) {
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
-index f8a0b0e..8c841c3 100644
+index f8a0b0e..6f036ed 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -34,6 +34,7 @@
@@ -49497,7 +49526,7 @@ index f8a0b0e..8c841c3 100644
+ unsigned long oldflags;
+ bool is_textrel_rw, is_textrel_rx, is_relro;
+
-+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
++ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT) || !vma->vm_file)
+ return;
+
+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
@@ -49505,15 +49534,15 @@ index f8a0b0e..8c841c3 100644
+
+#ifdef CONFIG_PAX_ELFRELOCS
+ /* possible TEXTREL */
-+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
-+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
++ is_textrel_rw = !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
++ is_textrel_rx = vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
+#else
+ is_textrel_rw = false;
+ is_textrel_rx = false;
+#endif
+
+ /* possible RELRO */
-+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
++ is_relro = vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
+
+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
+ return;
@@ -54516,10 +54545,10 @@ index e76244e..9fe8f2f1 100644
/* Don't cache excessive amounts of data and XDR failures */
if (!statp || len > (256 >> 2)) {
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
-index 84ce601..633d226 100644
+index baf149a..76b86ad 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
-@@ -939,7 +939,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
+@@ -940,7 +940,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
} else {
oldfs = get_fs();
set_fs(KERNEL_DS);
@@ -54528,7 +54557,7 @@ index 84ce601..633d226 100644
set_fs(oldfs);
}
-@@ -1026,7 +1026,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
+@@ -1027,7 +1027,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
/* Write the data. */
oldfs = get_fs(); set_fs(KERNEL_DS);
@@ -54537,7 +54566,7 @@ index 84ce601..633d226 100644
set_fs(oldfs);
if (host_err < 0)
goto out_nfserr;
-@@ -1572,7 +1572,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
+@@ -1573,7 +1573,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
*/
oldfs = get_fs(); set_fs(KERNEL_DS);
@@ -56975,63 +57004,6 @@ index 04ce1ac..a13dd1e 100644
generic_fillattr(inode, stat);
return 0;
-diff --git a/fs/super.c b/fs/super.c
-index 7465d43..68307c0 100644
---- a/fs/super.c
-+++ b/fs/super.c
-@@ -336,19 +336,19 @@ EXPORT_SYMBOL(deactivate_super);
- * and want to turn it into a full-blown active reference. grab_super()
- * is called with sb_lock held and drops it. Returns 1 in case of
- * success, 0 if we had failed (superblock contents was already dead or
-- * dying when grab_super() had been called).
-+ * dying when grab_super() had been called). Note that this is only
-+ * called for superblocks not in rundown mode (== ones still on ->fs_supers
-+ * of their type), so increment of ->s_count is OK here.
- */
- static int grab_super(struct super_block *s) __releases(sb_lock)
- {
-- if (atomic_inc_not_zero(&s->s_active)) {
-- spin_unlock(&sb_lock);
-- return 1;
-- }
-- /* it's going away */
- s->s_count++;
- spin_unlock(&sb_lock);
-- /* wait for it to die */
- down_write(&s->s_umount);
-+ if ((s->s_flags & MS_BORN) && atomic_inc_not_zero(&s->s_active)) {
-+ put_super(s);
-+ return 1;
-+ }
- up_write(&s->s_umount);
- put_super(s);
- return 0;
-@@ -463,11 +463,6 @@ retry:
- destroy_super(s);
- s = NULL;
- }
-- down_write(&old->s_umount);
-- if (unlikely(!(old->s_flags & MS_BORN))) {
-- deactivate_locked_super(old);
-- goto retry;
-- }
- return old;
- }
- }
-@@ -660,10 +655,10 @@ restart:
- if (hlist_unhashed(&sb->s_instances))
- continue;
- if (sb->s_bdev == bdev) {
-- if (grab_super(sb)) /* drops sb_lock */
-- return sb;
-- else
-+ if (!grab_super(sb))
- goto restart;
-+ up_write(&sb->s_umount);
-+ return sb;
- }
- }
- spin_unlock(&sb_lock);
diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c
index 15c68f9..36a8b3e 100644
--- a/fs/sysfs/bin.c
@@ -58499,7 +58471,7 @@ index 0000000..36845aa
+endif
diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
new file mode 100644
-index 0000000..6907918
+index 0000000..c0793fd
--- /dev/null
+++ b/grsecurity/gracl.c
@@ -0,0 +1,4178 @@
@@ -61757,7 +61729,7 @@ index 0000000..6907918
+ unsigned char *sprole_sum = NULL;
+ int error = 0;
+ int error2 = 0;
-+ size_t req_count;
++ size_t req_count = 0;
+
+ mutex_lock(&gr_dev_mutex);
+
@@ -76398,7 +76370,7 @@ index 7bb73f9..d7978ed 100644
{
struct signal_struct *sig = current->signal;
diff --git a/kernel/fork.c b/kernel/fork.c
-index 987b28a..e0102b2 100644
+index 987b28a..11ee8a5 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -319,7 +319,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
@@ -76684,31 +76656,7 @@ index 987b28a..e0102b2 100644
return ERR_PTR(retval);
}
-@@ -1579,6 +1639,23 @@ long do_fork(unsigned long clone_flags,
- return -EINVAL;
- }
-
-+#ifdef CONFIG_GRKERNSEC
-+ if (clone_flags & CLONE_NEWUSER) {
-+ /*
-+ * This doesn't really inspire confidence:
-+ * http://marc.info/?l=linux-kernel&m=135543612731939&w=2
-+ * http://marc.info/?l=linux-kernel&m=135545831607095&w=2
-+ * Increases kernel attack surface in areas developers
-+ * previously cared little about ("low importance due
-+ * to requiring "root" capability")
-+ * To be removed when this code receives *proper* review
-+ */
-+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
-+ !capable(CAP_SETGID))
-+ return -EPERM;
-+ }
-+#endif
-+
- /*
- * Determine whether and which event to report to ptracer. When
- * called from kernel_thread or CLONE_UNTRACED is explicitly
-@@ -1613,6 +1690,8 @@ long do_fork(unsigned long clone_flags,
+@@ -1613,6 +1673,8 @@ long do_fork(unsigned long clone_flags,
if (clone_flags & CLONE_PARENT_SETTID)
put_user(nr, parent_tidptr);
@@ -76717,7 +76665,7 @@ index 987b28a..e0102b2 100644
if (clone_flags & CLONE_VFORK) {
p->vfork_done = &vfork;
init_completion(&vfork);
-@@ -1723,7 +1802,7 @@ void __init proc_caches_init(void)
+@@ -1723,7 +1785,7 @@ void __init proc_caches_init(void)
mm_cachep = kmem_cache_create("mm_struct",
sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
@@ -76726,7 +76674,7 @@ index 987b28a..e0102b2 100644
mmap_init();
nsproxy_cache_init();
}
-@@ -1763,7 +1842,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
+@@ -1763,7 +1825,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
return 0;
/* don't need lock here; in the worst case we'll do useless copy */
@@ -76735,7 +76683,7 @@ index 987b28a..e0102b2 100644
return 0;
*new_fsp = copy_fs_struct(fs);
-@@ -1875,7 +1954,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
+@@ -1875,7 +1937,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
fs = current->fs;
spin_lock(&fs->lock);
current->fs = new_fs;
@@ -81051,10 +80999,10 @@ index e444ff8..438b8f4 100644
*data_page = bpage;
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
-index 0b936d8..306a7eb 100644
+index f7bc3ce..b8ef9b5 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -3302,7 +3302,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
+@@ -3303,7 +3303,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
return 0;
}
@@ -81077,10 +81025,10 @@ index 51b4448..7be601f 100644
/*
* Normal trace_printk() and friends allocates special buffers
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
-index 6dfd48b..a6d88d0 100644
+index 6953263..2004e16 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
-@@ -1731,10 +1731,6 @@ static LIST_HEAD(ftrace_module_file_list);
+@@ -1748,10 +1748,6 @@ static LIST_HEAD(ftrace_module_file_list);
struct ftrace_module_file_ops {
struct list_head list;
struct module *mod;
@@ -81091,7 +81039,7 @@ index 6dfd48b..a6d88d0 100644
};
static struct ftrace_module_file_ops *
-@@ -1775,17 +1771,12 @@ trace_create_file_ops(struct module *mod)
+@@ -1792,17 +1788,12 @@ trace_create_file_ops(struct module *mod)
file_ops->mod = mod;
@@ -81115,7 +81063,7 @@ index 6dfd48b..a6d88d0 100644
list_add(&file_ops->list, &ftrace_module_file_list);
-@@ -1878,8 +1869,8 @@ __trace_add_new_mod_event(struct ftrace_event_call *call,
+@@ -1895,8 +1886,8 @@ __trace_add_new_mod_event(struct ftrace_event_call *call,
struct ftrace_module_file_ops *file_ops)
{
return __trace_add_new_event(call, tr,
@@ -81214,10 +81162,55 @@ index b20428c..4845a10 100644
local_irq_save(flags);
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
-index d8c30db..e065e89 100644
+index d8c30db..f2f6af5 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
-@@ -853,7 +853,7 @@ static int userns_install(struct nsproxy *nsproxy, void *ns)
+@@ -79,6 +79,21 @@ int create_user_ns(struct cred *new)
+ !kgid_has_mapping(parent_ns, group))
+ return -EPERM;
+
++#ifdef CONFIG_GRKERNSEC
++ /*
++ * This doesn't really inspire confidence:
++ * http://marc.info/?l=linux-kernel&m=135543612731939&w=2
++ * http://marc.info/?l=linux-kernel&m=135545831607095&w=2
++ * Increases kernel attack surface in areas developers
++ * previously cared little about ("low importance due
++ * to requiring "root" capability")
++ * To be removed when this code receives *proper* review
++ */
++ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
++ !capable(CAP_SETGID))
++ return -EPERM;
++#endif
++
+ ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
+ if (!ns)
+ return -ENOMEM;
+@@ -105,6 +120,7 @@ int create_user_ns(struct cred *new)
+ int unshare_userns(unsigned long unshare_flags, struct cred **new_cred)
+ {
+ struct cred *cred;
++ int err;
+
+ if (!(unshare_flags & CLONE_NEWUSER))
+ return 0;
+@@ -113,8 +129,12 @@ int unshare_userns(unsigned long unshare_flags, struct cred **new_cred)
+ if (!cred)
+ return -ENOMEM;
+
+- *new_cred = cred;
+- return create_user_ns(cred);
++ err = create_user_ns(cred);
++ if (err)
++ put_cred(cred);
++ else
++ *new_cred = cred;
++ return err;
+ }
+
+ void free_user_ns(struct user_namespace *ns)
+@@ -853,7 +873,7 @@ static int userns_install(struct nsproxy *nsproxy, void *ns)
if (atomic_read(&current->mm->mm_users) > 1)
return -EINVAL;
@@ -82419,7 +82412,7 @@ index ceb0c7f..b2b8e94 100644
} else {
pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
diff --git a/mm/memory.c b/mm/memory.c
-index 61a262b..77a94d1 100644
+index 5e50800..c47ba9a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -429,6 +429,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
@@ -82456,7 +82449,7 @@ index 61a262b..77a94d1 100644
}
/*
-@@ -1635,12 +1641,6 @@ no_page_table:
+@@ -1638,12 +1644,6 @@ no_page_table:
return page;
}
@@ -82469,7 +82462,7 @@ index 61a262b..77a94d1 100644
/**
* __get_user_pages() - pin user pages in memory
* @tsk: task_struct of target task
-@@ -1727,10 +1727,10 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+@@ -1730,10 +1730,10 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
i = 0;
@@ -82482,7 +82475,7 @@ index 61a262b..77a94d1 100644
if (!vma && in_gate_area(mm, start)) {
unsigned long pg = start & PAGE_MASK;
pgd_t *pgd;
-@@ -1779,7 +1779,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+@@ -1782,7 +1782,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
goto next_page;
}
@@ -82491,7 +82484,7 @@ index 61a262b..77a94d1 100644
(vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
!(vm_flags & vma->vm_flags))
return i ? : -EFAULT;
-@@ -1808,11 +1808,6 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+@@ -1811,11 +1811,6 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
int ret;
unsigned int fault_flags = 0;
@@ -82503,7 +82496,7 @@ index 61a262b..77a94d1 100644
if (foll_flags & FOLL_WRITE)
fault_flags |= FAULT_FLAG_WRITE;
if (nonblocking)
-@@ -1892,7 +1887,7 @@ next_page:
+@@ -1895,7 +1890,7 @@ next_page:
start += page_increm * PAGE_SIZE;
nr_pages -= page_increm;
} while (nr_pages && start < vma->vm_end);
@@ -82512,7 +82505,7 @@ index 61a262b..77a94d1 100644
return i;
}
EXPORT_SYMBOL(__get_user_pages);
-@@ -2099,6 +2094,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
+@@ -2102,6 +2097,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
page_add_file_rmap(page);
set_pte_at(mm, addr, pte, mk_pte(page, prot));
@@ -82523,7 +82516,7 @@ index 61a262b..77a94d1 100644
retval = 0;
pte_unmap_unlock(pte, ptl);
return retval;
-@@ -2143,9 +2142,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
+@@ -2146,9 +2145,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
if (!page_count(page))
return -EINVAL;
if (!(vma->vm_flags & VM_MIXEDMAP)) {
@@ -82545,7 +82538,7 @@ index 61a262b..77a94d1 100644
}
return insert_page(vma, addr, page, vma->vm_page_prot);
}
-@@ -2228,6 +2239,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
+@@ -2231,6 +2242,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn)
{
BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
@@ -82553,7 +82546,7 @@ index 61a262b..77a94d1 100644
if (addr < vma->vm_start || addr >= vma->vm_end)
return -EFAULT;
-@@ -2475,7 +2487,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
+@@ -2478,7 +2490,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
BUG_ON(pud_huge(*pud));
@@ -82564,7 +82557,7 @@ index 61a262b..77a94d1 100644
if (!pmd)
return -ENOMEM;
do {
-@@ -2495,7 +2509,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
+@@ -2498,7 +2512,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
unsigned long next;
int err;
@@ -82575,7 +82568,7 @@ index 61a262b..77a94d1 100644
if (!pud)
return -ENOMEM;
do {
-@@ -2583,6 +2599,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
+@@ -2586,6 +2602,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
copy_user_highpage(dst, src, va, vma);
}
@@ -82762,7 +82755,7 @@ index 61a262b..77a94d1 100644
/*
* This routine handles present pages, when users try to write
* to a shared page. It is done by copying the page to a new address
-@@ -2799,6 +2995,12 @@ gotten:
+@@ -2802,6 +2998,12 @@ gotten:
*/
page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
if (likely(pte_same(*page_table, orig_pte))) {
@@ -82775,7 +82768,7 @@ index 61a262b..77a94d1 100644
if (old_page) {
if (!PageAnon(old_page)) {
dec_mm_counter_fast(mm, MM_FILEPAGES);
-@@ -2850,6 +3052,10 @@ gotten:
+@@ -2853,6 +3055,10 @@ gotten:
page_remove_rmap(old_page);
}
@@ -82786,7 +82779,7 @@ index 61a262b..77a94d1 100644
/* Free the old page.. */
new_page = old_page;
ret |= VM_FAULT_WRITE;
-@@ -3125,6 +3331,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3128,6 +3334,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
swap_free(entry);
if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
try_to_free_swap(page);
@@ -82798,7 +82791,7 @@ index 61a262b..77a94d1 100644
unlock_page(page);
if (page != swapcache) {
/*
-@@ -3148,6 +3359,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3151,6 +3362,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, address, page_table);
@@ -82810,7 +82803,7 @@ index 61a262b..77a94d1 100644
unlock:
pte_unmap_unlock(page_table, ptl);
out:
-@@ -3167,40 +3383,6 @@ out_release:
+@@ -3170,40 +3386,6 @@ out_release:
}
/*
@@ -82851,7 +82844,7 @@ index 61a262b..77a94d1 100644
* We enter with non-exclusive mmap_sem (to exclude vma changes,
* but allow concurrent faults), and pte mapped but not yet locked.
* We return with mmap_sem still held, but pte unmapped and unlocked.
-@@ -3209,27 +3391,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3212,27 +3394,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *page_table, pmd_t *pmd,
unsigned int flags)
{
@@ -82884,7 +82877,7 @@ index 61a262b..77a94d1 100644
if (unlikely(anon_vma_prepare(vma)))
goto oom;
page = alloc_zeroed_user_highpage_movable(vma, address);
-@@ -3253,6 +3431,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3256,6 +3434,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
if (!pte_none(*page_table))
goto release;
@@ -82896,7 +82889,7 @@ index 61a262b..77a94d1 100644
inc_mm_counter_fast(mm, MM_ANONPAGES);
page_add_new_anon_rmap(page, vma, address);
setpte:
-@@ -3260,6 +3443,12 @@ setpte:
+@@ -3263,6 +3446,12 @@ setpte:
/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, address, page_table);
@@ -82909,7 +82902,7 @@ index 61a262b..77a94d1 100644
unlock:
pte_unmap_unlock(page_table, ptl);
return 0;
-@@ -3403,6 +3592,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3406,6 +3595,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
*/
/* Only go through if we didn't race with anybody else... */
if (likely(pte_same(*page_table, orig_pte))) {
@@ -82922,7 +82915,7 @@ index 61a262b..77a94d1 100644
flush_icache_page(vma, page);
entry = mk_pte(page, vma->vm_page_prot);
if (flags & FAULT_FLAG_WRITE)
-@@ -3422,6 +3617,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3425,6 +3620,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
/* no need to invalidate: a not-present page won't be cached */
update_mmu_cache(vma, address, page_table);
@@ -82937,7 +82930,7 @@ index 61a262b..77a94d1 100644
} else {
if (cow_page)
mem_cgroup_uncharge_page(cow_page);
-@@ -3743,6 +3946,12 @@ int handle_pte_fault(struct mm_struct *mm,
+@@ -3746,6 +3949,12 @@ int handle_pte_fault(struct mm_struct *mm,
if (flags & FAULT_FLAG_WRITE)
flush_tlb_fix_spurious_fault(vma, address);
}
@@ -82950,7 +82943,7 @@ index 61a262b..77a94d1 100644
unlock:
pte_unmap_unlock(pte, ptl);
return 0;
-@@ -3759,6 +3968,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3762,6 +3971,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
pmd_t *pmd;
pte_t *pte;
@@ -82961,7 +82954,7 @@ index 61a262b..77a94d1 100644
__set_current_state(TASK_RUNNING);
count_vm_event(PGFAULT);
-@@ -3770,6 +3983,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3773,6 +3986,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
if (unlikely(is_vm_hugetlb_page(vma)))
return hugetlb_fault(mm, vma, address, flags);
@@ -82996,7 +82989,7 @@ index 61a262b..77a94d1 100644
retry:
pgd = pgd_offset(mm, address);
pud = pud_alloc(mm, pgd, address);
-@@ -3868,6 +4109,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+@@ -3871,6 +4112,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
spin_unlock(&mm->page_table_lock);
return 0;
}
@@ -83020,7 +83013,7 @@ index 61a262b..77a94d1 100644
#endif /* __PAGETABLE_PUD_FOLDED */
#ifndef __PAGETABLE_PMD_FOLDED
-@@ -3898,6 +4156,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
+@@ -3901,6 +4159,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
spin_unlock(&mm->page_table_lock);
return 0;
}
@@ -83051,7 +83044,7 @@ index 61a262b..77a94d1 100644
#endif /* __PAGETABLE_PMD_FOLDED */
#if !defined(__HAVE_ARCH_GATE_AREA)
-@@ -3911,7 +4193,7 @@ static int __init gate_vma_init(void)
+@@ -3914,7 +4196,7 @@ static int __init gate_vma_init(void)
gate_vma.vm_start = FIXADDR_USER_START;
gate_vma.vm_end = FIXADDR_USER_END;
gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
@@ -83060,7 +83053,7 @@ index 61a262b..77a94d1 100644
return 0;
}
-@@ -4045,8 +4327,8 @@ out:
+@@ -4048,8 +4330,8 @@ out:
return ret;
}
@@ -83071,7 +83064,7 @@ index 61a262b..77a94d1 100644
{
resource_size_t phys_addr;
unsigned long prot = 0;
-@@ -4071,8 +4353,8 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
+@@ -4074,8 +4356,8 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
* Access another process' address space as given in mm. If non-NULL, use the
* given task for page fault accounting.
*/
@@ -83082,7 +83075,7 @@ index 61a262b..77a94d1 100644
{
struct vm_area_struct *vma;
void *old_buf = buf;
-@@ -4080,7 +4362,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+@@ -4083,7 +4365,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
down_read(&mm->mmap_sem);
/* ignore errors, just check how much was successfully transferred */
while (len) {
@@ -83091,7 +83084,7 @@ index 61a262b..77a94d1 100644
void *maddr;
struct page *page = NULL;
-@@ -4139,8 +4421,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+@@ -4142,8 +4424,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
*
* The caller must hold a reference on @mm.
*/
@@ -83102,7 +83095,7 @@ index 61a262b..77a94d1 100644
{
return __access_remote_vm(NULL, mm, addr, buf, len, write);
}
-@@ -4150,11 +4432,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
+@@ -4153,11 +4435,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
* Source/target buffer must be kernel space,
* Do not walk the page table directly, use get_user_pages
*/
@@ -83118,7 +83111,7 @@ index 61a262b..77a94d1 100644
mm = get_task_mm(tsk);
if (!mm)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
-index 7431001..0f8344e 100644
+index 4baf12e..5497066 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -708,6 +708,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
@@ -83132,11 +83125,7 @@ index 7431001..0f8344e 100644
vma = find_vma(mm, start);
if (!vma || vma->vm_start > start)
return -EFAULT;
-@@ -744,9 +748,20 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
- if (err)
- goto out;
- }
-+
+@@ -751,6 +755,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
err = vma_replace_policy(vma, new_pol);
if (err)
goto out;
@@ -83153,7 +83142,7 @@ index 7431001..0f8344e 100644
}
out:
-@@ -1202,6 +1217,17 @@ static long do_mbind(unsigned long start, unsigned long len,
+@@ -1206,6 +1220,17 @@ static long do_mbind(unsigned long start, unsigned long len,
if (end < start)
return -EINVAL;
@@ -83171,7 +83160,7 @@ index 7431001..0f8344e 100644
if (end == start)
return 0;
-@@ -1430,8 +1456,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
+@@ -1434,8 +1459,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
*/
tcred = __task_cred(task);
if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
@@ -83181,7 +83170,7 @@ index 7431001..0f8344e 100644
rcu_read_unlock();
err = -EPERM;
goto out_put;
-@@ -1462,6 +1487,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
+@@ -1466,6 +1490,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
goto out;
}
@@ -83283,7 +83272,7 @@ index 79b7cf7..9944291 100644
capable(CAP_IPC_LOCK))
ret = do_mlockall(flags);
diff --git a/mm/mmap.c b/mm/mmap.c
-index f681e18..623110e 100644
+index 7dbe397..e84c411 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -36,6 +36,7 @@
@@ -83532,7 +83521,7 @@ index f681e18..623110e 100644
+ if (mm->pax_flags & MF_PAX_MPROTECT) {
+
+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
-+ if (file && (vm_flags & VM_EXEC) && mm->binfmt &&
++ if (file && !pgoff && (vm_flags & VM_EXEC) && mm->binfmt &&
+ mm->binfmt->handle_mmap)
+ mm->binfmt->handle_mmap(file);
+#endif
@@ -88145,10 +88134,28 @@ index 2e7f194..0fa4d6d 100644
ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_AH, 0);
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
-index dfc39d4..0b82c4d 100644
+index dfc39d4..0d4fa52 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
-@@ -1529,7 +1529,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
+@@ -771,7 +771,7 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
+ ci = nla_data(tb[IFA_CACHEINFO]);
+ if (!ci->ifa_valid || ci->ifa_prefered > ci->ifa_valid) {
+ err = -EINVAL;
+- goto errout;
++ goto errout_free;
+ }
+ *pvalid_lft = ci->ifa_valid;
+ *pprefered_lft = ci->ifa_prefered;
+@@ -779,6 +779,8 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
+
+ return ifa;
+
++errout_free:
++ inet_free_ifa(ifa);
+ errout:
+ return ERR_PTR(err);
+ }
+@@ -1529,7 +1531,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
idx = 0;
head = &net->dev_index_head[h];
rcu_read_lock();
@@ -88157,7 +88164,7 @@ index dfc39d4..0b82c4d 100644
net->dev_base_seq;
hlist_for_each_entry_rcu(dev, head, index_hlist) {
if (idx < s_idx)
-@@ -1840,7 +1840,7 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
+@@ -1840,7 +1842,7 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
idx = 0;
head = &net->dev_index_head[h];
rcu_read_lock();
@@ -88166,7 +88173,7 @@ index dfc39d4..0b82c4d 100644
net->dev_base_seq;
hlist_for_each_entry_rcu(dev, head, index_hlist) {
if (idx < s_idx)
-@@ -2065,7 +2065,7 @@ static int ipv4_doint_and_flush(ctl_table *ctl, int write,
+@@ -2065,7 +2067,7 @@ static int ipv4_doint_and_flush(ctl_table *ctl, int write,
#define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
@@ -88175,7 +88182,7 @@ index dfc39d4..0b82c4d 100644
struct ctl_table_header *sysctl_header;
struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
} devinet_sysctl = {
-@@ -2183,7 +2183,7 @@ static __net_init int devinet_init_net(struct net *net)
+@@ -2183,7 +2185,7 @@ static __net_init int devinet_init_net(struct net *net)
int err;
struct ipv4_devconf *all, *dflt;
#ifdef CONFIG_SYSCTL
@@ -88184,7 +88191,7 @@ index dfc39d4..0b82c4d 100644
struct ctl_table_header *forw_hdr;
#endif
-@@ -2201,7 +2201,7 @@ static __net_init int devinet_init_net(struct net *net)
+@@ -2201,7 +2203,7 @@ static __net_init int devinet_init_net(struct net *net)
goto err_alloc_dflt;
#ifdef CONFIG_SYSCTL
@@ -88193,7 +88200,7 @@ index dfc39d4..0b82c4d 100644
if (tbl == NULL)
goto err_alloc_ctl;
-@@ -2221,7 +2221,10 @@ static __net_init int devinet_init_net(struct net *net)
+@@ -2221,7 +2223,10 @@ static __net_init int devinet_init_net(struct net *net)
goto err_reg_dflt;
err = -ENOMEM;
@@ -88205,7 +88212,7 @@ index dfc39d4..0b82c4d 100644
if (forw_hdr == NULL)
goto err_reg_ctl;
net->ipv4.forw_hdr = forw_hdr;
-@@ -2237,8 +2240,7 @@ err_reg_ctl:
+@@ -2237,8 +2242,7 @@ err_reg_ctl:
err_reg_dflt:
__devinet_sysctl_unregister(all);
err_reg_all:
@@ -91289,6 +91296,18 @@ index f226709..0e735a8 100644
_proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
+diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
+index ca8e0a5..1f9c314 100644
+--- a/net/sched/sch_atm.c
++++ b/net/sched/sch_atm.c
+@@ -605,6 +605,7 @@ static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl,
+ struct sockaddr_atmpvc pvc;
+ int state;
+
++ memset(&pvc, 0, sizeof(pvc));
+ pvc.sap_family = AF_ATMPVC;
+ pvc.sap_addr.itf = flow->vcc->dev ? flow->vcc->dev->number : -1;
+ pvc.sap_addr.vpi = flow->vcc->vpi;
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 391a245..296b3d7 100644
--- a/net/sctp/ipv6.c
@@ -91906,58 +91925,6 @@ index 8343737..677025e 100644
.mode = 0644,
.proc_handler = read_reset_stat,
},
-diff --git a/net/sunrpc/xprtrdma/svc_rdma_marshal.c b/net/sunrpc/xprtrdma/svc_rdma_marshal.c
-index 8d2eddd..65b1462 100644
---- a/net/sunrpc/xprtrdma/svc_rdma_marshal.c
-+++ b/net/sunrpc/xprtrdma/svc_rdma_marshal.c
-@@ -98,6 +98,7 @@ void svc_rdma_rcl_chunk_counts(struct rpcrdma_read_chunk *ch,
- */
- static u32 *decode_write_list(u32 *va, u32 *vaend)
- {
-+ unsigned long start, end;
- int nchunks;
-
- struct rpcrdma_write_array *ary =
-@@ -113,9 +114,12 @@ static u32 *decode_write_list(u32 *va, u32 *vaend)
- return NULL;
- }
- nchunks = ntohl(ary->wc_nchunks);
-- if (((unsigned long)&ary->wc_array[0] +
-- (sizeof(struct rpcrdma_write_chunk) * nchunks)) >
-- (unsigned long)vaend) {
-+
-+ start = (unsigned long)&ary->wc_array[0];
-+ end = (unsigned long)vaend;
-+ if (nchunks < 0 ||
-+ nchunks > (SIZE_MAX - start) / sizeof(struct rpcrdma_write_chunk) ||
-+ (start + (sizeof(struct rpcrdma_write_chunk) * nchunks)) > end) {
- dprintk("svcrdma: ary=%p, wc_nchunks=%d, vaend=%p\n",
- ary, nchunks, vaend);
- return NULL;
-@@ -129,6 +133,7 @@ static u32 *decode_write_list(u32 *va, u32 *vaend)
-
- static u32 *decode_reply_array(u32 *va, u32 *vaend)
- {
-+ unsigned long start, end;
- int nchunks;
- struct rpcrdma_write_array *ary =
- (struct rpcrdma_write_array *)va;
-@@ -143,9 +148,12 @@ static u32 *decode_reply_array(u32 *va, u32 *vaend)
- return NULL;
- }
- nchunks = ntohl(ary->wc_nchunks);
-- if (((unsigned long)&ary->wc_array[0] +
-- (sizeof(struct rpcrdma_write_chunk) * nchunks)) >
-- (unsigned long)vaend) {
-+
-+ start = (unsigned long)&ary->wc_array[0];
-+ end = (unsigned long)vaend;
-+ if (nchunks < 0 ||
-+ nchunks > (SIZE_MAX - start) / sizeof(struct rpcrdma_write_chunk) ||
-+ (start + (sizeof(struct rpcrdma_write_chunk) * nchunks)) > end) {
- dprintk("svcrdma: ary=%p, wc_nchunks=%d, vaend=%p\n",
- ary, nchunks, vaend);
- return NULL;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index 0ce7552..d074459 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -92894,7 +92861,7 @@ index f5eb43d..1814de8 100644
shdr = (Elf_Shdr *)((char *)ehdr + _r(&ehdr->e_shoff));
shstrtab_sec = shdr + r2(&ehdr->e_shstrndx);
diff --git a/security/Kconfig b/security/Kconfig
-index e9c6ac7..0d298ea 100644
+index e9c6ac7..a4d558d 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -4,6 +4,956 @@
@@ -93577,7 +93544,7 @@ index e9c6ac7..0d298ea 100644
+
+config PAX_RANDKSTACK
+ bool "Randomize kernel stack base"
-+ default y if GRKERNSEC_CONFIG_AUTO
++ default y if GRKERNSEC_CONFIG_AUTO && !(GRKERNSEC_CONFIG_VIRT_HOST && GRKERNSEC_CONFIG_VIRT_VIRTUALBOX)
+ depends on X86_TSC && X86
+ help
+ By saying Y here the kernel will randomize every task's kernel