[kernel] r16276 - in dists/sid/linux-2.6/debian: . patches/features/all/openvz

Maximilian Attems maks at alioth.debian.org
Mon Sep 13 19:08:00 UTC 2010


Author: maks
Date: Mon Sep 13 19:07:56 2010
New Revision: 16276

Log:
update openvz patch to their latest git

It is starting to become a pain to merge;
there are new conflicts in net/sock.h and net/tun.c.

Modified:
   dists/sid/linux-2.6/debian/changelog
   dists/sid/linux-2.6/debian/patches/features/all/openvz/openvz.patch

Modified: dists/sid/linux-2.6/debian/changelog
==============================================================================
--- dists/sid/linux-2.6/debian/changelog	Mon Sep 13 19:07:40 2010	(r16275)
+++ dists/sid/linux-2.6/debian/changelog	Mon Sep 13 19:07:56 2010	(r16276)
@@ -80,6 +80,7 @@
   * nouveau: disable acceleration on NVA3/NVA5/NVA8 by default.
   * openvz: disable KSM. Thanks Dietmar Maurer <dietmar at proxmox.com>.
     (closes: #585864)
+  * Update openvz patch to d38b56fd0dca.
   * openvz: enalbe modular VZ_EVENT.
 
  -- Ben Hutchings <ben at decadent.org.uk>  Fri, 27 Aug 2010 08:38:26 +0100

Modified: dists/sid/linux-2.6/debian/patches/features/all/openvz/openvz.patch
==============================================================================
--- dists/sid/linux-2.6/debian/patches/features/all/openvz/openvz.patch	Mon Sep 13 19:07:40 2010	(r16275)
+++ dists/sid/linux-2.6/debian/patches/features/all/openvz/openvz.patch	Mon Sep 13 19:07:56 2010	(r16276)
@@ -1,3 +1,136 @@
+commit d38b56fd0dcacadcaeaa2e6b66260028cde13931
+Author: Pavel Emelyanov <xemul at openvz.org>
+Date:   Fri Sep 3 17:34:05 2010 +0400
+
+    OpenVZ kernel 2.6.32-dobrovolskiy released
+    
+    Named after Georgiy Timofeyevich Dobrovolskiy - a soviet cosmonaut
+    
+    Signed-off-by: Pavel Emelyanov <xemul at openvz.org>
+
+commit 097aad239bad5a90282db2d10c52103cf56fd50b
+Author: Pavel Emelyanov <xemul at openvz.org>
+Date:   Fri Sep 3 16:13:52 2010 +0400
+
+    vzevent: Don't use KOBJ_XXX constants any longer.
+    
+    Introduce our own ones, since their usage is internal.
+    
+    Signed-off-by: Pavel Emelyanov <xemul at openvz.org>
+
+commit 3a07627d33e0ea2cc842bf9631950aba901e651c
+Author: Pavel Emelyanov <xemul at openvz.org>
+Date:   Fri Sep 3 16:13:21 2010 +0400
+
+    vzevent: Compilation fixes on 2.6.32
+    
+    Signed-off-by: Pavel Emelyanov <xemul at openvz.org>
+
+commit dda56ffbc306d36ecb5a8c73a7445922683b1766
+Author: Vitaly Gusev <vgusev at openvz.org>
+Date:   Fri Sep 3 16:12:29 2010 +0400
+
+    vzevent: Hook the module into Kconfig and Makefile
+    
+    Signed-off-by: Vitaly Gusev <vgusev at openvz.org>
+    Signed-off-by: Pavel Emelyanov <xemul at openvz.org>
+
+commit 8b0dcdfb715811064c54b3070b13e702e91c8c5d
+Author: Vitaly Gusev <vgusev at openvz.org>
+Date:   Fri Sep 3 16:12:00 2010 +0400
+
+    vzevent: add "reboot_event" parameter to the vzevent module
+    
+    By default "reboot_event" parameter is 0. This means that no any
+    reboot events are send to userspace and only "STOP" event.
+    
+    To get "reboot" events set this parameter to 1.
+    
+    Signed-off-by: Vitaly Gusev <vgusev at openvz.org>
+    Signed-off-by: Pavel Emelyanov <xemul at openvz.org>
+
+commit 092221b382aeaa9c7f6247c04154ae188282998a
+Author: Vitaly Gusev <vgusev at openvz.org>
+Date:   Fri Sep 3 16:11:31 2010 +0400
+
+    vzevent:  Add "ve-reboot" event
+    
+    Now userspace may receive
+    
+             "ve-reboot at VEID"
+    
+     message
+    
+    Signed-off-by: Vitaly Gusev <vgusev at openvz.org>
+    Signed-off-by: Pavel Emelyanov <xemul at openvz.org>
+
+commit 850fa8377c6717b41a8bee5650af44a7a0b41c48
+Author: Vitaly Gusev <vgusev at openvz.org>
+Date:   Fri Sep 3 16:10:47 2010 +0400
+
+    vzevent: Don't NULL dereference
+    
+    Now all actions are in a "switch", but what if some action will be
+    missed?
+    
+    Signed-off-by: Vitaly Gusev <vgusev at openvz.org>
+    Signed-off-by: Pavel Emelyanov <xemul at openvz.org>
+
+commit 03263888e35b4f49bd73fa53753101904fcaa6c0
+Merge: c9faa01 671e83b
+Author: Pavel Emelyanov <xemul at openvz.org>
+Date:   Fri Sep 3 16:09:22 2010 +0400
+
+    Merged linux-2.6.32.21
+    
+    Conflicts:
+    
+    	Makefile
+    
+    Signed-off-by: Pavel Emelyanov <xemul at openvz.org>
+
+commit c9faa010fb66cb18d61a4bb0b3f010d90cb47d84
+Author: Pavel Emelyanov <xemul at openvz.org>
+Date:   Tue Aug 24 12:48:00 2010 +0400
+
+    OpenVZ kernel 2.6.32-bykovsky released
+    
+    Named after Valery Fyodorovich Bykovsky - a soviet cosmonaut
+    
+    Signed-off-by: Pavel Emelyanov <xemul at openvz.org>
+
+commit 1ebdc4f988fbb4526e95e96bdcd432168de7ce1a
+Merge: b870541 b53e490
+Author: Pavel Emelyanov <xemul at openvz.org>
+Date:   Mon Aug 23 19:32:06 2010 +0400
+
+    Merged linux-2.6.32.20
+    
+    Conflicts:
+    
+    	Makefile
+    	mm/memory.c
+    
+    Signed-off-by: Pavel Emelyanov <xemul at openvz.org>
+
+commit b870541d7cc51f53da8b2de4383b0d4c3bb81f23
+Author: Pavel Emelyanov <xemul at openvz.org>
+Date:   Thu Jul 15 18:16:14 2010 +0400
+
+    cpt: Fix TLS segment migration between arches
+    
+    On x86 the tls segment is saved in cpt_ugs field, not the cpt_gs one.
+    
+    Signed-off-by: Pavel Emelyanov <xemul at openvz.org>
+
+commit b45bd227998b427cd636c4fa403ee9d8fe2f0ba5
+Author: Pavel Emelyanov <xemul at openvz.org>
+Date:   Thu Jul 15 18:15:00 2010 +0400
+
+    cpt: Fix nanosleep compat restart block restoring
+    
+    Signed-off-by: Pavel Emelyanov <xemul at openvz.org>
+
 commit 5fd638726a6999e334e5e2c0635a03a447adc0d1
 Author: Pavel Emelyanov <xemul at openvz.org>
 Date:   Thu Jun 17 20:45:46 2010 +0400
@@ -6320,14 +6453,14 @@
 +library.  If this is what you want to do, use the GNU Library General
 +Public License instead of this License.
 diff --git a/Makefile b/Makefile
-index 36fead3..674349f 100644
+index 3e7196f..0dae08c 100644
 --- a/Makefile
 +++ b/Makefile
 @@ -2,6 +2,7 @@ VERSION = 2
  PATCHLEVEL = 6
  SUBLEVEL = 32
  EXTRAVERSION =
-+VZVERSION = budarin
++VZVERSION = dobrovolskiy
  NAME = Man-Eating Seals of Antiquity
  
  # *DOCUMENTATION*
@@ -6351,10 +6484,10 @@
  
  define filechk_version.h
 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
-index fbc161d..e6cc64c 100644
+index cb5a57c..8904217 100644
 --- a/arch/x86/Kconfig
 +++ b/arch/x86/Kconfig
-@@ -2074,6 +2074,8 @@ config HAVE_ATOMIC_IOMAP
+@@ -2079,6 +2079,8 @@ config HAVE_ATOMIC_IOMAP
  	def_bool y
  	depends on X86_32
  
@@ -6363,7 +6496,7 @@
  source "net/Kconfig"
  
  source "drivers/Kconfig"
-@@ -2091,3 +2093,5 @@ source "crypto/Kconfig"
+@@ -2096,3 +2098,5 @@ source "crypto/Kconfig"
  source "arch/x86/kvm/Kconfig"
  
  source "lib/Kconfig"
@@ -7028,10 +7161,10 @@
  	.smp_prepare_boot_cpu	= native_smp_prepare_boot_cpu,
  	.smp_prepare_cpus	= native_smp_prepare_cpus,
 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
-index 28e963d..54a0ecf 100644
+index 29ec560..4daf07b 100644
 --- a/arch/x86/kernel/smpboot.c
 +++ b/arch/x86/kernel/smpboot.c
-@@ -733,6 +733,12 @@ do_rest:
+@@ -752,6 +752,12 @@ do_rest:
  	initial_code = (unsigned long)start_secondary;
  	stack_start.sp = (void *) c_idle.idle->thread.sp;
  
@@ -7123,7 +7256,7 @@
  
  EXPORT_SYMBOL(__get_user_1);
 diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
-index f4cee90..3e549cd 100644
+index 1739358..4803459 100644
 --- a/arch/x86/mm/fault.c
 +++ b/arch/x86/mm/fault.c
 @@ -689,7 +689,7 @@ show_signal_msg(struct pt_regs *regs, unsigned long error_code,
@@ -7135,7 +7268,7 @@
  		task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
  		tsk->comm, task_pid_nr(tsk), address,
  		(void *)regs->ip, (void *)regs->sp, error_code);
-@@ -909,7 +909,7 @@ spurious_fault(unsigned long error_code, unsigned long address)
+@@ -911,7 +911,7 @@ spurious_fault(unsigned long error_code, unsigned long address)
  	return ret;
  }
  
@@ -12115,7 +12248,7 @@
  	if (!sk)
  		return -ENOMEM;
 diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
-index 5910df6..0b64d3d 100644
+index b724d7f..c457a95 100644
 --- a/drivers/net/pppol2tp.c
 +++ b/drivers/net/pppol2tp.c
 @@ -97,6 +97,7 @@
@@ -12126,7 +12259,7 @@
  
  #include <asm/byteorder.h>
  #include <asm/atomic.h>
-@@ -1588,6 +1589,9 @@ static int pppol2tp_create(struct net *net, struct socket *sock)
+@@ -1589,6 +1590,9 @@ static int pppol2tp_create(struct net *net, struct socket *sock)
  	int error = -ENOMEM;
  	struct sock *sk;
  
@@ -14886,10 +15019,10 @@
  	unregister_filesystem(&bm_fs_type);
  }
 diff --git a/fs/block_dev.c b/fs/block_dev.c
-index 9b9e3dc..fe0cca1 100644
+index e65efa2..45cb33a 100644
 --- a/fs/block_dev.c
 +++ b/fs/block_dev.c
-@@ -1602,7 +1602,7 @@ int __invalidate_device(struct block_device *bdev)
+@@ -1604,7 +1604,7 @@ int __invalidate_device(struct block_device *bdev)
  		 * hold).
  		 */
  		shrink_dcache_sb(sb);
@@ -16194,10 +16327,10 @@
  
  static int __init init_ext3_fs(void)
 diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
-index 16efcee..3833fe9 100644
+index 99596fc..51c1399 100644
 --- a/fs/ext4/inode.c
 +++ b/fs/ext4/inode.c
-@@ -5770,9 +5770,14 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+@@ -5840,9 +5840,14 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
  	int ret = -EINVAL;
  	void *fsdata;
  	struct file *file = vma->vm_file;
@@ -16215,7 +16348,7 @@
  	 * Get i_alloc_sem to stop truncates messing with the inode. We cannot
  	 * get i_mutex because we are already holding mmap_sem.
 diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
-index b63d193..0ae6e52 100644
+index bf5ae88..3952507 100644
 --- a/fs/ext4/ioctl.c
 +++ b/fs/ext4/ioctl.c
 @@ -77,7 +77,7 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
@@ -17892,7 +18025,7 @@
   		return 0;
   
 diff --git a/fs/namespace.c b/fs/namespace.c
-index bdc3cb4..d811360 100644
+index 2beb0fb..b76df5d 100644
 --- a/fs/namespace.c
 +++ b/fs/namespace.c
 @@ -29,6 +29,7 @@
@@ -18075,7 +18208,7 @@
  /*
   * Now umount can handle mount points as well as block devices.
   * This is important for filesystems which use unnamed block devices.
-@@ -1130,7 +1210,7 @@ SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
+@@ -1137,7 +1217,7 @@ SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
  		goto dput_and_out;
  
  	retval = -EPERM;
@@ -18084,7 +18217,7 @@
  		goto dput_and_out;
  
  	retval = do_umount(path.mnt, flags);
-@@ -1156,7 +1236,7 @@ SYSCALL_DEFINE1(oldumount, char __user *, name)
+@@ -1163,7 +1243,7 @@ SYSCALL_DEFINE1(oldumount, char __user *, name)
  
  static int mount_is_safe(struct path *path)
  {
@@ -18093,7 +18226,7 @@
  		return 0;
  	return -EPERM;
  #ifdef notyet
-@@ -1425,6 +1505,8 @@ static int do_change_type(struct path *path, int flag)
+@@ -1432,6 +1512,8 @@ static int do_change_type(struct path *path, int flag)
  
  	if (path->dentry != path->mnt->mnt_root)
  		return -EINVAL;
@@ -18102,7 +18235,7 @@
  
  	down_write(&namespace_sem);
  	if (type == MS_SHARED) {
-@@ -1447,7 +1529,7 @@ static int do_change_type(struct path *path, int flag)
+@@ -1454,7 +1536,7 @@ static int do_change_type(struct path *path, int flag)
   * do loopback mount.
   */
  static int do_loopback(struct path *path, char *old_name,
@@ -18111,7 +18244,7 @@
  {
  	struct path old_path;
  	struct vfsmount *mnt = NULL;
-@@ -1477,6 +1559,7 @@ static int do_loopback(struct path *path, char *old_name,
+@@ -1484,6 +1566,7 @@ static int do_loopback(struct path *path, char *old_name,
  	if (!mnt)
  		goto out;
  
@@ -18119,7 +18252,7 @@
  	err = graft_tree(mnt, path);
  	if (err) {
  		LIST_HEAD(umount_list);
-@@ -1520,7 +1603,7 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
+@@ -1527,7 +1610,7 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
  	int err;
  	struct super_block *sb = path->mnt->mnt_sb;
  
@@ -18128,7 +18261,7 @@
  		return -EPERM;
  
  	if (!check_mnt(path->mnt))
-@@ -1529,6 +1612,9 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
+@@ -1536,6 +1619,9 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
  	if (path->dentry != path->mnt->mnt_root)
  		return -EINVAL;
  
@@ -18138,7 +18271,7 @@
  	down_write(&sb->s_umount);
  	if (flags & MS_BIND)
  		err = change_mount_flags(path->mnt, flags);
-@@ -1562,7 +1648,7 @@ static int do_move_mount(struct path *path, char *old_name)
+@@ -1569,7 +1655,7 @@ static int do_move_mount(struct path *path, char *old_name)
  	struct path old_path, parent_path;
  	struct vfsmount *p;
  	int err = 0;
@@ -18147,7 +18280,7 @@
  		return -EPERM;
  	if (!old_name || !*old_name)
  		return -EINVAL;
-@@ -1570,6 +1656,10 @@ static int do_move_mount(struct path *path, char *old_name)
+@@ -1577,6 +1663,10 @@ static int do_move_mount(struct path *path, char *old_name)
  	if (err)
  		return err;
  
@@ -18158,7 +18291,7 @@
  	down_write(&namespace_sem);
  	while (d_mountpoint(path->dentry) &&
  	       follow_down(path))
-@@ -1627,6 +1717,7 @@ out:
+@@ -1634,6 +1724,7 @@ out:
  	up_write(&namespace_sem);
  	if (!err)
  		path_put(&parent_path);
@@ -18166,7 +18299,7 @@
  	path_put(&old_path);
  	return err;
  }
-@@ -1644,7 +1735,7 @@ static int do_new_mount(struct path *path, char *type, int flags,
+@@ -1651,7 +1742,7 @@ static int do_new_mount(struct path *path, char *type, int flags,
  		return -EINVAL;
  
  	/* we need capabilities... */
@@ -18175,7 +18308,7 @@
  		return -EPERM;
  
  	lock_kernel();
-@@ -1685,6 +1776,11 @@ int do_add_mount(struct vfsmount *newmnt, struct path *path,
+@@ -1692,6 +1783,11 @@ int do_add_mount(struct vfsmount *newmnt, struct path *path,
  		goto unlock;
  
  	newmnt->mnt_flags = mnt_flags;
@@ -18187,7 +18320,7 @@
  	if ((err = graft_tree(newmnt, path)))
  		goto unlock;
  
-@@ -1959,7 +2055,7 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
+@@ -1966,7 +2062,7 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
  		retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
  				    data_page);
  	else if (flags & MS_BIND)
@@ -18196,7 +18329,7 @@
  	else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
  		retval = do_change_type(&path, flags);
  	else if (flags & MS_MOVE)
-@@ -2122,6 +2218,7 @@ out_dir:
+@@ -2129,6 +2225,7 @@ out_dir:
  out_type:
  	return ret;
  }
@@ -18204,7 +18337,7 @@
  
  /*
   * pivot_root Semantics:
-@@ -2281,7 +2378,7 @@ void __init mnt_init(void)
+@@ -2288,7 +2385,7 @@ void __init mnt_init(void)
  	init_rwsem(&namespace_sem);
  
  	mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct vfsmount),
@@ -18281,7 +18414,7 @@
  		if (clp->rpc_ops != data->rpc_ops)
  			continue;
 diff --git a/fs/nfs/super.c b/fs/nfs/super.c
-index 4bf23f6..253438f 100644
+index c0173a8..8ffb55b 100644
 --- a/fs/nfs/super.c
 +++ b/fs/nfs/super.c
 @@ -53,6 +53,9 @@
@@ -18386,7 +18519,7 @@
  #ifdef CONFIG_NFS_V4
  	unregister_filesystem(&nfs4_fs_type);
  #endif
-@@ -1794,6 +1850,11 @@ static int nfs_validate_mount_data(void *options,
+@@ -1815,6 +1871,11 @@ static int nfs_validate_mount_data(void *options,
  		goto out_v3_not_compiled;
  #endif /* !CONFIG_NFS_V3 */
  
@@ -18398,7 +18531,7 @@
  	return 0;
  
  out_no_data:
-@@ -2079,6 +2140,10 @@ static int nfs_compare_super(struct super_block *sb, void *data)
+@@ -2100,6 +2161,10 @@ static int nfs_compare_super(struct super_block *sb, void *data)
  	struct nfs_server *server = sb_mntdata->server, *old = NFS_SB(sb);
  	int mntflags = sb_mntdata->mntflags;
  
@@ -18409,7 +18542,7 @@
  	if (!nfs_compare_super_address(old, server))
  		return 0;
  	/* Note: NFS_MOUNT_UNSHARED == NFS4_MOUNT_UNSHARED */
-@@ -2107,6 +2172,11 @@ static int nfs_get_sb(struct file_system_type *fs_type,
+@@ -2128,6 +2193,11 @@ static int nfs_get_sb(struct file_system_type *fs_type,
  		.mntflags = flags,
  	};
  	int error = -ENOMEM;
@@ -18421,7 +18554,7 @@
  
  	data = nfs_alloc_parsed_mount_data(3);
  	mntfh = kzalloc(sizeof(*mntfh), GFP_KERNEL);
-@@ -2237,6 +2307,11 @@ static int nfs_xdev_get_sb(struct file_system_type *fs_type, int flags,
+@@ -2258,6 +2328,11 @@ static int nfs_xdev_get_sb(struct file_system_type *fs_type, int flags,
  		.mntflags = flags,
  	};
  	int error;
@@ -18512,7 +18645,7 @@
 +	spin_unlock(&inode_lock);
 +}
 diff --git a/fs/notify/inotify/inotify.h b/fs/notify/inotify/inotify.h
-index f234f3a..21faa74 100644
+index f234f3a..21faa743 100644
 --- a/fs/notify/inotify/inotify.h
 +++ b/fs/notify/inotify/inotify.h
 @@ -13,6 +13,7 @@ struct inotify_inode_mark_entry {
@@ -19013,7 +19146,7 @@
  /*
   * sys_pipe() is the normal C calling standard for creating
 diff --git a/fs/proc/array.c b/fs/proc/array.c
-index 725a650..7de3905 100644
+index 42fdc76..0310840 100644
 --- a/fs/proc/array.c
 +++ b/fs/proc/array.c
 @@ -83,6 +83,8 @@
@@ -19165,7 +19298,7 @@
  
  	state = *get_task_state(task);
  	vsize = eip = esp = 0;
-@@ -444,6 +496,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
+@@ -441,6 +493,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
  	priority = task_prio(task);
  	nice = task_nice(task);
  
@@ -19173,7 +19306,7 @@
  	/* Temporary variable needed for gcc-2.96 */
  	/* convert timespec -> nsec*/
  	start_time =
-@@ -451,10 +504,25 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
+@@ -448,10 +501,25 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
  				+ task->real_start_time.tv_nsec;
  	/* convert nsec -> ticks */
  	start_time = nsec_to_clock_t(start_time);
@@ -19200,7 +19333,7 @@
  		pid_nr_ns(pid, ns),
  		tcomm,
  		state,
-@@ -501,7 +569,16 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
+@@ -498,7 +566,16 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
  		task->policy,
  		(unsigned long long)delayacct_blkio_ticks(task),
  		cputime_to_clock_t(gtime),
@@ -26895,7 +27028,7 @@
  		goto out;
  
 diff --git a/fs/signalfd.c b/fs/signalfd.c
-index b07565c..5b872c3 100644
+index d98bea8..d0c9670 100644
 --- a/fs/signalfd.c
 +++ b/fs/signalfd.c
 @@ -28,10 +28,7 @@
@@ -26910,7 +27043,7 @@
  
  static int signalfd_release(struct inode *inode, struct file *file)
  {
-@@ -199,17 +196,17 @@ static ssize_t signalfd_read(struct file *file, char __user *buf, size_t count,
+@@ -201,17 +198,17 @@ static ssize_t signalfd_read(struct file *file, char __user *buf, size_t count,
  	return total ? total: ret;
  }
  
@@ -26930,7 +27063,7 @@
  
  	/* Check the SFD_* constants for consistency.  */
  	BUILD_BUG_ON(SFD_CLOEXEC != O_CLOEXEC);
-@@ -224,12 +221,19 @@ SYSCALL_DEFINE4(signalfd4, int, ufd, sigset_t __user *, user_mask,
+@@ -226,12 +223,19 @@ SYSCALL_DEFINE4(signalfd4, int, ufd, sigset_t __user *, user_mask,
  	sigdelsetmask(&sigmask, sigmask(SIGKILL) | sigmask(SIGSTOP));
  	signotset(&sigmask);
  
@@ -26951,7 +27084,7 @@
  
  		/*
  		 * When we call this, the initialization must be complete, since
-@@ -249,7 +253,7 @@ SYSCALL_DEFINE4(signalfd4, int, ufd, sigset_t __user *, user_mask,
+@@ -251,7 +255,7 @@ SYSCALL_DEFINE4(signalfd4, int, ufd, sigset_t __user *, user_mask,
  			return -EINVAL;
  		}
  		spin_lock_irq(&current->sighand->siglock);
@@ -26960,7 +27093,7 @@
  		spin_unlock_irq(&current->sighand->siglock);
  
  		wake_up(&current->sighand->signalfd_wqh);
-@@ -258,6 +262,7 @@ SYSCALL_DEFINE4(signalfd4, int, ufd, sigset_t __user *, user_mask,
+@@ -260,6 +264,7 @@ SYSCALL_DEFINE4(signalfd4, int, ufd, sigset_t __user *, user_mask,
  
  	return ufd;
  }
@@ -30001,7 +30134,7 @@
 +
  #endif /* __LINUX__AIO_H */
 diff --git a/include/linux/capability.h b/include/linux/capability.h
-index c8f2a5f..301d709 100644
+index c8f2a5f7..301d709 100644
 --- a/include/linux/capability.h
 +++ b/include/linux/capability.h
 @@ -197,12 +197,9 @@ struct cpu_vfs_cap_data {
@@ -32467,7 +32600,7 @@
 +#endif /* __LINUX_FAIRSCHED_H__ */
 diff --git a/include/linux/faudit.h b/include/linux/faudit.h
 new file mode 100644
-index 0000000..631c42e
+index 0000000..631c42e6
 --- /dev/null
 +++ b/include/linux/faudit.h
 @@ -0,0 +1,45 @@
@@ -32541,7 +32674,7 @@
  static inline int frozen(struct task_struct *p) { return 0; }
  static inline int freezing(struct task_struct *p) { return 0; }
 diff --git a/include/linux/fs.h b/include/linux/fs.h
-index 9b67805..3fef9ef 100644
+index 1ff0962..6c4a03b 100644
 --- a/include/linux/fs.h
 +++ b/include/linux/fs.h
 @@ -53,6 +53,7 @@ struct inodes_stat_t {
@@ -32657,7 +32790,7 @@
  	unsigned int fl_pid;
  	struct pid *fl_nspid;
  	wait_queue_head_t fl_wait;
-@@ -1509,6 +1532,7 @@ struct file_operations {
+@@ -1511,6 +1534,7 @@ struct file_operations {
  	ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
  	ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
  	int (*setlease)(struct file *, long, struct file_lock **);
@@ -32665,7 +32798,7 @@
  };
  
  struct inode_operations {
-@@ -1578,6 +1602,7 @@ struct super_operations {
+@@ -1580,6 +1604,7 @@ struct super_operations {
  #ifdef CONFIG_QUOTA
  	ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
  	ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
@@ -32673,7 +32806,7 @@
  #endif
  	int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
  };
-@@ -1755,8 +1780,14 @@ struct file_system_type {
+@@ -1757,8 +1782,14 @@ struct file_system_type {
  	struct lock_class_key i_mutex_key;
  	struct lock_class_key i_mutex_dir_key;
  	struct lock_class_key i_alloc_sem_key;
@@ -32688,7 +32821,7 @@
  extern int get_sb_ns(struct file_system_type *fs_type, int flags, void *data,
  	int (*fill_super)(struct super_block *, void *, int),
  	struct vfsmount *mnt);
-@@ -1800,13 +1831,20 @@ extern int register_filesystem(struct file_system_type *);
+@@ -1802,13 +1833,20 @@ extern int register_filesystem(struct file_system_type *);
  extern int unregister_filesystem(struct file_system_type *);
  extern struct vfsmount *kern_mount_data(struct file_system_type *, void *data);
  #define kern_mount(type) kern_mount_data(type, NULL)
@@ -32709,7 +32842,7 @@
  
  extern int current_umask(void);
  
-@@ -2065,7 +2103,8 @@ extern int check_disk_change(struct block_device *);
+@@ -2067,7 +2105,8 @@ extern int check_disk_change(struct block_device *);
  extern int __invalidate_device(struct block_device *);
  extern int invalidate_partition(struct gendisk *, int);
  #endif
@@ -32719,7 +32852,7 @@
  unsigned long invalidate_mapping_pages(struct address_space *mapping,
  					pgoff_t start, pgoff_t end);
  
-@@ -2478,6 +2517,17 @@ ssize_t simple_attr_read(struct file *file, char __user *buf,
+@@ -2480,6 +2519,17 @@ ssize_t simple_attr_read(struct file *file, char __user *buf,
  ssize_t simple_attr_write(struct file *file, const char __user *buf,
  			  size_t len, loff_t *ppos);
  
@@ -33320,7 +33453,7 @@
  
  const char * arch_vma_name(struct vm_area_struct *vma);
 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
-index 84a524a..8ecf0ec 100644
+index 9d12ed5..a037e0a 100644
 --- a/include/linux/mm_types.h
 +++ b/include/linux/mm_types.h
 @@ -106,6 +106,14 @@ struct page {
@@ -33423,7 +33556,7 @@
  extern int user_path_at(int, const char __user *, unsigned, struct path *);
  
 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
-index 812a5f3..94887e1 100644
+index ec12f8c..fa2222d 100644
 --- a/include/linux/netdevice.h
 +++ b/include/linux/netdevice.h
 @@ -300,6 +300,11 @@ enum netdev_state_t
@@ -33522,7 +33655,7 @@
  extern int		dev_set_alias(struct net_device *, const char *, size_t);
  extern int		dev_change_net_namespace(struct net_device *,
  						 struct net *, const char *);
-@@ -1914,6 +1959,18 @@ unsigned long netdev_increment_features(unsigned long all, unsigned long one,
+@@ -1916,6 +1961,18 @@ unsigned long netdev_increment_features(unsigned long all, unsigned long one,
  					unsigned long mask);
  unsigned long netdev_fix_features(unsigned long features, const char *name);
  
@@ -33923,7 +34056,7 @@
 +extern int do_nmi_show_regs(struct pt_regs *regs, int cpu);
  #endif
 diff --git a/include/linux/notifier.h b/include/linux/notifier.h
-index 44428d2..a3a0a02 100644
+index 5ecdb50..777b1e9 100644
 --- a/include/linux/notifier.h
 +++ b/include/linux/notifier.h
 @@ -153,8 +153,9 @@ extern int __srcu_notifier_call_chain(struct srcu_notifier_head *nh,
@@ -34292,7 +34425,7 @@
  static inline void page_dup_rmap(struct page *page)
  {
 diff --git a/include/linux/sched.h b/include/linux/sched.h
-index b253434..1412d9a 100644
+index cc24beb..e876c91 100644
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
 @@ -94,6 +94,8 @@ struct sched_param {
@@ -34362,7 +34495,7 @@
  /*
   * NOTE! "signal_struct" does not have it's own
   * locking, because a shared signal_struct always
-@@ -1283,6 +1313,7 @@ struct task_struct {
+@@ -1286,6 +1316,7 @@ struct task_struct {
  	unsigned in_execve:1;	/* Tell the LSMs that the process is doing an
  				 * execve */
  	unsigned in_iowait:1;
@@ -34370,7 +34503,7 @@
  
  
  	/* Revert to default priority/policy when forking */
-@@ -1498,6 +1529,14 @@ struct task_struct {
+@@ -1501,6 +1532,14 @@ struct task_struct {
  	struct rcu_head rcu;
  
  	/*
@@ -34385,7 +34518,7 @@
  	 * cache last used pipe for splice
  	 */
  	struct pipe_inode_info *splice_pipe;
-@@ -1541,6 +1580,19 @@ struct task_struct {
+@@ -1544,6 +1583,19 @@ struct task_struct {
  	unsigned long trace_recursion;
  #endif /* CONFIG_TRACING */
  	unsigned long stack_start;
@@ -34405,7 +34538,7 @@
  };
  
  /* Future-safe accessor for struct task_struct's cpus_allowed. */
-@@ -1726,6 +1778,43 @@ extern cputime_t task_utime(struct task_struct *p);
+@@ -1730,6 +1782,43 @@ extern cputime_t task_stime(struct task_struct *p);
  extern cputime_t task_gtime(struct task_struct *p);
  extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
  
@@ -34449,7 +34582,7 @@
  /*
   * Per process flags
   */
-@@ -1735,6 +1824,7 @@ extern cputime_t task_gtime(struct task_struct *p);
+@@ -1739,6 +1828,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
  #define PF_EXITING	0x00000004	/* getting shut down */
  #define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
  #define PF_VCPU		0x00000010	/* I'm a virtual CPU */
@@ -34457,7 +34590,7 @@
  #define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
  #define PF_MCE_PROCESS  0x00000080      /* process policy on mce errors */
  #define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
-@@ -1871,6 +1961,21 @@ extern unsigned long long
+@@ -1875,6 +1965,21 @@ extern unsigned long long
  task_sched_runtime(struct task_struct *task);
  extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
  
@@ -34479,7 +34612,7 @@
  /* sched_exec is called by processes performing an exec */
  #ifdef CONFIG_SMP
  extern void sched_exec(void);
-@@ -2150,6 +2255,13 @@ extern int disallow_signal(int);
+@@ -2154,6 +2259,13 @@ extern int disallow_signal(int);
  
  extern int do_execve(char *, char __user * __user *, char __user * __user *, struct pt_regs *);
  extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
@@ -34493,7 +34626,7 @@
  struct task_struct *fork_idle(int);
  
  extern void set_task_comm(struct task_struct *tsk, char *from);
-@@ -2167,11 +2279,11 @@ static inline unsigned long wait_task_inactive(struct task_struct *p,
+@@ -2171,11 +2283,11 @@ static inline unsigned long wait_task_inactive(struct task_struct *p,
  }
  #endif
  
@@ -34508,7 +34641,7 @@
  
  extern bool current_is_single_threaded(void);
  
-@@ -2179,10 +2291,10 @@ extern bool current_is_single_threaded(void);
+@@ -2183,10 +2295,10 @@ extern bool current_is_single_threaded(void);
   * Careful: do_each_thread/while_each_thread is a double loop so
   *          'break' will not work as expected - use goto instead.
   */
@@ -34522,7 +34655,7 @@
  	while ((t = next_thread(t)) != g)
  
  /* de_thread depends on thread_group_leader not being a pid based check */
-@@ -2207,8 +2319,14 @@ int same_thread_group(struct task_struct *p1, struct task_struct *p2)
+@@ -2211,8 +2323,14 @@ int same_thread_group(struct task_struct *p1, struct task_struct *p2)
  
  static inline struct task_struct *next_thread(const struct task_struct *p)
  {
@@ -34538,7 +34671,7 @@
  }
  
  static inline int thread_group_empty(struct task_struct *p)
-@@ -2253,6 +2371,98 @@ static inline void unlock_task_sighand(struct task_struct *tsk,
+@@ -2257,6 +2375,98 @@ static inline void unlock_task_sighand(struct task_struct *tsk,
  	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
  }
  
@@ -35133,7 +35266,7 @@
  	const struct rpc_timeout *timeout;	/* timeout parms */
  	struct sockaddr_storage	addr;		/* server address */
 diff --git a/include/linux/swap.h b/include/linux/swap.h
-index 4ec9001..c2ad7fd 100644
+index 977d150..2589995 100644
 --- a/include/linux/swap.h
 +++ b/include/linux/swap.h
 @@ -19,6 +19,7 @@ struct bio;
@@ -35200,7 +35333,7 @@
  /* linux/mm/page_alloc.c */
  extern unsigned long totalram_pages;
  extern unsigned long totalreserve_pages;
-@@ -294,6 +313,7 @@ extern void show_swap_cache_info(void);
+@@ -284,6 +303,7 @@ extern void show_swap_cache_info(void);
  extern int add_to_swap(struct page *);
  extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t);
  extern void __delete_from_swap_cache(struct page *);
@@ -35208,7 +35341,7 @@
  extern void delete_from_swap_cache(struct page *);
  extern void free_page_and_swap_cache(struct page *);
  extern void free_pages_and_swap_cache(struct page **, int);
-@@ -307,7 +327,7 @@ extern struct page *swapin_readahead(swp_entry_t, gfp_t,
+@@ -297,7 +317,7 @@ extern struct page *swapin_readahead(swp_entry_t, gfp_t,
  extern long nr_swap_pages;
  extern long total_swap_pages;
  extern void si_swapinfo(struct sysinfo *);
@@ -35217,7 +35350,7 @@
  extern swp_entry_t get_swap_page_of_type(int);
  extern void swap_duplicate(swp_entry_t);
  extern int swapcache_prepare(swp_entry_t);
-@@ -322,6 +342,7 @@ extern sector_t swapdev_block(int, pgoff_t);
+@@ -312,6 +332,7 @@ extern sector_t swapdev_block(int, pgoff_t);
  extern struct swap_info_struct *get_swap_info_struct(unsigned);
  extern int reuse_swap_page(struct page *);
  extern int try_to_free_swap(struct page *);
@@ -35225,7 +35358,7 @@
  struct backing_dev_info;
  
  /* linux/mm/thrash.c */
-@@ -438,7 +459,7 @@ static inline int try_to_free_swap(struct page *page)
+@@ -428,7 +449,7 @@ static inline int try_to_free_swap(struct page *page)
  	return 0;
  }
  
@@ -35534,10 +35667,10 @@
  static inline void get_uts_ns(struct uts_namespace *ns)
 diff --git a/include/linux/ve.h b/include/linux/ve.h
 new file mode 100644
-index 0000000..e473727
+index 0000000..c4ba1d0
 --- /dev/null
 +++ b/include/linux/ve.h
-@@ -0,0 +1,359 @@
+@@ -0,0 +1,364 @@
 +/*
 + *  include/linux/ve.h
 + *
@@ -35699,6 +35832,7 @@
 +	int			is_running;
 +	int			is_locked;
 +	atomic_t		suspend;
++	unsigned long		flags;
 +	/* see vzcalluser.h for VE_FEATURE_XXX definitions */
 +	__u64			features;
 +
@@ -35831,6 +35965,10 @@
 +#define VE_MEMINFO_DEFAULT      1       /* default behaviour */
 +#define VE_MEMINFO_SYSTEM       0       /* disable meminfo virtualization */
 +
++enum {
++	VE_REBOOT,
++};
++
 +int init_ve_cgroups(struct ve_struct *ve);
 +void fini_ve_cgroups(struct ve_struct *ve);
 +
@@ -37063,10 +37201,10 @@
 +
 diff --git a/include/linux/vzevent.h b/include/linux/vzevent.h
 new file mode 100644
-index 0000000..1a67297
+index 0000000..a13688b
 --- /dev/null
 +++ b/include/linux/vzevent.h
-@@ -0,0 +1,13 @@
+@@ -0,0 +1,21 @@
 +#ifndef __LINUX_VZ_EVENT_H__
 +#define __LINUX_VZ_EVENT_H__
 +
@@ -37079,6 +37217,14 @@
 +}
 +#endif
 +
++enum {
++	VE_EVENT_MOUNT,
++	VE_EVENT_UMOUNT,
++	VE_EVENT_START,
++	VE_EVENT_STOP,
++	VE_EVENT_REBOOT,
++};
++
 +#endif /* __LINUX_VZ_EVENT_H__ */
 diff --git a/include/linux/vziptable_defs.h b/include/linux/vziptable_defs.h
 new file mode 100644
@@ -38075,9 +38221,9 @@
  	void                    (*sk_destruct)(struct sock *sk);
 +	struct sock_beancounter sk_bc;
 +	struct ve_struct	*owner_env;
- #ifndef __GENKSYMS__
- 	int			sk_backlog_len;
- 	int			sk_backlog_limit;
+ };
+ 
+ /*
 @@ -591,6 +595,8 @@ static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
  	})
  
@@ -39225,10 +39371,10 @@
  			struct ipc_ops *ops, struct ipc_params *params);
 diff --git a/kernel/Kconfig.openvz b/kernel/Kconfig.openvz
 new file mode 100644
-index 0000000..2216a4c
+index 0000000..86f81ac
 --- /dev/null
 +++ b/kernel/Kconfig.openvz
-@@ -0,0 +1,92 @@
+@@ -0,0 +1,103 @@
 +# Copyright (C) 2005  SWsoft
 +# All rights reserved.
 +# Licensing governed by "linux/COPYING.SWsoft" file.
@@ -39320,6 +39466,17 @@
 + 	  to save a running Virtual Environment and restore it
 + 	  on another host (live migration) or on the same host (checkpointing).
 +
++config VZ_EVENT
++ 	tristate "Enable sending notifications of the VE status change through the netlink socket"
++ 	depends on VE && VE_CALLS && NET
++ 	default m
++ 	help
++ 	  This option provides for sending notifications of the VE
++ 	  events to the curious user space applications through
++ 	  the netlink socket just like the core kernel
++ 	  networking code does. By now just the notifications of
 ++ 	  the VE essential status changes are being sent.
++
 +endmenu
 diff --git a/kernel/Makefile b/kernel/Makefile
 index d7c13d2..59704fe 100644
@@ -46116,7 +46273,7 @@
 +	return 0;
 +}
 diff --git a/kernel/compat.c b/kernel/compat.c
-index f6c204f..c13b053 100644
+index 180d188..a0c2fa9 100644
 --- a/kernel/compat.c
 +++ b/kernel/compat.c
 @@ -22,6 +22,7 @@
@@ -53414,10 +53571,10 @@
 +module_exit(exit_cpt);
 diff --git a/kernel/cpt/cpt_process.c b/kernel/cpt/cpt_process.c
 new file mode 100644
-index 0000000..6314bee
+index 0000000..7ca083c
 --- /dev/null
 +++ b/kernel/cpt/cpt_process.c
-@@ -0,0 +1,1379 @@
+@@ -0,0 +1,1380 @@
 +/*
 + *
 + *  kernel/cpt/cpt_process.c
@@ -53553,7 +53710,8 @@
 +		ri.cpt_debugreg[6] = tsk->thread.debugreg6;
 +		ri.cpt_debugreg[7] = tsk->thread.debugreg7;
 +		ri.cpt_fs = encode_segment(tsk->thread.fsindex);
-+		ri.cpt_gs = encode_segment(tsk->thread.gsindex);
++		ri.cpt_gs = CPT_SEG_ZERO;
++		ri.cpt_ugs = encode_segment(tsk->thread.gsindex);
 +
 +		xlate_ptregs_64_to_32(&ri, task_pt_regs(tsk), tsk);
 +
@@ -62423,7 +62581,7 @@
 +module_exit(exit_rst);
 diff --git a/kernel/cpt/rst_process.c b/kernel/cpt/rst_process.c
 new file mode 100644
-index 0000000..ffed431
+index 0000000..d9e7b75
 --- /dev/null
 +++ b/kernel/cpt/rst_process.c
 @@ -0,0 +1,1663 @@
@@ -63376,7 +63534,7 @@
 +		tsk->thread.fs = 0;
 +		tsk->thread.gs = 0;
 +		tsk->thread.fsindex = decode_segment(b->cpt_fs);
-+		tsk->thread.gsindex = decode_segment(b->cpt_gs);
++		tsk->thread.gsindex = decode_segment(b->cpt_ugs);
 +		tsk->thread.debugreg0 = b->cpt_debugreg[0];
 +		tsk->thread.debugreg1 = b->cpt_debugreg[1];
 +		tsk->thread.debugreg2 = b->cpt_debugreg[2];
@@ -66860,10 +67018,10 @@
 +}
 diff --git a/kernel/cpt/rst_undump.c b/kernel/cpt/rst_undump.c
 new file mode 100644
-index 0000000..68cc6c2
+index 0000000..c3d0074
 --- /dev/null
 +++ b/kernel/cpt/rst_undump.c
-@@ -0,0 +1,1077 @@
+@@ -0,0 +1,1074 @@
 +/*
 + *
 + *  kernel/cpt/rst_undump.c
@@ -67244,9 +67402,7 @@
 +
 +	if (ti->cpt_restart.fn != CPT_RBL_0) {
 +		if (ti->cpt_restart.fn == CPT_RBL_NANOSLEEP
-+#ifdef CONFIG_COMPAT
 +		    || ti->cpt_restart.fn == CPT_RBL_COMPAT_NANOSLEEP
-+#endif
 +		    ) {
 +			struct restart_block *rb;
 +			ktime_t e;
@@ -67264,10 +67420,9 @@
 +			e = ktime_add(e, timespec_to_ktime(ctx->cpt_monotonic_time));
 +
 +			rb = &task_thread_info(current)->restart_block;
-+			if (ti->cpt_restart.fn == CPT_RBL_NANOSLEEP)
-+				rb->fn = hrtimer_nanosleep_restart;
++			rb->fn = hrtimer_nanosleep_restart;
 +#ifdef CONFIG_COMPAT
-+			else
++			if (ti->cpt_restart.fn == CPT_RBL_COMPAT_NANOSLEEP)
 +				rb->fn = compat_nanosleep_restart;
 +#endif
 +			if (ctx->image_version >= CPT_VERSION_20) {
@@ -67319,7 +67474,7 @@
 +			rb->futex.time  = e.tv64;
 +			rb->futex.flags = ti->cpt_restart.arg3;
 +		} else
-+			eprintk_ctx("unknown restart block\n");
++			eprintk_ctx("unknown restart block (%d)\n", ti->cpt_restart.fn);
 +	}
 +
 +	if (thread_group_leader(current)) {
@@ -67955,7 +68110,7 @@
  		    (!cputime_eq(p->utime, cputime_zero) ||
  		     !cputime_eq(p->stime, cputime_zero)))
 diff --git a/kernel/exit.c b/kernel/exit.c
-index f7864ac..38b3e22 100644
+index 4a0e062..86da6c1 100644
 --- a/kernel/exit.c
 +++ b/kernel/exit.c
 @@ -22,6 +22,9 @@
@@ -68127,7 +68282,7 @@
  #ifdef CONFIG_NUMA
  	mpol_put(tsk->mempolicy);
  	tsk->mempolicy = NULL;
-@@ -1626,7 +1662,7 @@ repeat:
+@@ -1630,7 +1666,7 @@ repeat:
  
  		if (wo->wo_flags & __WNOTHREAD)
  			break;
@@ -68136,7 +68291,7 @@
  	read_unlock(&tasklist_lock);
  
  notask:
-@@ -1753,6 +1789,7 @@ SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
+@@ -1757,6 +1793,7 @@ SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
  	asmlinkage_protect(4, ret, upid, stat_addr, options, ru);
  	return ret;
  }
@@ -68834,7 +68989,7 @@
 +
 +#endif /* CONFIG_PROC_FS */
 diff --git a/kernel/fork.c b/kernel/fork.c
-index 28b4874..be960e6 100644
+index 9f3b066..1932409 100644
 --- a/kernel/fork.c
 +++ b/kernel/fork.c
 @@ -64,6 +64,8 @@
@@ -68900,7 +69055,7 @@
  #endif
  
  	/* do the arch specific task caches init */
-@@ -316,6 +330,10 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
+@@ -317,6 +331,10 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
  			continue;
  		}
  		charge = 0;
@@ -68911,7 +69066,7 @@
  		if (mpnt->vm_flags & VM_ACCOUNT) {
  			unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
  			if (security_vm_enough_memory(len))
-@@ -373,7 +391,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
+@@ -376,7 +394,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
  		rb_parent = &tmp->vm_rb;
  
  		mm->map_count++;
@@ -68920,7 +69075,7 @@
  
  		if (tmp->vm_ops && tmp->vm_ops->open)
  			tmp->vm_ops->open(tmp);
-@@ -392,6 +410,9 @@ out:
+@@ -395,6 +413,9 @@ out:
  fail_nomem_policy:
  	kmem_cache_free(vm_area_cachep, tmp);
  fail_nomem:
@@ -68930,7 +69085,7 @@
  	retval = -ENOMEM;
  	vm_unacct_memory(charge);
  	goto out;
-@@ -459,6 +480,15 @@ static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
+@@ -462,6 +483,15 @@ static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
  	mm->cached_hole_size = ~0UL;
  	mm_init_aio(mm);
  	mm_init_owner(mm, p);
@@ -68946,7 +69101,7 @@
  
  	if (likely(!mm_alloc_pgd(mm))) {
  		mm->def_flags = 0;
-@@ -466,6 +496,7 @@ static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
+@@ -469,6 +499,7 @@ static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
  		return mm;
  	}
  
@@ -68954,7 +69109,7 @@
  	free_mm(mm);
  	return NULL;
  }
-@@ -484,6 +515,7 @@ struct mm_struct * mm_alloc(void)
+@@ -487,6 +518,7 @@ struct mm_struct * mm_alloc(void)
  	}
  	return mm;
  }
@@ -68962,7 +69117,7 @@
  
  /*
   * Called when the last reference to the mm
-@@ -496,6 +528,7 @@ void __mmdrop(struct mm_struct *mm)
+@@ -499,6 +531,7 @@ void __mmdrop(struct mm_struct *mm)
  	mm_free_pgd(mm);
  	destroy_context(mm);
  	mmu_notifier_mm_destroy(mm);
@@ -68970,7 +69125,7 @@
  	free_mm(mm);
  }
  EXPORT_SYMBOL_GPL(__mmdrop);
-@@ -520,6 +553,9 @@ void mmput(struct mm_struct *mm)
+@@ -523,6 +556,9 @@ void mmput(struct mm_struct *mm)
  		put_swap_token(mm);
  		if (mm->binfmt)
  			module_put(mm->binfmt->module);
@@ -68980,7 +69135,7 @@
  		mmdrop(mm);
  	}
  }
-@@ -570,18 +606,20 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
+@@ -573,18 +609,20 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
  
  	/* Get rid of any futexes when releasing the mm */
  #ifdef CONFIG_FUTEX
@@ -69011,7 +69166,7 @@
  #endif
  
  	/* Get rid of any cached register state */
-@@ -670,6 +708,7 @@ fail_nocontext:
+@@ -673,6 +711,7 @@ fail_nocontext:
  	 * because it calls destroy_context()
  	 */
  	mm_free_pgd(mm);
@@ -69019,7 +69174,7 @@
  	free_mm(mm);
  	return NULL;
  }
-@@ -975,6 +1014,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+@@ -981,6 +1020,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
  					unsigned long stack_size,
  					int __user *child_tidptr,
  					struct pid *pid,
@@ -69027,7 +69182,7 @@
  					int trace)
  {
  	int retval;
-@@ -1022,6 +1062,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+@@ -1028,6 +1068,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
  
  	rt_mutex_init_task(p);
  
@@ -69037,7 +69192,7 @@
  #ifdef CONFIG_PROVE_LOCKING
  	DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
  	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
-@@ -1145,7 +1188,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+@@ -1151,7 +1194,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
  		goto bad_fork_cleanup_sighand;
  	if ((retval = copy_mm(clone_flags, p)))
  		goto bad_fork_cleanup_signal;
@@ -69046,7 +69201,7 @@
  		goto bad_fork_cleanup_mm;
  	if ((retval = copy_io(clone_flags, p)))
  		goto bad_fork_cleanup_namespaces;
-@@ -1155,7 +1198,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+@@ -1161,7 +1204,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
  
  	if (pid != &init_struct_pid) {
  		retval = -ENOMEM;
@@ -69055,7 +69210,7 @@
  		if (!pid)
  			goto bad_fork_cleanup_io;
  
-@@ -1163,6 +1206,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+@@ -1169,6 +1212,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
  			retval = pid_ns_prepare_proc(p->nsproxy->pid_ns);
  			if (retval < 0)
  				goto bad_fork_free_pid;
@@ -69064,7 +69219,7 @@
  		}
  	}
  
-@@ -1262,7 +1307,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+@@ -1268,7 +1313,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
  	 * thread can't slip out of an OOM kill (or normal SIGKILL).
   	 */
  	recalc_sigpending();
@@ -69073,7 +69228,7 @@
  		spin_unlock(&current->sighand->siglock);
  		write_unlock_irq(&tasklist_lock);
  		retval = -ERESTARTNOINTR;
-@@ -1290,14 +1335,24 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+@@ -1296,14 +1341,24 @@ static struct task_struct *copy_process(unsigned long clone_flags,
  			attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
  			attach_pid(p, PIDTYPE_SID, task_session(current));
  			list_add_tail_rcu(&p->tasks, &init_task.tasks);
@@ -69098,7 +69253,7 @@
  	write_unlock_irq(&tasklist_lock);
  	proc_fork_connector(p);
  	cgroup_post_fork(p);
-@@ -1340,6 +1395,9 @@ bad_fork_cleanup_count:
+@@ -1346,6 +1401,9 @@ bad_fork_cleanup_count:
  	atomic_dec(&p->cred->user->processes);
  	exit_creds(p);
  bad_fork_free:
@@ -69108,7 +69263,7 @@
  	free_task(p);
  fork_out:
  	return ERR_PTR(retval);
-@@ -1357,7 +1415,7 @@ struct task_struct * __cpuinit fork_idle(int cpu)
+@@ -1363,7 +1421,7 @@ struct task_struct * __cpuinit fork_idle(int cpu)
  	struct pt_regs regs;
  
  	task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL,
@@ -69117,7 +69272,7 @@
  	if (!IS_ERR(task))
  		init_idle(task, cpu);
  
-@@ -1370,12 +1428,13 @@ struct task_struct * __cpuinit fork_idle(int cpu)
+@@ -1376,12 +1434,13 @@ struct task_struct * __cpuinit fork_idle(int cpu)
   * It copies the process, and if successful kick-starts
   * it and waits for it to finish using the VM if required.
   */
@@ -69133,7 +69288,7 @@
  {
  	struct task_struct *p;
  	int trace = 0;
-@@ -1413,6 +1472,10 @@ long do_fork(unsigned long clone_flags,
+@@ -1419,6 +1478,10 @@ long do_fork(unsigned long clone_flags,
  		}
  	}
  
@@ -69144,7 +69299,7 @@
  	/*
  	 * When called from kernel_thread, don't do user tracing stuff.
  	 */
-@@ -1420,7 +1483,7 @@ long do_fork(unsigned long clone_flags,
+@@ -1426,7 +1489,7 @@ long do_fork(unsigned long clone_flags,
  		trace = tracehook_prepare_clone(clone_flags);
  
  	p = copy_process(clone_flags, stack_start, regs, stack_size,
@@ -69153,7 +69308,7 @@
  	/*
  	 * Do this prior waking up the new thread - the thread pointer
  	 * might get invalid after that point, if the thread exits quickly.
-@@ -1451,6 +1514,8 @@ long do_fork(unsigned long clone_flags,
+@@ -1457,6 +1520,8 @@ long do_fork(unsigned long clone_flags,
  		 */
  		p->flags &= ~PF_STARTING;
  
@@ -69162,7 +69317,7 @@
  		if (unlikely(clone_flags & CLONE_STOPPED)) {
  			/*
  			 * We'll start up with an immediate SIGSTOP.
-@@ -1474,6 +1539,8 @@ long do_fork(unsigned long clone_flags,
+@@ -1480,6 +1545,8 @@ long do_fork(unsigned long clone_flags,
  	} else {
  		nr = PTR_ERR(p);
  	}
@@ -69171,7 +69326,7 @@
  	return nr;
  }
  
-@@ -1489,25 +1556,38 @@ static void sighand_ctor(void *data)
+@@ -1495,25 +1562,38 @@ static void sighand_ctor(void *data)
  	init_waitqueue_head(&sighand->signalfd_wqh);
  }
  
@@ -69258,10 +69413,10 @@
  	__set_current_state(save);
  }
 diff --git a/kernel/futex.c b/kernel/futex.c
-index 1ad4fa6..b65727e 100644
+index 3071911..097d1c5 100644
 --- a/kernel/futex.c
 +++ b/kernel/futex.c
-@@ -1601,8 +1601,6 @@ handle_fault:
+@@ -1592,8 +1592,6 @@ handle_fault:
  #define FLAGS_CLOCKRT		0x02
  #define FLAGS_HAS_TIMEOUT	0x04
  
@@ -69270,7 +69425,7 @@
  /**
   * fixup_owner() - Post lock pi_state and corner case management
   * @uaddr:	user address of the futex
-@@ -1876,7 +1874,7 @@ out:
+@@ -1867,7 +1865,7 @@ out:
  }
  
  
@@ -69279,7 +69434,7 @@
  {
  	u32 __user *uaddr = (u32 __user *)restart->futex.uaddr;
  	int fshared = 0;
-@@ -1893,6 +1891,7 @@ static long futex_wait_restart(struct restart_block *restart)
+@@ -1884,6 +1882,7 @@ static long futex_wait_restart(struct restart_block *restart)
  				restart->futex.bitset,
  				restart->futex.flags & FLAGS_CLOCKRT);
  }
@@ -69494,10 +69649,10 @@
  	printk("\n");
  	printk("=============================================\n\n");
 diff --git a/kernel/module.c b/kernel/module.c
-index dfa33e8..48a2edc 100644
+index 4b270e6..94cbbde 100644
 --- a/kernel/module.c
 +++ b/kernel/module.c
-@@ -2915,6 +2915,8 @@ static char *module_flags(struct module *mod, char *buf)
+@@ -2917,6 +2917,8 @@ static char *module_flags(struct module *mod, char *buf)
  static void *m_start(struct seq_file *m, loff_t *pos)
  {
  	mutex_lock(&module_mutex);
@@ -69506,7 +69661,7 @@
  	return seq_list_start(&modules, *pos);
  }
  
-@@ -2979,7 +2981,7 @@ static const struct file_operations proc_modules_operations = {
+@@ -2981,7 +2983,7 @@ static const struct file_operations proc_modules_operations = {
  
  static int __init proc_modules_init(void)
  {
@@ -70052,7 +70207,7 @@
  }
  
 diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
-index 4954407..da76c51 100644
+index 5e76d22..2ec2a6f 100644
 --- a/kernel/posix-timers.c
 +++ b/kernel/posix-timers.c
 @@ -31,6 +31,8 @@
@@ -70694,7 +70849,7 @@
  	child = find_task_by_vpid(pid);
  	if (child)
 diff --git a/kernel/sched.c b/kernel/sched.c
-index 34d924e..bf1165c 100644
+index 9990074..cdbd1b9 100644
 --- a/kernel/sched.c
 +++ b/kernel/sched.c
 @@ -71,6 +71,8 @@
@@ -70959,7 +71114,7 @@
  /*
   * this_rq_lock - lock this runqueue and disable interrupts.
   */
-@@ -1943,11 +2170,21 @@ static int effective_prio(struct task_struct *p)
+@@ -1950,11 +2177,21 @@ static int effective_prio(struct task_struct *p)
   */
  static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
  {
@@ -70982,7 +71137,7 @@
  }
  
  /*
-@@ -1955,11 +2192,31 @@ static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
+@@ -1962,11 +2199,31 @@ static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
   */
  static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
  {
@@ -71015,7 +71170,7 @@
  }
  
  /**
-@@ -2276,6 +2533,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
+@@ -2283,6 +2540,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
  
  	return ncsw;
  }
@@ -71023,7 +71178,7 @@
  
  /***
   * kick_process - kick a running thread to enter/exit the kernel
-@@ -2372,8 +2630,11 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
+@@ -2379,8 +2637,11 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
  	 *
  	 * First fix up the nr_uninterruptible count:
  	 */
@@ -71036,7 +71191,7 @@
  	p->state = TASK_WAKING;
  	task_rq_unlock(rq, &flags);
  
-@@ -2607,6 +2868,10 @@ void sched_fork(struct task_struct *p, int clone_flags)
+@@ -2614,6 +2875,10 @@ void sched_fork(struct task_struct *p, int clone_flags)
  	/* Want to start with kernel preemption disabled. */
  	task_thread_info(p)->preempt_count = 1;
  #endif
@@ -71047,7 +71202,7 @@
  	plist_node_init(&p->pushable_tasks, MAX_PRIO);
  
  	put_cpu();
-@@ -2637,6 +2902,8 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
+@@ -2644,6 +2909,8 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
  		 */
  		p->sched_class->task_new(rq, p);
  		inc_nr_running(rq);
@@ -71056,7 +71211,7 @@
  	}
  	trace_sched_wakeup_new(rq, p, 1);
  	check_preempt_curr(rq, p, WF_FORK);
-@@ -2839,6 +3106,7 @@ asmlinkage void schedule_tail(struct task_struct *prev)
+@@ -2846,6 +3113,7 @@ asmlinkage void schedule_tail(struct task_struct *prev)
  	if (current->set_child_tid)
  		put_user(task_pid_vnr(current), current->set_child_tid);
  }
@@ -71064,7 +71219,7 @@
  
  /*
   * context_switch - switch to the new MM and the new
-@@ -2910,6 +3178,7 @@ unsigned long nr_running(void)
+@@ -2917,6 +3185,7 @@ unsigned long nr_running(void)
  
  	return sum;
  }
@@ -71072,7 +71227,7 @@
  
  unsigned long nr_uninterruptible(void)
  {
-@@ -2927,6 +3196,7 @@ unsigned long nr_uninterruptible(void)
+@@ -2934,6 +3203,7 @@ unsigned long nr_uninterruptible(void)
  
  	return sum;
  }
@@ -71080,7 +71235,7 @@
  
  unsigned long long nr_context_switches(void)
  {
-@@ -2962,6 +3232,72 @@ unsigned long this_cpu_load(void)
+@@ -2969,6 +3239,72 @@ unsigned long this_cpu_load(void)
  }
  
  
@@ -71153,7 +71308,7 @@
  /* Variables and functions for calc_load */
  static atomic_long_t calc_load_tasks;
  static unsigned long calc_load_update;
-@@ -2983,6 +3319,16 @@ void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
+@@ -2990,6 +3326,16 @@ void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
  	loads[2] = (avenrun[2] + offset) << shift;
  }
  
@@ -71170,7 +71325,7 @@
  static unsigned long
  calc_load(unsigned long load, unsigned long exp, unsigned long active)
  {
-@@ -2991,6 +3337,35 @@ calc_load(unsigned long load, unsigned long exp, unsigned long active)
+@@ -2998,6 +3344,35 @@ calc_load(unsigned long load, unsigned long exp, unsigned long active)
  	return load >> FSHIFT;
  }
  
@@ -71206,7 +71361,7 @@
  /*
   * calc_load - update the avenrun load estimates 10 ticks after the
   * CPUs have updated calc_load_tasks.
-@@ -3010,6 +3385,8 @@ void calc_global_load(void)
+@@ -3017,6 +3392,8 @@ void calc_global_load(void)
  	avenrun[1] = calc_load(avenrun[1], EXP_5, active);
  	avenrun[2] = calc_load(avenrun[2], EXP_15, active);
  
@@ -71215,7 +71370,7 @@
  	calc_load_update += LOAD_FREQ;
  }
  
-@@ -3074,6 +3451,16 @@ static void update_cpu_load(struct rq *this_rq)
+@@ -3081,6 +3458,16 @@ static void update_cpu_load(struct rq *this_rq)
  	}
  }
  
@@ -71232,7 +71387,7 @@
  #ifdef CONFIG_SMP
  
  /*
-@@ -3174,8 +3561,15 @@ void sched_exec(void)
+@@ -3181,8 +3568,15 @@ void sched_exec(void)
  static void pull_task(struct rq *src_rq, struct task_struct *p,
  		      struct rq *this_rq, int this_cpu)
  {
@@ -71248,7 +71403,7 @@
  	activate_task(this_rq, p, 0);
  	check_preempt_curr(this_rq, p, 0);
  }
-@@ -5052,10 +5446,13 @@ void account_user_time(struct task_struct *p, cputime_t cputime,
+@@ -5059,10 +5453,13 @@ void account_user_time(struct task_struct *p, cputime_t cputime,
  
  	/* Add user time to cpustat. */
  	tmp = cputime_to_cputime64(cputime);
@@ -71264,7 +71419,7 @@
  
  	cpuacct_update_stats(p, CPUACCT_STAT_USER, cputime);
  	/* Account for user time used */
-@@ -5112,6 +5509,7 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
+@@ -5119,6 +5516,7 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
  
  	/* Add system time to cpustat. */
  	tmp = cputime_to_cputime64(cputime);
@@ -71272,7 +71427,7 @@
  	if (hardirq_count() - hardirq_offset)
  		cpustat->irq = cputime64_add(cpustat->irq, tmp);
  	else if (softirq_count())
-@@ -5490,6 +5888,8 @@ need_resched_nonpreemptible:
+@@ -5542,6 +5940,8 @@ need_resched_nonpreemptible:
  	next = pick_next_task(rq);
  
  	if (likely(prev != next)) {
@@ -71281,7 +71436,7 @@
  		sched_info_switch(prev, next);
  		perf_event_task_sched_out(prev, next, cpu);
  
-@@ -5497,6 +5897,22 @@ need_resched_nonpreemptible:
+@@ -5549,6 +5949,22 @@ need_resched_nonpreemptible:
  		rq->curr = next;
  		++*switch_count;
  
@@ -71304,7 +71459,7 @@
  		context_switch(rq, prev, next); /* unlocks the rq */
  		/*
  		 * the context switch might have flipped the stack from under
-@@ -5504,8 +5920,10 @@ need_resched_nonpreemptible:
+@@ -5556,8 +5972,10 @@ need_resched_nonpreemptible:
  		 */
  		cpu = smp_processor_id();
  		rq = cpu_rq(cpu);
@@ -71316,7 +71471,7 @@
  
  	post_schedule(rq);
  
-@@ -6289,7 +6707,7 @@ recheck:
+@@ -6341,7 +6759,7 @@ recheck:
  	/*
  	 * Allow unprivileged RT tasks to decrease priority:
  	 */
@@ -71325,7 +71480,7 @@
  		if (rt_policy(policy)) {
  			unsigned long rlim_rtprio;
  
-@@ -6800,11 +7218,16 @@ EXPORT_SYMBOL(yield);
+@@ -6852,11 +7270,16 @@ EXPORT_SYMBOL(yield);
  void __sched io_schedule(void)
  {
  	struct rq *rq = raw_rq();
@@ -71342,7 +71497,7 @@
  	current->in_iowait = 0;
  	atomic_dec(&rq->nr_iowait);
  	delayacct_blkio_end();
-@@ -6815,11 +7238,16 @@ long __sched io_schedule_timeout(long timeout)
+@@ -6867,11 +7290,16 @@ long __sched io_schedule_timeout(long timeout)
  {
  	struct rq *rq = raw_rq();
  	long ret;
@@ -71359,7 +71514,7 @@
  	current->in_iowait = 0;
  	atomic_dec(&rq->nr_iowait);
  	delayacct_blkio_end();
-@@ -6926,17 +7354,7 @@ void sched_show_task(struct task_struct *p)
+@@ -6978,17 +7406,7 @@ void sched_show_task(struct task_struct *p)
  	state = p->state ? __ffs(p->state) + 1 : 0;
  	printk(KERN_INFO "%-13.13s %c", p->comm,
  		state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
@@ -71378,7 +71533,7 @@
  #ifdef CONFIG_DEBUG_STACK_USAGE
  	free = stack_not_used(p);
  #endif
-@@ -6953,13 +7371,13 @@ void show_state_filter(unsigned long state_filter)
+@@ -7005,13 +7423,13 @@ void show_state_filter(unsigned long state_filter)
  
  #if BITS_PER_LONG == 32
  	printk(KERN_INFO
@@ -71395,7 +71550,7 @@
  		/*
  		 * reset the NMI-timeout, listing all files on a slow
  		 * console might take alot of time:
-@@ -6967,7 +7385,7 @@ void show_state_filter(unsigned long state_filter)
+@@ -7019,7 +7437,7 @@ void show_state_filter(unsigned long state_filter)
  		touch_nmi_watchdog();
  		if (!state_filter || (p->state & state_filter))
  			sched_show_task(p);
@@ -71404,7 +71559,7 @@
  
  	touch_all_softlockup_watchdogs();
  
-@@ -7336,13 +7754,13 @@ static void migrate_live_tasks(int src_cpu)
+@@ -7388,13 +7806,13 @@ static void migrate_live_tasks(int src_cpu)
  
  	read_lock(&tasklist_lock);
  
@@ -71420,7 +71575,7 @@
  
  	read_unlock(&tasklist_lock);
  }
-@@ -9490,6 +9908,7 @@ void __init sched_init(void)
+@@ -9542,6 +9960,7 @@ void __init sched_init(void)
  	update_shares_data = __alloc_percpu(nr_cpu_ids * sizeof(unsigned long),
  					    __alignof__(unsigned long));
  #endif
@@ -71428,7 +71583,7 @@
  	for_each_possible_cpu(i) {
  		struct rq *rq;
  
-@@ -9503,7 +9922,7 @@ void __init sched_init(void)
+@@ -9555,7 +9974,7 @@ void __init sched_init(void)
  #ifdef CONFIG_FAIR_GROUP_SCHED
  		init_task_group.shares = init_task_group_load;
  		INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
@@ -71437,7 +71592,7 @@
  		/*
  		 * How much cpu bandwidth does init_task_group get?
  		 *
-@@ -9549,7 +9968,7 @@ void __init sched_init(void)
+@@ -9601,7 +10020,7 @@ void __init sched_init(void)
  		rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
  #ifdef CONFIG_RT_GROUP_SCHED
  		INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
@@ -71446,7 +71601,7 @@
  		init_tg_rt_entry(&init_task_group, &rq->rt, NULL, i, 1, NULL);
  #elif defined CONFIG_USER_SCHED
  		init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, 0, NULL);
-@@ -9615,6 +10034,7 @@ void __init sched_init(void)
+@@ -9667,6 +10086,7 @@ void __init sched_init(void)
  	 * During early bootup we pretend to be a normal task:
  	 */
  	current->sched_class = &fair_sched_class;
@@ -71454,7 +71609,7 @@
  
  	/* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */
  	zalloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
-@@ -9693,7 +10113,7 @@ void normalize_rt_tasks(void)
+@@ -9745,7 +10165,7 @@ void normalize_rt_tasks(void)
  	struct rq *rq;
  
  	read_lock_irqsave(&tasklist_lock, flags);
@@ -71463,7 +71618,7 @@
  		/*
  		 * Only normalize user tasks:
  		 */
-@@ -9724,7 +10144,7 @@ void normalize_rt_tasks(void)
+@@ -9776,7 +10196,7 @@ void normalize_rt_tasks(void)
  
  		__task_rq_unlock(rq);
  		spin_unlock(&p->pi_lock);
@@ -71472,7 +71627,7 @@
  
  	read_unlock_irqrestore(&tasklist_lock, flags);
  }
-@@ -10170,10 +10590,10 @@ static inline int tg_has_rt_tasks(struct task_group *tg)
+@@ -10222,10 +10642,10 @@ static inline int tg_has_rt_tasks(struct task_group *tg)
  {
  	struct task_struct *g, *p;
  
@@ -71505,7 +71660,7 @@
  	read_unlock_irqrestore(&tasklist_lock, flags);
  }
 diff --git a/kernel/signal.c b/kernel/signal.c
-index 4d0658d..fcb5698 100644
+index 423655a..07e7db6 100644
 --- a/kernel/signal.c
 +++ b/kernel/signal.c
 @@ -33,13 +33,32 @@
@@ -71610,7 +71765,7 @@
  
  /*
   * Remove signals in mask from the pending set and queue.
-@@ -655,7 +693,7 @@ static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
+@@ -657,7 +695,7 @@ static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
  		t = p;
  		do {
  			rm_from_queue(sigmask(SIGCONT), &t->pending);
@@ -71619,7 +71774,7 @@
  	} else if (sig == SIGCONT) {
  		unsigned int why;
  		/*
-@@ -687,7 +725,7 @@ static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
+@@ -689,7 +727,7 @@ static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
  				state |= TASK_INTERRUPTIBLE;
  			}
  			wake_up_state(t, state);
@@ -71628,7 +71783,7 @@
  
  		/*
  		 * Notify the parent with CLD_CONTINUED if we were stopped.
-@@ -809,7 +847,7 @@ static void complete_signal(int sig, struct task_struct *p, int group)
+@@ -811,7 +849,7 @@ static void complete_signal(int sig, struct task_struct *p, int group)
  			do {
  				sigaddset(&t->pending.signal, SIGKILL);
  				signal_wake_up(t, 1);
@@ -71637,7 +71792,7 @@
  			return;
  		}
  	}
-@@ -1080,7 +1118,8 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
+@@ -1082,7 +1120,8 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
  	int ret = check_kill_permission(sig, info, p);
  
  	if (!ret && sig)
@@ -71647,7 +71802,7 @@
  
  	return ret;
  }
-@@ -1205,7 +1244,7 @@ static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
+@@ -1207,7 +1246,7 @@ static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
  		int retval = 0, count = 0;
  		struct task_struct * p;
  
@@ -71656,7 +71811,7 @@
  			if (task_pid_vnr(p) > 1 &&
  					!same_thread_group(p, current)) {
  				int err = group_send_sig_info(sig, info, p);
-@@ -1396,6 +1435,14 @@ int do_notify_parent(struct task_struct *tsk, int sig)
+@@ -1398,6 +1437,14 @@ int do_notify_parent(struct task_struct *tsk, int sig)
  	BUG_ON(!task_ptrace(tsk) &&
  	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
  
@@ -71671,7 +71826,7 @@
  	info.si_signo = sig;
  	info.si_errno = 0;
  	/*
-@@ -1720,7 +1767,9 @@ static int do_signal_stop(int signr)
+@@ -1722,7 +1769,9 @@ static int do_signal_stop(int signr)
  
  	/* Now we don't run again until woken by SIGCONT or SIGKILL */
  	do {
@@ -71681,7 +71836,7 @@
  	} while (try_to_freeze());
  
  	tracehook_finish_jctl();
-@@ -1782,8 +1831,6 @@ relock:
+@@ -1784,8 +1833,6 @@ relock:
  	 * Now that we woke up, it's crucial if we're supposed to be
  	 * frozen that we freeze now before running anything substantial.
  	 */
@@ -71690,7 +71845,7 @@
  	spin_lock_irq(&sighand->siglock);
  	/*
  	 * Every stopped thread goes here after wakeup. Check to see if
-@@ -2281,7 +2328,8 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
+@@ -2283,7 +2330,8 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
  		 * probe.  No signal is actually delivered.
  		 */
  		if (!error && sig) {
@@ -71700,7 +71855,7 @@
  			/*
  			 * If lock_task_sighand() failed we pretend the task
  			 * dies after receiving the signal. The window is tiny,
-@@ -2678,5 +2726,5 @@ __attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
+@@ -2680,5 +2728,5 @@ __attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
  
  void __init signals_init(void)
  {
@@ -71768,7 +71923,7 @@
  	if (!in_interrupt() && local_softirq_pending())
  		invoke_softirq();
 diff --git a/kernel/sys.c b/kernel/sys.c
-index ce17760..3073c3e 100644
+index 26e4b8a..d182032 100644
 --- a/kernel/sys.c
 +++ b/kernel/sys.c
 @@ -10,6 +10,8 @@
@@ -71912,7 +72067,7 @@
  			if (who != cred->uid)
  				free_uid(user);		/* for find_user() */
  			break;
-@@ -375,6 +473,25 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
+@@ -375,6 +473,27 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
  	                magic2 != LINUX_REBOOT_MAGIC2C))
  		return -EINVAL;
  
@@ -71920,9 +72075,11 @@
 +	if (!ve_is_super(get_exec_env()))
 +		switch (cmd) {
 +		case LINUX_REBOOT_CMD_RESTART:
++		case LINUX_REBOOT_CMD_RESTART2:
++			set_bit(VE_REBOOT, &get_exec_env()->flags);
++
 +		case LINUX_REBOOT_CMD_HALT:
 +		case LINUX_REBOOT_CMD_POWER_OFF:
-+		case LINUX_REBOOT_CMD_RESTART2:
 +			force_sig(SIGKILL,
 +				get_exec_env()->ve_ns->pid_ns->child_reaper);
 +
@@ -71938,7 +72095,7 @@
  	/* Instead of trying to make the power_off code look like
  	 * halt when pm_power_off is not set do it the easy way.
  	 */
-@@ -925,8 +1042,27 @@ void do_sys_times(struct tms *tms)
+@@ -924,8 +1043,27 @@ void do_sys_times(struct tms *tms)
  	tms->tms_cstime = cputime_to_clock_t(cstime);
  }
  
@@ -71966,7 +72123,7 @@
  	if (tbuf) {
  		struct tms tmp;
  
-@@ -934,8 +1070,15 @@ SYSCALL_DEFINE1(times, struct tms __user *, tbuf)
+@@ -933,8 +1071,15 @@ SYSCALL_DEFINE1(times, struct tms __user *, tbuf)
  		if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
  			return -EFAULT;
  	}
@@ -71982,7 +72139,7 @@
  }
  
  /*
-@@ -1133,7 +1276,7 @@ SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
+@@ -1132,7 +1277,7 @@ SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
  	int errno;
  	char tmp[__NEW_UTS_LEN];
  
@@ -71991,7 +72148,7 @@
  		return -EPERM;
  	if (len < 0 || len > __NEW_UTS_LEN)
  		return -EINVAL;
-@@ -1182,7 +1325,7 @@ SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
+@@ -1181,7 +1326,7 @@ SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
  	int errno;
  	char tmp[__NEW_UTS_LEN];
  
@@ -72444,7 +72601,7 @@
  	return div_u64(x, NSEC_PER_SEC / USER_HZ);
  #elif (USER_HZ % 512) == 0
 diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
-index 8b709de..0af7669 100644
+index 26e2f37..5c773b3 100644
 --- a/kernel/time/timekeeping.c
 +++ b/kernel/time/timekeeping.c
 @@ -154,6 +154,7 @@ __cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
@@ -72674,10 +72831,10 @@
  
 diff --git a/kernel/ve/Makefile b/kernel/ve/Makefile
 new file mode 100644
-index 0000000..9d60161
+index 0000000..5513981
 --- /dev/null
 +++ b/kernel/ve/Makefile
-@@ -0,0 +1,16 @@
+@@ -0,0 +1,17 @@
 +#
 +#
 +#  kernel/ve/Makefile
@@ -72694,6 +72851,7 @@
 +vzmon-objs = vecalls.o
 +
 +obj-$(CONFIG_VZ_DEV) += vzdev.o
++obj-$(CONFIG_VZ_EVENT) += vzevent.o
 diff --git a/kernel/ve/hooks.c b/kernel/ve/hooks.c
 new file mode 100644
 index 0000000..1b82c35
@@ -75650,10 +75808,10 @@
 +module_exit(vzctl_exit);
 diff --git a/kernel/ve/vzevent.c b/kernel/ve/vzevent.c
 new file mode 100644
-index 0000000..554f169
+index 0000000..dc2fc20
 --- /dev/null
 +++ b/kernel/ve/vzevent.c
-@@ -0,0 +1,125 @@
+@@ -0,0 +1,139 @@
 +#include <linux/module.h>
 +#include <linux/kernel.h>
 +#include <linux/skbuff.h>
@@ -75666,6 +75824,10 @@
 +#define NETLINK_UEVENT	31
 +#define VZ_EVGRP_ALL	0x01
 +
++static int reboot_event;
++module_param(reboot_event, int, 0644);
++MODULE_PARM_DESC(reboot_event, "Enable reboot events");
++
 +/*
 + * NOTE: the original idea was to send events via kobject_uevent(),
 + * however, it turns out that it has negative consequences like
@@ -75677,14 +75839,16 @@
 +static char *action_to_string(int action)
 +{
 +	switch (action) {
-+	case KOBJ_MOUNT:
++	case VE_EVENT_MOUNT:
 +		return "ve-mount";
-+	case KOBJ_UMOUNT:
++	case VE_EVENT_UMOUNT:
 +		return "ve-umount";
-+	case KOBJ_START:
++	case VE_EVENT_START:
 +		return "ve-start";
-+	case KOBJ_STOP:
++	case VE_EVENT_STOP:
 +		return "ve-stop";
++	case VE_EVENT_REBOOT:
++		return "ve-reboot";
 +	default:
 +		return NULL;
 +	}
@@ -75697,6 +75861,9 @@
 +	int alen;
 +
 +	action = action_to_string(event);
++	if (!action)
++		return -EINVAL;
++
 +	alen = strlen(action);
 +
 +	skb = alloc_skb(len + 1 + alen, GFP_KERNEL);
@@ -75741,16 +75908,21 @@
 +	struct ve_struct *ve;
 +
 +	ve = (struct ve_struct *)data;
-+	vzevent_send(KOBJ_START, "%d", ve->veid);
++	vzevent_send(VE_EVENT_START, "%d", ve->veid);
 +	return 0;
 +}
 +
 +static void ve_stop(void *data)
 +{
 +	struct ve_struct *ve;
++	int event = VE_EVENT_STOP;
++
++	if (test_and_clear_bit(VE_REBOOT, &get_exec_env()->flags) &&
++		reboot_event)
++		event = VE_EVENT_REBOOT;
 +
 +	ve = (struct ve_struct *)data;
-+	vzevent_send(KOBJ_STOP, "%d", ve->veid);
++	vzevent_send(event, "%d", ve->veid);
 +}
 +
 +static struct ve_hook ve_start_stop_hook = {
@@ -75762,7 +75934,7 @@
 +
 +static int __init init_vzevent(void)
 +{
-+	vzev_sock = netlink_kernel_create(NETLINK_UEVENT, 0, NULL, THIS_MODULE);
++	vzev_sock = netlink_kernel_create(&init_net, NETLINK_UEVENT, 0, NULL, NULL, THIS_MODULE);
 +	if (vzev_sock == NULL)
 +		return -ENOMEM;
 +	ve_hook_register(VE_SS_CHAIN, &ve_start_stop_hook);
@@ -76193,7 +76365,7 @@
  }
 +EXPORT_SYMBOL_GPL(show_mem);
 diff --git a/mm/filemap.c b/mm/filemap.c
-index 8e96c90..9189743 100644
+index 46e3f8a..4e0d03f 100644
 --- a/mm/filemap.c
 +++ b/mm/filemap.c
 @@ -42,6 +42,7 @@
@@ -76302,7 +76474,7 @@
  {
  	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
 diff --git a/mm/memory-failure.c b/mm/memory-failure.c
-index dacc641..9d28f5c 100644
+index 8aeba53..9b753fe 100644
 --- a/mm/memory-failure.c
 +++ b/mm/memory-failure.c
 @@ -226,7 +226,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
@@ -76324,7 +76496,7 @@
  
  		if (!task_early_kill(tsk))
 diff --git a/mm/memory.c b/mm/memory.c
-index 4e59455..c5108b1 100644
+index 194dc17..8bb23cc 100644
 --- a/mm/memory.c
 +++ b/mm/memory.c
 @@ -42,6 +42,9 @@
@@ -76587,7 +76759,7 @@
  	add_mm_rss(mm, file_rss, anon_rss);
  	arch_leave_lazy_mmu_mode();
  	pte_unmap_unlock(pte - 1, ptl);
-@@ -1994,6 +2051,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2004,6 +2061,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
  	int reuse = 0, ret = 0;
  	int page_mkwrite = 0;
  	struct page *dirty_page = NULL;
@@ -76595,7 +76767,7 @@
  
  	old_page = vm_normal_page(vma, address, orig_pte);
  	if (!old_page) {
-@@ -2100,6 +2158,8 @@ reuse:
+@@ -2110,6 +2168,8 @@ reuse:
  		flush_cache_page(vma, address, pte_pfn(orig_pte));
  		entry = pte_mkyoung(orig_pte);
  		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
@@ -76604,7 +76776,7 @@
  		if (ptep_set_access_flags(vma, address, page_table, entry,1))
  			update_mmu_cache(vma, address, entry);
  		ret |= VM_FAULT_WRITE;
-@@ -2113,6 +2173,9 @@ reuse:
+@@ -2123,6 +2183,9 @@ reuse:
  gotten:
  	pte_unmap_unlock(page_table, ptl);
  
@@ -76614,7 +76786,7 @@
  	if (unlikely(anon_vma_prepare(vma)))
  		goto oom;
  
-@@ -2147,12 +2210,15 @@ gotten:
+@@ -2157,12 +2220,15 @@ gotten:
  	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
  	if (likely(pte_same(*page_table, orig_pte))) {
  		if (old_page) {
@@ -76631,7 +76803,7 @@
  		flush_cache_page(vma, address, pte_pfn(orig_pte));
  		entry = mk_pte(new_page, vma->vm_page_prot);
  		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-@@ -2164,6 +2230,7 @@ gotten:
+@@ -2174,6 +2240,7 @@ gotten:
  		 */
  		ptep_clear_flush(vma, address, page_table);
  		page_add_new_anon_rmap(new_page, vma, address);
@@ -76639,7 +76811,7 @@
  		/*
  		 * We call the notify macro here because, when using secondary
  		 * mmu page tables (such as kvm shadow page tables), we want the
-@@ -2207,6 +2274,7 @@ gotten:
+@@ -2217,6 +2284,7 @@ gotten:
  		page_cache_release(new_page);
  	if (old_page)
  		page_cache_release(old_page);
@@ -76647,7 +76819,7 @@
  unlock:
  	pte_unmap_unlock(page_table, ptl);
  	if (dirty_page) {
-@@ -2246,6 +2314,8 @@ unlock:
+@@ -2256,6 +2324,8 @@ unlock:
  oom_free_new:
  	page_cache_release(new_page);
  oom:
@@ -76656,7 +76828,7 @@
  	if (old_page) {
  		if (page_mkwrite) {
  			unlock_page(old_page);
-@@ -2502,10 +2572,16 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2512,10 +2582,16 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
  	pte_t pte;
  	struct mem_cgroup *ptr = NULL;
  	int ret = 0;
@@ -76674,7 +76846,7 @@
  	entry = pte_to_swp_entry(orig_pte);
  	if (unlikely(non_swap_entry(entry))) {
  		if (is_migration_entry(entry)) {
-@@ -2580,6 +2656,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2590,6 +2666,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
  	 */
  
  	inc_mm_counter(mm, anon_rss);
@@ -76682,7 +76854,7 @@
  	pte = mk_pte(page, vma->vm_page_prot);
  	if ((flags & FAULT_FLAG_WRITE) && reuse_swap_page(page)) {
  		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
-@@ -2588,11 +2665,14 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2598,11 +2675,14 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
  	flush_icache_page(vma, page);
  	set_pte_at(mm, address, page_table, pte);
  	page_add_anon_rmap(page, vma, address);
@@ -76698,7 +76870,7 @@
  		try_to_free_swap(page);
  	unlock_page(page);
  
-@@ -2608,6 +2688,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2618,6 +2698,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
  unlock:
  	pte_unmap_unlock(page_table, ptl);
  out:
@@ -76710,7 +76882,7 @@
  	return ret;
  out_nomap:
  	mem_cgroup_cancel_charge_swapin(ptr);
-@@ -2615,6 +2700,7 @@ out_nomap:
+@@ -2625,6 +2710,7 @@ out_nomap:
  out_page:
  	unlock_page(page);
  out_release:
@@ -76718,7 +76890,7 @@
  	page_cache_release(page);
  	return ret;
  }
-@@ -2631,6 +2717,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2668,6 +2754,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
  	struct page *page;
  	spinlock_t *ptl;
  	pte_t entry;
@@ -76726,7 +76898,7 @@
  
  	pte_unmap(page_table);
  
-@@ -2645,6 +2732,9 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2686,6 +2773,9 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
  	}
  
  	/* Allocate our own private page. */
@@ -76736,7 +76908,7 @@
  	if (unlikely(anon_vma_prepare(vma)))
  		goto oom;
  	page = alloc_zeroed_user_highpage_movable(vma, address);
-@@ -2665,12 +2755,15 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2706,12 +2796,15 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
  
  	inc_mm_counter(mm, anon_rss);
  	page_add_new_anon_rmap(page, vma, address);
@@ -76752,7 +76924,7 @@
  	pte_unmap_unlock(page_table, ptl);
  	return 0;
  release:
-@@ -2680,6 +2773,8 @@ release:
+@@ -2721,6 +2814,8 @@ release:
  oom_free_page:
  	page_cache_release(page);
  oom:
@@ -76761,7 +76933,7 @@
  	return VM_FAULT_OOM;
  }
  
-@@ -2707,6 +2802,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2748,6 +2843,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  	int anon = 0;
  	int charged = 0;
  	struct page *dirty_page = NULL;
@@ -76769,7 +76941,7 @@
  	struct vm_fault vmf;
  	int ret;
  	int page_mkwrite = 0;
-@@ -2716,9 +2812,13 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2757,9 +2853,13 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  	vmf.flags = flags;
  	vmf.page = NULL;
  
@@ -76784,7 +76956,7 @@
  
  	if (unlikely(PageHWPoison(vmf.page))) {
  		if (ret & VM_FAULT_LOCKED)
-@@ -2812,6 +2912,8 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2853,6 +2953,8 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  	 */
  	/* Only go through if we didn't race with anybody else... */
  	if (likely(pte_same(*page_table, orig_pte))) {
@@ -76793,7 +76965,7 @@
  		flush_icache_page(vma, page);
  		entry = mk_pte(page, vma->vm_page_prot);
  		if (flags & FAULT_FLAG_WRITE)
-@@ -2828,6 +2930,25 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2869,6 +2971,25 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  			}
  		}
  		set_pte_at(mm, address, page_table, entry);
@@ -76819,7 +76991,7 @@
  
  		/* no need to invalidate: a not-present page won't be cached */
  		update_mmu_cache(vma, address, entry);
-@@ -2867,6 +2988,9 @@ out:
+@@ -2908,6 +3029,9 @@ out:
  			page_cache_release(vmf.page);
  	}
  
@@ -76829,7 +77001,7 @@
  	return ret;
  
  unwritable_page:
-@@ -2994,6 +3118,27 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3035,6 +3159,27 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  	pmd_t *pmd;
  	pte_t *pte;
  
@@ -76857,7 +77029,7 @@
  	__set_current_state(TASK_RUNNING);
  
  	count_vm_event(PGFAULT);
-@@ -3038,6 +3183,8 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+@@ -3079,6 +3224,8 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
  }
  #endif /* __PAGETABLE_PUD_FOLDED */
  
@@ -76866,7 +77038,7 @@
  #ifndef __PAGETABLE_PMD_FOLDED
  /*
   * Allocate page middle directory.
-@@ -3068,6 +3215,8 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
+@@ -3109,6 +3256,8 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
  }
  #endif /* __PAGETABLE_PMD_FOLDED */
  
@@ -76875,7 +77047,7 @@
  int make_pages_present(unsigned long addr, unsigned long end)
  {
  	int ret, len, write;
-@@ -3087,6 +3236,8 @@ int make_pages_present(unsigned long addr, unsigned long end)
+@@ -3128,6 +3277,8 @@ int make_pages_present(unsigned long addr, unsigned long end)
  	return ret == len ? 0 : -EFAULT;
  }
  
@@ -76914,7 +77086,7 @@
  	gfp_temp = gfp_mask & ~(__GFP_WAIT|__GFP_IO);
  
 diff --git a/mm/mlock.c b/mm/mlock.c
-index 2e05c97..1ebf6e1 100644
+index 380ea89..59190a0 100644
 --- a/mm/mlock.c
 +++ b/mm/mlock.c
 @@ -18,6 +18,7 @@
@@ -76925,7 +77097,7 @@
  
  #include "internal.h"
  
-@@ -309,12 +310,14 @@ no_mlock:
+@@ -328,12 +329,14 @@ no_mlock:
   * and re-mlocked by try_to_{munlock|unmap} before we unmap and
   * free them.  This will result in freeing mlocked pages.
   */
@@ -76942,7 +77114,7 @@
  	vma->vm_flags &= ~VM_LOCKED;
  
  	for (addr = start; addr < end; addr += PAGE_SIZE) {
-@@ -374,6 +377,12 @@ static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
+@@ -393,6 +396,12 @@ static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
  		goto out;	/* don't set VM_LOCKED,  don't count */
  	}
  
@@ -76955,7 +77127,7 @@
  	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
  	*prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
  			  vma->vm_file, pgoff, vma_policy(vma));
-@@ -385,13 +394,13 @@ static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
+@@ -404,13 +413,13 @@ static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
  	if (start != vma->vm_start) {
  		ret = split_vma(mm, vma, start, 1);
  		if (ret)
@@ -76971,7 +77143,7 @@
  	}
  
  success:
-@@ -421,6 +430,11 @@ success:
+@@ -440,6 +449,11 @@ success:
  out:
  	*prev = vma;
  	return ret;
@@ -76983,7 +77155,7 @@
  }
  
  static int do_mlock(unsigned long start, size_t len, int on)
-@@ -499,6 +513,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
+@@ -518,6 +532,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
  	up_write(&current->mm->mmap_sem);
  	return error;
  }
@@ -76991,7 +77163,7 @@
  
  SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
  {
-@@ -511,6 +526,7 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
+@@ -530,6 +545,7 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
  	up_write(&current->mm->mmap_sem);
  	return ret;
  }
@@ -77000,7 +77172,7 @@
  static int do_mlockall(int flags)
  {
 diff --git a/mm/mmap.c b/mm/mmap.c
-index ae19746..a5dd0bf 100644
+index b309c75..a3ef2d2 100644
 --- a/mm/mmap.c
 +++ b/mm/mmap.c
 @@ -29,6 +29,7 @@
@@ -77063,7 +77235,7 @@
  		goto out;
  set_brk:
  	mm->brk = brk;
-@@ -1106,6 +1125,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
+@@ -1116,6 +1135,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
  	struct rb_node **rb_link, *rb_parent;
  	unsigned long charged = 0;
  	struct inode *inode =  file ? file->f_path.dentry->d_inode : NULL;
@@ -77071,7 +77243,7 @@
  
  	/* Clear old maps */
  	error = -ENOMEM;
-@@ -1145,6 +1165,11 @@ munmap_back:
+@@ -1155,6 +1175,11 @@ munmap_back:
  		vm_flags |= VM_ACCOUNT;
  	}
  
@@ -77083,7 +77255,7 @@
  	/*
  	 * Can we just expand an old mapping?
  	 */
-@@ -1157,7 +1182,8 @@ munmap_back:
+@@ -1167,7 +1192,8 @@ munmap_back:
  	 * specific mapper. the address has already been validated, but
  	 * not unmapped, but the maps are removed from the list.
  	 */
@@ -77093,7 +77265,7 @@
  	if (!vma) {
  		error = -ENOMEM;
  		goto unacct_error;
-@@ -1187,6 +1213,19 @@ munmap_back:
+@@ -1197,6 +1223,19 @@ munmap_back:
  			goto unmap_and_free_vma;
  		if (vm_flags & VM_EXECUTABLE)
  			added_exe_file_vma(mm);
@@ -77113,7 +77285,7 @@
  
  		/* Can addr have changed??
  		 *
-@@ -1240,6 +1279,9 @@ unmap_and_free_vma:
+@@ -1250,6 +1289,9 @@ unmap_and_free_vma:
  free_vma:
  	kmem_cache_free(vm_area_cachep, vma);
  unacct_error:
@@ -77123,7 +77295,7 @@
  	if (charged)
  		vm_unacct_memory(charged);
  	return error;
-@@ -1570,12 +1612,16 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+@@ -1580,12 +1622,16 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
  	if (is_hugepage_only_range(vma->vm_mm, new_start, size))
  		return -EFAULT;
  
@@ -77141,7 +77313,7 @@
  
  	/* Ok, everything looks good - let it rip */
  	mm->total_vm += grow;
-@@ -1583,6 +1629,11 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+@@ -1593,6 +1639,11 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
  		mm->locked_vm += grow;
  	vm_stat_account(mm, vma->vm_flags, vma->vm_file, grow);
  	return 0;
@@ -77153,7 +77325,7 @@
  }
  
  #if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
-@@ -1869,6 +1920,7 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -1882,6 +1933,7 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
  
  	return 0;
  }
@@ -77161,7 +77333,7 @@
  
  /* Munmap is split into 2 main parts -- this part which finds
   * what needs doing, and the areas themselves, which do the
-@@ -1976,7 +2028,7 @@ static inline void verify_mm_writelocked(struct mm_struct *mm)
+@@ -1989,7 +2041,7 @@ static inline void verify_mm_writelocked(struct mm_struct *mm)
   *  anonymous maps.  eventually we may be able to do some
   *  brk-specific accounting here.
   */
@@ -77170,7 +77342,7 @@
  {
  	struct mm_struct * mm = current->mm;
  	struct vm_area_struct * vma, * prev;
-@@ -2036,8 +2088,11 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2049,8 +2101,11 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
  	if (mm->map_count > sysctl_max_map_count)
  		return -ENOMEM;
  
@@ -77183,7 +77355,7 @@
  
  	/* Can we just expand an old private anonymous mapping? */
  	vma = vma_merge(mm, prev, addr, addr + len, flags,
-@@ -2048,11 +2103,10 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2061,11 +2116,10 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
  	/*
  	 * create a vma struct for an anonymous mapping
  	 */
@@ -77199,7 +77371,7 @@
  
  	vma->vm_mm = mm;
  	vma->vm_start = addr;
-@@ -2068,8 +2122,19 @@ out:
+@@ -2081,8 +2135,19 @@ out:
  			mm->locked_vm += (len >> PAGE_SHIFT);
  	}
  	return addr;
@@ -77219,7 +77391,7 @@
  EXPORT_SYMBOL(do_brk);
  
  /* Release all mmaps. */
-@@ -2262,10 +2327,11 @@ static void special_mapping_close(struct vm_area_struct *vma)
+@@ -2275,10 +2340,11 @@ static void special_mapping_close(struct vm_area_struct *vma)
  {
  }
  
@@ -78366,7 +78538,7 @@
  	vma->vm_ops = &shmem_vm_ops;
  	return 0;
 diff --git a/mm/slab.c b/mm/slab.c
-index 5d1a782..f23819e 100644
+index c8d466a..3336a30 100644
 --- a/mm/slab.c
 +++ b/mm/slab.c
 @@ -115,30 +115,14 @@
@@ -79708,7 +79880,7 @@
   * swapin_readahead - swap in pages in hope we need them soon
   * @entry: swap entry of this memory
 diff --git a/mm/swapfile.c b/mm/swapfile.c
-index 9c590ee..9ce0143 100644
+index 270e136..07fd436 100644
 --- a/mm/swapfile.c
 +++ b/mm/swapfile.c
 @@ -35,6 +35,8 @@
@@ -79736,7 +79908,7 @@
  
  static DEFINE_MUTEX(swapon_mutex);
  
-@@ -454,7 +460,7 @@ no_page:
+@@ -456,7 +462,7 @@ no_page:
  	return 0;
  }
  
@@ -79745,7 +79917,7 @@
  {
  	struct swap_info_struct *si;
  	pgoff_t offset;
-@@ -475,6 +481,8 @@ swp_entry_t get_swap_page(void)
+@@ -477,6 +483,8 @@ swp_entry_t get_swap_page(void)
  			wrapped++;
  		}
  
@@ -79754,7 +79926,7 @@
  		if (!si->highest_bit)
  			continue;
  		if (!(si->flags & SWP_WRITEOK))
-@@ -485,6 +493,7 @@ swp_entry_t get_swap_page(void)
+@@ -487,6 +495,7 @@ swp_entry_t get_swap_page(void)
  		offset = scan_swap_map(si, SWAP_CACHE);
  		if (offset) {
  			spin_unlock(&swap_lock);
@@ -79762,7 +79934,7 @@
  			return swp_entry(type, offset);
  		}
  		next = swap_list.next;
-@@ -496,6 +505,8 @@ noswap:
+@@ -498,6 +507,8 @@ noswap:
  	return (swp_entry_t) {0};
  }
  
@@ -79771,7 +79943,7 @@
  /* The only caller of this function is now susupend routine */
  swp_entry_t get_swap_page_of_type(int type)
  {
-@@ -504,7 +515,7 @@ swp_entry_t get_swap_page_of_type(int type)
+@@ -506,7 +517,7 @@ swp_entry_t get_swap_page_of_type(int type)
  
  	spin_lock(&swap_lock);
  	si = swap_info + type;
@@ -79780,7 +79952,7 @@
  		nr_swap_pages--;
  		/* This is called for allocating swap entry, not cache */
  		offset = scan_swap_map(si, SWAP_MAP);
-@@ -577,6 +588,7 @@ static int swap_entry_free(struct swap_info_struct *p,
+@@ -579,6 +590,7 @@ static int swap_entry_free(struct swap_info_struct *p,
  	count = p->swap_map[offset];
  	/* free if no reference */
  	if (!count) {
@@ -79788,7 +79960,7 @@
  		if (offset < p->lowest_bit)
  			p->lowest_bit = offset;
  		if (offset > p->highest_bit)
-@@ -606,6 +618,8 @@ void swap_free(swp_entry_t entry)
+@@ -608,6 +620,8 @@ void swap_free(swp_entry_t entry)
  	}
  }
  
@@ -79797,7 +79969,7 @@
  /*
   * Called after dropping swapcache to decrease refcnt to swap entries.
   */
-@@ -690,6 +704,25 @@ int try_to_free_swap(struct page *page)
+@@ -692,6 +706,25 @@ int try_to_free_swap(struct page *page)
  	return 1;
  }
  
@@ -79823,7 +79995,7 @@
  /*
   * Free the swap entry like above, but also try to
   * free the page cache entry if it is the last user.
-@@ -728,6 +761,7 @@ int free_swap_and_cache(swp_entry_t entry)
+@@ -730,6 +763,7 @@ int free_swap_and_cache(swp_entry_t entry)
  	}
  	return p != NULL;
  }
@@ -79831,7 +80003,7 @@
  
  #ifdef CONFIG_HIBERNATION
  /*
-@@ -811,12 +845,14 @@ unsigned int count_swap_pages(int type, int free)
+@@ -813,12 +847,14 @@ unsigned int count_swap_pages(int type, int free)
   * force COW, vm_page_prot omits write permission from any private vma.
   */
  static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
@@ -79847,7 +80019,7 @@
  
  	if (mem_cgroup_try_charge_swapin(vma->vm_mm, page, GFP_KERNEL, &ptr)) {
  		ret = -ENOMEM;
-@@ -831,9 +867,11 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
+@@ -833,9 +869,11 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
  		goto out;
  	}
  
@@ -79861,7 +80033,7 @@
  		   pte_mkold(mk_pte(page, vma->vm_page_prot)));
  	page_add_anon_rmap(page, vma, addr);
  	mem_cgroup_commit_charge_swapin(page, ptr);
-@@ -851,7 +889,8 @@ out_nolock:
+@@ -853,7 +891,8 @@ out_nolock:
  
  static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
  				unsigned long addr, unsigned long end,
@@ -79871,7 +80043,7 @@
  {
  	pte_t swp_pte = swp_entry_to_pte(entry);
  	pte_t *pte;
-@@ -874,7 +913,7 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
+@@ -876,7 +915,7 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
  		 */
  		if (unlikely(pte_same(*pte, swp_pte))) {
  			pte_unmap(pte);
@@ -79880,7 +80052,7 @@
  			if (ret)
  				goto out;
  			pte = pte_offset_map(pmd, addr);
-@@ -887,7 +926,8 @@ out:
+@@ -889,7 +928,8 @@ out:
  
  static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
  				unsigned long addr, unsigned long end,
@@ -79890,7 +80062,7 @@
  {
  	pmd_t *pmd;
  	unsigned long next;
-@@ -898,7 +938,7 @@ static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
+@@ -900,7 +940,7 @@ static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
  		next = pmd_addr_end(addr, end);
  		if (pmd_none_or_clear_bad(pmd))
  			continue;
@@ -79899,7 +80071,7 @@
  		if (ret)
  			return ret;
  	} while (pmd++, addr = next, addr != end);
-@@ -907,7 +947,8 @@ static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
+@@ -909,7 +949,8 @@ static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
  
  static inline int unuse_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
  				unsigned long addr, unsigned long end,
@@ -79909,7 +80081,7 @@
  {
  	pud_t *pud;
  	unsigned long next;
-@@ -918,7 +959,7 @@ static inline int unuse_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
+@@ -920,7 +961,7 @@ static inline int unuse_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
  		next = pud_addr_end(addr, end);
  		if (pud_none_or_clear_bad(pud))
  			continue;
@@ -79918,7 +80090,7 @@
  		if (ret)
  			return ret;
  	} while (pud++, addr = next, addr != end);
-@@ -926,7 +967,8 @@ static inline int unuse_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
+@@ -928,7 +969,8 @@ static inline int unuse_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
  }
  
  static int unuse_vma(struct vm_area_struct *vma,
@@ -79928,7 +80100,7 @@
  {
  	pgd_t *pgd;
  	unsigned long addr, end, next;
-@@ -948,7 +990,7 @@ static int unuse_vma(struct vm_area_struct *vma,
+@@ -950,7 +992,7 @@ static int unuse_vma(struct vm_area_struct *vma,
  		next = pgd_addr_end(addr, end);
  		if (pgd_none_or_clear_bad(pgd))
  			continue;
@@ -79937,7 +80109,7 @@
  		if (ret)
  			return ret;
  	} while (pgd++, addr = next, addr != end);
-@@ -956,7 +998,8 @@ static int unuse_vma(struct vm_area_struct *vma,
+@@ -958,7 +1000,8 @@ static int unuse_vma(struct vm_area_struct *vma,
  }
  
  static int unuse_mm(struct mm_struct *mm,
@@ -79947,7 +80119,7 @@
  {
  	struct vm_area_struct *vma;
  	int ret = 0;
-@@ -972,7 +1015,7 @@ static int unuse_mm(struct mm_struct *mm,
+@@ -974,7 +1017,7 @@ static int unuse_mm(struct mm_struct *mm,
  		lock_page(page);
  	}
  	for (vma = mm->mmap; vma; vma = vma->vm_next) {
@@ -79956,7 +80128,7 @@
  			break;
  	}
  	up_read(&mm->mmap_sem);
-@@ -1034,6 +1077,7 @@ static int try_to_unuse(unsigned int type)
+@@ -1036,6 +1079,7 @@ static int try_to_unuse(unsigned int type)
  	int retval = 0;
  	int reset_overflow = 0;
  	int shmem;
@@ -79964,7 +80136,7 @@
  
  	/*
  	 * When searching mms for an entry, a good strategy is to
-@@ -1086,6 +1130,13 @@ static int try_to_unuse(unsigned int type)
+@@ -1088,6 +1132,13 @@ static int try_to_unuse(unsigned int type)
  			break;
  		}
  
@@ -79978,7 +80150,7 @@
  		/*
  		 * Don't hold on to start_mm if it looks like exiting.
  		 */
-@@ -1108,6 +1159,20 @@ static int try_to_unuse(unsigned int type)
+@@ -1110,6 +1161,20 @@ static int try_to_unuse(unsigned int type)
  		lock_page(page);
  		wait_on_page_writeback(page);
  
@@ -79999,7 +80171,7 @@
  		/*
  		 * Remove all references to entry.
  		 * Whenever we reach init_mm, there's no address space
-@@ -1119,7 +1184,7 @@ static int try_to_unuse(unsigned int type)
+@@ -1121,7 +1186,7 @@ static int try_to_unuse(unsigned int type)
  			if (start_mm == &init_mm)
  				shmem = shmem_unuse(entry, page);
  			else
@@ -80008,7 +80180,7 @@
  		}
  		if (swap_count(*swap_map)) {
  			int set_start_mm = (*swap_map >= swcount);
-@@ -1149,7 +1214,7 @@ static int try_to_unuse(unsigned int type)
+@@ -1151,7 +1216,7 @@ static int try_to_unuse(unsigned int type)
  					set_start_mm = 1;
  					shmem = shmem_unuse(entry, page);
  				} else
@@ -80017,7 +80189,7 @@
  
  				if (set_start_mm && *swap_map < swcount) {
  					mmput(new_start_mm);
-@@ -1171,6 +1236,8 @@ static int try_to_unuse(unsigned int type)
+@@ -1173,6 +1238,8 @@ static int try_to_unuse(unsigned int type)
  			retval = shmem;
  			break;
  		}
@@ -80026,7 +80198,7 @@
  		if (retval) {
  			unlock_page(page);
  			page_cache_release(page);
-@@ -1518,6 +1585,10 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
+@@ -1520,6 +1587,10 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
  	int i, type, prev;
  	int err;
  
@@ -80037,7 +80209,7 @@
  	if (!capable(CAP_SYS_ADMIN))
  		return -EPERM;
  
-@@ -1627,6 +1698,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
+@@ -1629,6 +1700,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
  	spin_unlock(&swap_lock);
  	mutex_unlock(&swapon_mutex);
  	vfree(swap_map);
@@ -80045,7 +80217,7 @@
  	/* Destroy swap account informatin */
  	swap_cgroup_swapoff(type);
  
-@@ -1649,6 +1721,8 @@ out:
+@@ -1651,6 +1723,8 @@ out:
  	return err;
  }
  
@@ -80054,7 +80226,7 @@
  #ifdef CONFIG_PROC_FS
  /* iterator */
  static void *swap_start(struct seq_file *swap, loff_t *pos)
-@@ -1729,21 +1803,55 @@ static const struct seq_operations swaps_op = {
+@@ -1731,21 +1805,55 @@ static const struct seq_operations swaps_op = {
  	.show =		swap_show
  };
  
@@ -80112,7 +80284,7 @@
  	return 0;
  }
  __initcall(procswaps_init);
-@@ -1973,6 +2081,11 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
+@@ -1975,6 +2083,11 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
  		goto bad_swap;
  	}
  
@@ -80124,7 +80296,7 @@
  	if (p->bdev) {
  		if (blk_queue_nonrot(bdev_get_queue(p->bdev))) {
  			p->flags |= SWP_SOLIDSTATE;
-@@ -1991,6 +2104,8 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
+@@ -1993,6 +2106,8 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
  		p->prio = --least_priority;
  	p->swap_map = swap_map;
  	p->flags |= SWP_WRITEOK;
@@ -80133,7 +80305,7 @@
  	nr_swap_pages += nr_good_pages;
  	total_swap_pages += nr_good_pages;
  
-@@ -2049,6 +2164,8 @@ out:
+@@ -2051,6 +2166,8 @@ out:
  	return error;
  }
  
@@ -80142,7 +80314,7 @@
  void si_swapinfo(struct sysinfo *val)
  {
  	unsigned int i;
-@@ -2146,6 +2263,8 @@ void swap_duplicate(swp_entry_t entry)
+@@ -2148,6 +2265,8 @@ void swap_duplicate(swp_entry_t entry)
  	__swap_duplicate(entry, SWAP_MAP);
  }
  
@@ -80310,7 +80482,7 @@
  static void *s_start(struct seq_file *m, loff_t *pos)
  {
 diff --git a/mm/vmscan.c b/mm/vmscan.c
-index 692807f..a1bd5b6 100644
+index 4649929..a3371d2 100644
 --- a/mm/vmscan.c
 +++ b/mm/vmscan.c
 @@ -41,10 +41,14 @@
@@ -80364,7 +80536,7 @@
  				ClearPageDirty(page);
  				printk("%s: orphaned page\n", __func__);
  				return PAGE_CLEAN;
-@@ -1321,6 +1333,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
+@@ -1358,6 +1370,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
  	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
  	unsigned long nr_rotated = 0;
  
@@ -80372,7 +80544,7 @@
  	lru_add_drain();
  	spin_lock_irq(&zone->lru_lock);
  	nr_taken = sc->isolate_pages(nr_pages, &l_hold, &pgscanned, sc->order,
-@@ -1394,6 +1407,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
+@@ -1431,6 +1444,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
  						LRU_BASE   + file * LRU_FILE);
  	__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
  	spin_unlock_irq(&zone->lru_lock);
@@ -80380,7 +80552,7 @@
  }
  
  static int inactive_anon_is_low_global(struct zone *zone)
-@@ -1636,6 +1650,8 @@ static void shrink_zone(int priority, struct zone *zone,
+@@ -1673,6 +1687,8 @@ static void shrink_zone(int priority, struct zone *zone,
  				nr_reclaimed += shrink_list(l, nr_to_scan,
  							    zone, sc, priority);
  			}
@@ -80389,7 +80561,7 @@
  		}
  		/*
  		 * On large memory systems, scan >> priority can become
-@@ -1714,6 +1730,9 @@ static void shrink_zones(int priority, struct zonelist *zonelist,
+@@ -1751,6 +1767,9 @@ static void shrink_zones(int priority, struct zonelist *zonelist,
  		}
  
  		shrink_zone(priority, zone, sc);
@@ -80399,7 +80571,7 @@
  	}
  }
  
-@@ -1745,10 +1764,13 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
+@@ -1782,10 +1801,13 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
  	struct zone *zone;
  	enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
  
@@ -80413,7 +80585,7 @@
  	/*
  	 * mem_cgroup will not do shrink_slab.
  	 */
-@@ -1797,6 +1819,11 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
+@@ -1834,6 +1856,11 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
  			sc->may_writepage = 1;
  		}
  
@@ -80425,7 +80597,7 @@
  		/* Take a nap, wait for some writeback to complete */
  		if (sc->nr_scanned && priority < DEF_PRIORITY - 2)
  			congestion_wait(BLK_RW_ASYNC, HZ/10);
-@@ -1828,6 +1855,7 @@ out:
+@@ -1865,6 +1892,7 @@ out:
  
  	delayacct_freepages_end();
  
@@ -81148,7 +81320,7 @@
  	else
  		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
 diff --git a/net/core/dev.c b/net/core/dev.c
-index 74d0cce..ee00d53 100644
+index 915d0ae..57a9f40 100644
 --- a/net/core/dev.c
 +++ b/net/core/dev.c
 @@ -130,6 +130,9 @@
@@ -81545,10 +81717,10 @@
  		for (dst = dst_busy_list; dst; dst = dst->next) {
  			last = dst;
 diff --git a/net/core/ethtool.c b/net/core/ethtool.c
-index 4c12ddb..59dfa3e 100644
+index 5aef51e..b7d4d7ff 100644
 --- a/net/core/ethtool.c
 +++ b/net/core/ethtool.c
-@@ -954,7 +954,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
+@@ -975,7 +975,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
  	case ETHTOOL_GRXCLSRLALL:
  		break;
  	default:
@@ -81593,7 +81765,7 @@
  		return -ENOMEM;
  	if (copy_from_user(fp->insns, fprog->filter, fsize)) {
 diff --git a/net/core/neighbour.c b/net/core/neighbour.c
-index e587e68..705e8ea 100644
+index e696250..a2bc8f3 100644
 --- a/net/core/neighbour.c
 +++ b/net/core/neighbour.c
 @@ -21,6 +21,8 @@
@@ -81698,7 +81870,7 @@
  }
  
  int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
-@@ -1273,9 +1296,16 @@ static void neigh_proxy_process(unsigned long arg)
+@@ -1276,9 +1299,16 @@ static void neigh_proxy_process(unsigned long arg)
  		if (tdif <= 0) {
  			struct net_device *dev = skb->dev;
  			__skb_unlink(skb, &tbl->proxy_queue);
@@ -82533,7 +82705,7 @@
  		case SIOCGARP:
  			err = copy_from_user(&r, arg, sizeof(struct arpreq));
 diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
-index cc35645..6450b63 100644
+index f84f6dd..d297fd9 100644
 --- a/net/ipv4/devinet.c
 +++ b/net/ipv4/devinet.c
 @@ -110,10 +110,11 @@ static inline void devinet_sysctl_unregister(struct in_device *idev)
@@ -84786,7 +84958,7 @@
  				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS);
  
 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
-index 7cda24b..e141833 100644
+index 7cda24b..e1418334 100644
 --- a/net/ipv4/tcp_ipv4.c
 +++ b/net/ipv4/tcp_ipv4.c
 @@ -72,6 +72,8 @@
@@ -85065,7 +85237,7 @@
  		newtp->pred_flags = 0;
  		newtp->rcv_wup = newtp->copied_seq = newtp->rcv_nxt = treq->rcv_isn + 1;
 diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
-index fcd278a..723de21 100644
+index af83bdf..01b704f 100644
 --- a/net/ipv4/tcp_output.c
 +++ b/net/ipv4/tcp_output.c
 @@ -39,6 +39,9 @@
@@ -85180,7 +85352,7 @@
  			tp->rcv_ssthresh = min(tp->rcv_ssthresh,
  					       4U * tp->advmss);
  
-@@ -2142,6 +2173,7 @@ void tcp_send_fin(struct sock *sk)
+@@ -2145,6 +2176,7 @@ void tcp_send_fin(struct sock *sk)
  				break;
  			yield();
  		}
@@ -85188,7 +85360,7 @@
  
  		/* Reserve space for headers and prepare control bits. */
  		skb_reserve(skb, MAX_TCP_HEADER);
-@@ -2201,6 +2233,10 @@ int tcp_send_synack(struct sock *sk)
+@@ -2204,6 +2236,10 @@ int tcp_send_synack(struct sock *sk)
  			struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
  			if (nskb == NULL)
  				return -ENOMEM;
@@ -85199,7 +85371,7 @@
  			tcp_unlink_write_queue(skb, sk);
  			skb_header_release(nskb);
  			__tcp_add_write_queue_head(sk, nskb);
-@@ -2310,6 +2346,7 @@ static void tcp_connect_init(struct sock *sk)
+@@ -2313,6 +2349,7 @@ static void tcp_connect_init(struct sock *sk)
  	struct dst_entry *dst = __sk_dst_get(sk);
  	struct tcp_sock *tp = tcp_sk(sk);
  	__u8 rcv_wscale;
@@ -85207,7 +85379,7 @@
  
  	/* We'll fix this up when we get a response from the other end.
  	 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
-@@ -2329,11 +2366,25 @@ static void tcp_connect_init(struct sock *sk)
+@@ -2332,11 +2369,25 @@ static void tcp_connect_init(struct sock *sk)
  	tcp_mtup_init(sk);
  	tcp_sync_mss(sk, dst_mtu(dst));
  
@@ -85233,7 +85405,7 @@
  
  	tcp_initialize_rcv_mss(sk);
  
-@@ -2374,6 +2425,10 @@ int tcp_connect(struct sock *sk)
+@@ -2377,6 +2428,10 @@ int tcp_connect(struct sock *sk)
  	buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation);
  	if (unlikely(buff == NULL))
  		return -ENOBUFS;
@@ -86539,10 +86711,10 @@
  	list_del_rcu(&reg->list);
  	mutex_unlock(&nf_hook_mutex);
 diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
-index 27c30cf..c5e4424 100644
+index 95682e5..50e0994 100644
 --- a/net/netfilter/ipvs/ip_vs_conn.c
 +++ b/net/netfilter/ipvs/ip_vs_conn.c
-@@ -1070,7 +1070,7 @@ int __init ip_vs_conn_init(void)
+@@ -1074,7 +1074,7 @@ int __init ip_vs_conn_init(void)
  	/* Allocate ip_vs_conn slab cache */
  	ip_vs_conn_cachep = kmem_cache_create("ip_vs_conn",
  					      sizeof(struct ip_vs_conn), 0,
@@ -88555,7 +88727,7 @@
  module_init(recent_mt_init);
  module_exit(recent_mt_exit);
 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
-index 19e9800..c97510c 100644
+index 5a7dcdf..03dd3c1 100644
 --- a/net/netlink/af_netlink.c
 +++ b/net/netlink/af_netlink.c
 @@ -60,29 +60,14 @@
@@ -88696,7 +88868,7 @@
  
  	if (p->failure) {
  		netlink_overrun(sk);
-@@ -1663,6 +1675,10 @@ static int netlink_dump(struct sock *sk)
+@@ -1649,6 +1661,10 @@ static int netlink_dump(struct sock *sk)
  	skb = sock_rmalloc(sk, NLMSG_GOODSIZE, 0, GFP_KERNEL);
  	if (!skb)
  		goto errout;
@@ -88834,7 +89006,7 @@
  	}
  }
 diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
-index 4ae6aa5..8bc040c 100644
+index 3028001..6a926d8 100644
 --- a/net/sched/sch_generic.c
 +++ b/net/sched/sch_generic.c
 @@ -179,17 +179,23 @@ static inline int qdisc_restart(struct Qdisc *q)
@@ -89401,7 +89573,7 @@
  
  	dprintk("RPC:       %s: returning\n", __func__);
 diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
-index 5cdbf7c..a57133e 100644
+index b6fcf68..8907690 100644
 --- a/net/sunrpc/xprtsock.c
 +++ b/net/sunrpc/xprtsock.c
 @@ -72,6 +72,8 @@ static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE;
@@ -89430,7 +89602,7 @@
  		.procname	= "tcp_fin_timeout",
  		.data		= &xs_tcp_fin_timeout,
  		.maxlen		= sizeof(xs_tcp_fin_timeout),
-@@ -736,16 +748,22 @@ static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *s
+@@ -737,16 +749,22 @@ static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *s
  
  static void xs_reset_transport(struct sock_xprt *transport)
  {
@@ -89458,7 +89630,7 @@
  	sk->sk_user_data = NULL;
  
  	xs_restore_old_callbacks(transport, sk);
-@@ -807,6 +825,7 @@ static void xs_destroy(struct rpc_xprt *xprt)
+@@ -808,6 +826,7 @@ static void xs_destroy(struct rpc_xprt *xprt)
  	xs_close(xprt);
  	xs_free_peer_addresses(xprt);
  	kfree(xprt->slot);
@@ -89466,7 +89638,7 @@
  	kfree(xprt);
  	module_put(THIS_MODULE);
  }
-@@ -1703,7 +1722,12 @@ static void xs_udp_connect_worker4(struct work_struct *work)
+@@ -1709,7 +1728,12 @@ static void xs_udp_connect_worker4(struct work_struct *work)
  	struct rpc_xprt *xprt = &transport->xprt;
  	struct socket *sock = transport->sock;
  	int err, status = -EIO;
@@ -89479,7 +89651,7 @@
  	if (xprt->shutdown)
  		goto out;
  
-@@ -1715,6 +1739,7 @@ static void xs_udp_connect_worker4(struct work_struct *work)
+@@ -1721,6 +1745,7 @@ static void xs_udp_connect_worker4(struct work_struct *work)
  		dprintk("RPC:       can't create UDP transport socket (%d).\n", -err);
  		goto out;
  	}
@@ -89487,7 +89659,7 @@
  	xs_reclassify_socket4(sock);
  
  	if (xs_bind4(transport, sock)) {
-@@ -1733,6 +1758,8 @@ static void xs_udp_connect_worker4(struct work_struct *work)
+@@ -1739,6 +1764,8 @@ static void xs_udp_connect_worker4(struct work_struct *work)
  out:
  	xprt_clear_connecting(xprt);
  	xprt_wake_pending_tasks(xprt, status);
@@ -89496,7 +89668,7 @@
  }
  
  /**
-@@ -1748,7 +1775,12 @@ static void xs_udp_connect_worker6(struct work_struct *work)
+@@ -1754,7 +1781,12 @@ static void xs_udp_connect_worker6(struct work_struct *work)
  	struct rpc_xprt *xprt = &transport->xprt;
  	struct socket *sock = transport->sock;
  	int err, status = -EIO;
@@ -89509,7 +89681,7 @@
  	if (xprt->shutdown)
  		goto out;
  
-@@ -1760,6 +1792,7 @@ static void xs_udp_connect_worker6(struct work_struct *work)
+@@ -1766,6 +1798,7 @@ static void xs_udp_connect_worker6(struct work_struct *work)
  		dprintk("RPC:       can't create UDP transport socket (%d).\n", -err);
  		goto out;
  	}
@@ -89517,7 +89689,7 @@
  	xs_reclassify_socket6(sock);
  
  	if (xs_bind6(transport, sock) < 0) {
-@@ -1778,6 +1811,8 @@ static void xs_udp_connect_worker6(struct work_struct *work)
+@@ -1784,6 +1817,8 @@ static void xs_udp_connect_worker6(struct work_struct *work)
  out:
  	xprt_clear_connecting(xprt);
  	xprt_wake_pending_tasks(xprt, status);
@@ -89526,7 +89698,7 @@
  }
  
  /*
-@@ -1873,7 +1908,12 @@ static void xs_tcp_setup_socket(struct rpc_xprt *xprt,
+@@ -1879,7 +1914,12 @@ static void xs_tcp_setup_socket(struct rpc_xprt *xprt,
  {
  	struct socket *sock = transport->sock;
  	int status = -EIO;
@@ -89539,7 +89711,7 @@
  	if (xprt->shutdown)
  		goto out;
  
-@@ -1937,6 +1977,8 @@ out_eagain:
+@@ -1943,6 +1983,8 @@ out_eagain:
  out:
  	xprt_clear_connecting(xprt);
  	xprt_wake_pending_tasks(xprt, status);
@@ -89548,7 +89720,7 @@
  }
  
  static struct socket *xs_create_tcp_sock4(struct rpc_xprt *xprt,
-@@ -1952,6 +1994,7 @@ static struct socket *xs_create_tcp_sock4(struct rpc_xprt *xprt,
+@@ -1958,6 +2000,7 @@ static struct socket *xs_create_tcp_sock4(struct rpc_xprt *xprt,
  				-err);
  		goto out_err;
  	}
@@ -89556,7 +89728,7 @@
  	xs_reclassify_socket4(sock);
  
  	if (xs_bind4(transport, sock) < 0) {
-@@ -1991,6 +2034,7 @@ static struct socket *xs_create_tcp_sock6(struct rpc_xprt *xprt,
+@@ -1997,6 +2040,7 @@ static struct socket *xs_create_tcp_sock6(struct rpc_xprt *xprt,
  				-err);
  		goto out_err;
  	}



More information about the Kernel-svn-changes mailing list