[kernel] r14893 - in dists/trunk/linux-2.6/debian: . patches/bugfix/all/stable patches/series

Ben Hutchings benh at alioth.debian.org
Thu Jan 7 02:59:15 UTC 2010


Author: benh
Date: Thu Jan  7 02:59:12 2010
New Revision: 14893

Log:
Add stable release 2.6.32.3

Added:
   dists/trunk/linux-2.6/debian/patches/bugfix/all/stable/2.6.32.3.patch
Modified:
   dists/trunk/linux-2.6/debian/changelog
   dists/trunk/linux-2.6/debian/patches/series/4

Modified: dists/trunk/linux-2.6/debian/changelog
==============================================================================
--- dists/trunk/linux-2.6/debian/changelog	Tue Jan  5 03:14:04 2010	(r14892)
+++ dists/trunk/linux-2.6/debian/changelog	Thu Jan  7 02:59:12 2010	(r14893)
@@ -9,6 +9,10 @@
     (Closes: #508527)
   * dmfe/tulip: Let dmfe handle DM910x except for SPARC on-board chips
     (Closes: #515533)
+  * Add stable release 2.6.32.3:
+    - ath5k: fix SWI calibration interrupt storm (may fix #563466)
+    - iwl3945: disable power save (Closes: #563693)
+    - rt2x00: Disable powersaving for rt61pci and rt2800pci (may fix #561087)
 
   [ maximilian attems ]
   * topconfig set CAN_EMS_USB, BT_MRVL, BT_MRVL_SDIO, BE2ISCSI, SCSI_PMCRAID,

Added: dists/trunk/linux-2.6/debian/patches/bugfix/all/stable/2.6.32.3.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/trunk/linux-2.6/debian/patches/bugfix/all/stable/2.6.32.3.patch	Thu Jan  7 02:59:12 2010	(r14893)
@@ -0,0 +1,4878 @@
+diff --git a/Documentation/filesystems/ext4.txt b/Documentation/filesystems/ext4.txt
+index af6885c..e1def17 100644
+--- a/Documentation/filesystems/ext4.txt
++++ b/Documentation/filesystems/ext4.txt
+@@ -196,7 +196,7 @@ nobarrier		This also requires an IO stack which can support
+ 			also be used to enable or disable barriers, for
+ 			consistency with other ext4 mount options.
+ 
+-inode_readahead=n	This tuning parameter controls the maximum
++inode_readahead_blks=n	This tuning parameter controls the maximum
+ 			number of inode table blocks that ext4's inode
+ 			table readahead algorithm will pre-read into
+ 			the buffer cache.  The default value is 32 blocks.
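The hunk above brings the documentation in line with the option name the filesystem actually parses, inode_readahead_blks. For illustration only (device and value are hypothetical), the option is passed at mount time as:

    mount -t ext4 -o inode_readahead_blks=64 /dev/sdb1 /mnt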
+diff --git a/Makefile b/Makefile
+index 23803ce..482dcdd 100644
+diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
+index a5b632e..f0c624f 100644
+--- a/arch/powerpc/kernel/align.c
++++ b/arch/powerpc/kernel/align.c
+@@ -642,10 +642,14 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg,
+  */
+ static int emulate_vsx(unsigned char __user *addr, unsigned int reg,
+ 		       unsigned int areg, struct pt_regs *regs,
+-		       unsigned int flags, unsigned int length)
++		       unsigned int flags, unsigned int length,
++		       unsigned int elsize)
+ {
+ 	char *ptr;
++	unsigned long *lptr;
+ 	int ret = 0;
++	int sw = 0;
++	int i, j;
+ 
+ 	flush_vsx_to_thread(current);
+ 
+@@ -654,19 +658,35 @@ static int emulate_vsx(unsigned char __user *addr, unsigned int reg,
+ 	else
+ 		ptr = (char *) &current->thread.vr[reg - 32];
+ 
+-	if (flags & ST)
+-		ret = __copy_to_user(addr, ptr, length);
+-        else {
+-		if (flags & SPLT){
+-			ret = __copy_from_user(ptr, addr, length);
+-			ptr += length;
++	lptr = (unsigned long *) ptr;
++
++	if (flags & SW)
++		sw = elsize-1;
++
++	for (j = 0; j < length; j += elsize) {
++		for (i = 0; i < elsize; ++i) {
++			if (flags & ST)
++				ret |= __put_user(ptr[i^sw], addr + i);
++			else
++				ret |= __get_user(ptr[i^sw], addr + i);
+ 		}
+-		ret |= __copy_from_user(ptr, addr, length);
++		ptr  += elsize;
++		addr += elsize;
+ 	}
+-	if (flags & U)
+-		regs->gpr[areg] = regs->dar;
+-	if (ret)
++
++	if (!ret) {
++		if (flags & U)
++			regs->gpr[areg] = regs->dar;
++
++		/* Splat load copies the same data to top and bottom 8 bytes */
++		if (flags & SPLT)
++			lptr[1] = lptr[0];
++		/* For 8 byte loads, zero the top 8 bytes */
++		else if (!(flags & ST) && (8 == length))
++			lptr[1] = 0;
++	} else
+ 		return -EFAULT;
++
+ 	return 1;
+ }
+ #endif
+@@ -767,16 +787,25 @@ int fix_alignment(struct pt_regs *regs)
+ 
+ #ifdef CONFIG_VSX
+ 	if ((instruction & 0xfc00003e) == 0x7c000018) {
+-		/* Additional register addressing bit (64 VSX vs 32 FPR/GPR */
++		unsigned int elsize;
++
++		/* Additional register addressing bit (64 VSX vs 32 FPR/GPR) */
+ 		reg |= (instruction & 0x1) << 5;
+ 		/* Simple inline decoder instead of a table */
++		/* VSX has only 8 and 16 byte memory accesses */
++		nb = 8;
+ 		if (instruction & 0x200)
+ 			nb = 16;
+-		else if (instruction & 0x080)
+-			nb = 8;
+-		else
+-			nb = 4;
++
++		/* Vector stores in little-endian mode swap individual
++		   elements, so process them separately */
++		elsize = 4;
++		if (instruction & 0x80)
++			elsize = 8;
++
+ 		flags = 0;
++		if (regs->msr & MSR_LE)
++			flags |= SW;
+ 		if (instruction & 0x100)
+ 			flags |= ST;
+ 		if (instruction & 0x040)
+@@ -787,7 +816,7 @@ int fix_alignment(struct pt_regs *regs)
+ 			nb = 8;
+ 		}
+ 		PPC_WARN_EMULATED(vsx);
+-		return emulate_vsx(addr, reg, areg, regs, flags, nb);
++		return emulate_vsx(addr, reg, areg, regs, flags, nb, elsize);
+ 	}
+ #endif
+ 	/* A size of 0 indicates an instruction we don't support, with
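The rewritten copy loop in emulate_vsx() above byte-reverses each element when the new SW flag is set: sw becomes elsize - 1, so the index i ^ sw walks an element's bytes backwards, while sw == 0 leaves a straight copy (i ^ 0 == i). A minimal user-space sketch of the same indexing trick, with made-up data (illustrative only, not part of the patch):

    #include <stdio.h>

    int main(void)
    {
        unsigned char src[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
        unsigned char dst[8];
        unsigned int elsize = 4;        /* element size in bytes */
        unsigned int sw = elsize - 1;   /* as when MSR_LE is set */
        unsigned int i, j;

        for (j = 0; j < sizeof(src); j += elsize)
            for (i = 0; i < elsize; ++i)
                dst[j + i] = src[j + (i ^ sw)];

        for (i = 0; i < sizeof(dst); ++i)
            printf("%u ", dst[i]);      /* prints: 3 2 1 0 7 6 5 4 */
        printf("\n");
        return 0;
    }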
+diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
+index 7e2b6ba..0e3e728 100644
+--- a/arch/x86/include/asm/msr.h
++++ b/arch/x86/include/asm/msr.h
+@@ -27,6 +27,18 @@ struct msr {
+ 	};
+ };
+ 
++struct msr_info {
++	u32 msr_no;
++	struct msr reg;
++	struct msr *msrs;
++	int err;
++};
++
++struct msr_regs_info {
++	u32 *regs;
++	int err;
++};
++
+ static inline unsigned long long native_read_tscp(unsigned int *aux)
+ {
+ 	unsigned long low, high;
+@@ -244,11 +256,14 @@ do {                                                            \
+ 
+ #define write_rdtscp_aux(val) wrmsr(0xc0000103, (val), 0)
+ 
++struct msr *msrs_alloc(void);
++void msrs_free(struct msr *msrs);
++
+ #ifdef CONFIG_SMP
+ int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
+ int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
+-void rdmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs);
+-void wrmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs);
++void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
++void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
+ int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
+ int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
+ int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
+diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
+index c978648..13b1885 100644
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -180,7 +180,7 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
+ 				unsigned int *ecx, unsigned int *edx)
+ {
+ 	/* ecx is often an input as well as an output. */
+-	asm("cpuid"
++	asm volatile("cpuid"
+ 	    : "=a" (*eax),
+ 	      "=b" (*ebx),
+ 	      "=c" (*ecx),
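The switch from asm to asm volatile above is load-bearing: without volatile, GCC may hoist, merge, or drop an asm whose outputs look unused or loop-invariant, and cpuid must actually execute every time (it serializes, and leaves such as leaf 1 return per-CPU data like the initial APIC ID). A user-space sketch of the same construct, a hand-rolled wrapper rather than the kernel's native_cpuid() itself:

    static inline void my_cpuid(unsigned int leaf,
                                unsigned int *eax, unsigned int *ebx,
                                unsigned int *ecx, unsigned int *edx)
    {
        *eax = leaf;
        *ecx = 0;
        /* volatile: the instruction is never cached, reordered away,
         * or eliminated by the optimizer */
        asm volatile("cpuid"
                     : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
                     : "0" (*eax), "2" (*ecx));
    }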
+diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
+index d1414af..e90a8a9 100644
+--- a/arch/x86/include/asm/uv/uv_hub.h
++++ b/arch/x86/include/asm/uv/uv_hub.h
+@@ -31,20 +31,20 @@
+  *		  contiguous (although various IO spaces may punch holes in
+  *		  it)..
+  *
+- * 	N	- Number of bits in the node portion of a socket physical
+- * 		  address.
++ *	N	- Number of bits in the node portion of a socket physical
++ *		  address.
+  *
+- * 	NASID   - network ID of a router, Mbrick or Cbrick. Nasid values of
+- * 	 	  routers always have low bit of 1, C/MBricks have low bit
+- * 		  equal to 0. Most addressing macros that target UV hub chips
+- * 		  right shift the NASID by 1 to exclude the always-zero bit.
+- * 		  NASIDs contain up to 15 bits.
++ *	NASID   - network ID of a router, Mbrick or Cbrick. Nasid values of
++ *		  routers always have low bit of 1, C/MBricks have low bit
++ *		  equal to 0. Most addressing macros that target UV hub chips
++ *		  right shift the NASID by 1 to exclude the always-zero bit.
++ *		  NASIDs contain up to 15 bits.
+  *
+  *	GNODE   - NASID right shifted by 1 bit. Most mmrs contain gnodes instead
+  *		  of nasids.
+  *
+- * 	PNODE   - the low N bits of the GNODE. The PNODE is the most useful variant
+- * 		  of the nasid for socket usage.
++ *	PNODE   - the low N bits of the GNODE. The PNODE is the most useful variant
++ *		  of the nasid for socket usage.
+  *
+  *
+  *  NumaLink Global Physical Address Format:
+@@ -71,12 +71,12 @@
+  *
+  *
+  * APICID format
+- * 	NOTE!!!!!! This is the current format of the APICID. However, code
+- * 	should assume that this will change in the future. Use functions
+- * 	in this file for all APICID bit manipulations and conversion.
++ *	NOTE!!!!!! This is the current format of the APICID. However, code
++ *	should assume that this will change in the future. Use functions
++ *	in this file for all APICID bit manipulations and conversion.
+  *
+- * 		1111110000000000
+- * 		5432109876543210
++ *		1111110000000000
++ *		5432109876543210
+  *		pppppppppplc0cch
+  *		sssssssssss
+  *
+@@ -89,9 +89,9 @@
+  *	Note: Processor only supports 12 bits in the APICID register. The ACPI
+  *	      tables hold all 16 bits. Software needs to be aware of this.
+  *
+- * 	      Unless otherwise specified, all references to APICID refer to
+- * 	      the FULL value contained in ACPI tables, not the subset in the
+- * 	      processor APICID register.
++ *	      Unless otherwise specified, all references to APICID refer to
++ *	      the FULL value contained in ACPI tables, not the subset in the
++ *	      processor APICID register.
+  */
+ 
+ 
+@@ -151,16 +151,16 @@ struct uv_hub_info_s {
+ };
+ 
+ DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
+-#define uv_hub_info 		(&__get_cpu_var(__uv_hub_info))
++#define uv_hub_info		(&__get_cpu_var(__uv_hub_info))
+ #define uv_cpu_hub_info(cpu)	(&per_cpu(__uv_hub_info, cpu))
+ 
+ /*
+  * Local & Global MMR space macros.
+- * 	Note: macros are intended to be used ONLY by inline functions
+- * 	in this file - not by other kernel code.
+- * 		n -  NASID (full 15-bit global nasid)
+- * 		g -  GNODE (full 15-bit global nasid, right shifted 1)
+- * 		p -  PNODE (local part of nsids, right shifted 1)
++ *	Note: macros are intended to be used ONLY by inline functions
++ *	in this file - not by other kernel code.
++ *		n -  NASID (full 15-bit global nasid)
++ *		g -  GNODE (full 15-bit global nasid, right shifted 1)
++ *		p -  PNODE (local part of nsids, right shifted 1)
+  */
+ #define UV_NASID_TO_PNODE(n)		(((n) >> 1) & uv_hub_info->pnode_mask)
+ #define UV_PNODE_TO_GNODE(p)		((p) |uv_hub_info->gnode_extra)
+@@ -213,8 +213,8 @@ DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
+ /*
+  * Macros for converting between kernel virtual addresses, socket local physical
+  * addresses, and UV global physical addresses.
+- * 	Note: use the standard __pa() & __va() macros for converting
+- * 	      between socket virtual and socket physical addresses.
++ *	Note: use the standard __pa() & __va() macros for converting
++ *	      between socket virtual and socket physical addresses.
+  */
+ 
+ /* socket phys RAM --> UV global physical address */
+@@ -265,21 +265,18 @@ static inline int uv_apicid_to_pnode(int apicid)
+  * Access global MMRs using the low memory MMR32 space. This region supports
+  * faster MMR access but not all MMRs are accessible in this space.
+  */
+-static inline unsigned long *uv_global_mmr32_address(int pnode,
+-				unsigned long offset)
++static inline unsigned long *uv_global_mmr32_address(int pnode, unsigned long offset)
+ {
+ 	return __va(UV_GLOBAL_MMR32_BASE |
+ 		       UV_GLOBAL_MMR32_PNODE_BITS(pnode) | offset);
+ }
+ 
+-static inline void uv_write_global_mmr32(int pnode, unsigned long offset,
+-				 unsigned long val)
++static inline void uv_write_global_mmr32(int pnode, unsigned long offset, unsigned long val)
+ {
+ 	writeq(val, uv_global_mmr32_address(pnode, offset));
+ }
+ 
+-static inline unsigned long uv_read_global_mmr32(int pnode,
+-						 unsigned long offset)
++static inline unsigned long uv_read_global_mmr32(int pnode, unsigned long offset)
+ {
+ 	return readq(uv_global_mmr32_address(pnode, offset));
+ }
+@@ -288,25 +285,32 @@ static inline unsigned long uv_read_global_mmr32(int pnode,
+  * Access Global MMR space using the MMR space located at the top of physical
+  * memory.
+  */
+-static inline unsigned long *uv_global_mmr64_address(int pnode,
+-				unsigned long offset)
++static inline unsigned long *uv_global_mmr64_address(int pnode, unsigned long offset)
+ {
+ 	return __va(UV_GLOBAL_MMR64_BASE |
+ 		    UV_GLOBAL_MMR64_PNODE_BITS(pnode) | offset);
+ }
+ 
+-static inline void uv_write_global_mmr64(int pnode, unsigned long offset,
+-				unsigned long val)
++static inline void uv_write_global_mmr64(int pnode, unsigned long offset, unsigned long val)
+ {
+ 	writeq(val, uv_global_mmr64_address(pnode, offset));
+ }
+ 
+-static inline unsigned long uv_read_global_mmr64(int pnode,
+-						 unsigned long offset)
++static inline unsigned long uv_read_global_mmr64(int pnode, unsigned long offset)
+ {
+ 	return readq(uv_global_mmr64_address(pnode, offset));
+ }
+ 
++static inline void uv_write_global_mmr8(int pnode, unsigned long offset, unsigned char val)
++{
++	writeb(val, uv_global_mmr64_address(pnode, offset));
++}
++
++static inline unsigned char uv_read_global_mmr8(int pnode, unsigned long offset)
++{
++	return readb(uv_global_mmr64_address(pnode, offset));
++}
++
+ /*
+  * Access hub local MMRs. Faster than using global space but only local MMRs
+  * are accessible.
+@@ -426,11 +430,17 @@ static inline void uv_set_scir_bits(unsigned char value)
+ 	}
+ }
+ 
++static inline unsigned long uv_scir_offset(int apicid)
++{
++	return SCIR_LOCAL_MMR_BASE | (apicid & 0x3f);
++}
++
+ static inline void uv_set_cpu_scir_bits(int cpu, unsigned char value)
+ {
+ 	if (uv_cpu_hub_info(cpu)->scir.state != value) {
++		uv_write_global_mmr8(uv_cpu_to_pnode(cpu),
++				uv_cpu_hub_info(cpu)->scir.offset, value);
+ 		uv_cpu_hub_info(cpu)->scir.state = value;
+-		uv_write_local_mmr8(uv_cpu_hub_info(cpu)->scir.offset, value);
+ 	}
+ }
+ 
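A worked example of the NASID/GNODE/PNODE relationships documented above (all values hypothetical):

    #include <stdio.h>

    int main(void)
    {
        unsigned int nasid = 0x96;        /* C-brick: low bit is 0  */
        unsigned int pnode_mask = 0x3f;   /* low N bits, N = 6 here */
        unsigned int gnode = nasid >> 1;          /* drop fixed bit */
        unsigned int pnode = gnode & pnode_mask;  /* socket variant */

        /* prints: nasid=0x96 gnode=0x4b pnode=0xb */
        printf("nasid=0x%x gnode=0x%x pnode=0x%x\n", nasid, gnode, pnode);
        return 0;
    }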
+diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
+index e0b3130..c8243f0 100644
+--- a/arch/x86/kernel/amd_iommu_init.c
++++ b/arch/x86/kernel/amd_iommu_init.c
+@@ -136,6 +136,11 @@ LIST_HEAD(amd_iommu_list);		/* list of all AMD IOMMUs in the
+ 					   system */
+ 
+ /*
++ * Set to true if ACPI table parsing and hardware initialization went properly
++ */
++static bool amd_iommu_initialized;
++
++/*
+  * Pointer to the device table which is shared by all AMD IOMMUs
+  * it is indexed by the PCI device id or the HT unit id and contains
+  * information about the domain the device belongs to as well as the
+@@ -913,6 +918,8 @@ static int __init init_iommu_all(struct acpi_table_header *table)
+ 	}
+ 	WARN_ON(p != end);
+ 
++	amd_iommu_initialized = true;
++
+ 	return 0;
+ }
+ 
+@@ -1263,6 +1270,9 @@ int __init amd_iommu_init(void)
+ 	if (acpi_table_parse("IVRS", init_iommu_all) != 0)
+ 		goto free;
+ 
++	if (!amd_iommu_initialized)
++		goto free;
++
+ 	if (acpi_table_parse("IVRS", init_memory_definitions) != 0)
+ 		goto free;
+ 
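The new amd_iommu_initialized check above guards against the case where the IVRS table parses cleanly yet no IOMMU actually got set up: success is recorded by an explicit flag rather than inferred from the parser's return value. A minimal sketch of the pattern (names here are illustrative, not the driver's):

    #include <stdbool.h>

    static bool hw_initialized;

    static int parse_table(void)
    {
        /* ... may legitimately complete without initializing anything;
         * hw_initialized is set only where real setup happens ... */
        return 0;
    }

    int init(void)
    {
        if (parse_table() != 0)
            return -1;
        if (!hw_initialized)    /* parsing "worked", nothing was set up */
            return -1;
        return 0;
    }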
+diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
+index 326c254..2ab3535 100644
+--- a/arch/x86/kernel/apic/x2apic_uv_x.c
++++ b/arch/x86/kernel/apic/x2apic_uv_x.c
+@@ -607,8 +607,10 @@ void __init uv_system_init(void)
+ 	uv_rtc_init();
+ 
+ 	for_each_present_cpu(cpu) {
++		int apicid = per_cpu(x86_cpu_to_apicid, cpu);
++
+ 		nid = cpu_to_node(cpu);
+-		pnode = uv_apicid_to_pnode(per_cpu(x86_cpu_to_apicid, cpu));
++		pnode = uv_apicid_to_pnode(apicid);
+ 		blade = boot_pnode_to_blade(pnode);
+ 		lcpu = uv_blade_info[blade].nr_possible_cpus;
+ 		uv_blade_info[blade].nr_possible_cpus++;
+@@ -629,15 +631,13 @@ void __init uv_system_init(void)
+ 		uv_cpu_hub_info(cpu)->gnode_extra = gnode_extra;
+ 		uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
+ 		uv_cpu_hub_info(cpu)->coherency_domain_number = sn_coherency_id;
+-		uv_cpu_hub_info(cpu)->scir.offset = SCIR_LOCAL_MMR_BASE + lcpu;
++		uv_cpu_hub_info(cpu)->scir.offset = uv_scir_offset(apicid);
+ 		uv_node_to_blade[nid] = blade;
+ 		uv_cpu_to_blade[cpu] = blade;
+ 		max_pnode = max(pnode, max_pnode);
+ 
+-		printk(KERN_DEBUG "UV: cpu %d, apicid 0x%x, pnode %d, nid %d, "
+-			"lcpu %d, blade %d\n",
+-			cpu, per_cpu(x86_cpu_to_apicid, cpu), pnode, nid,
+-			lcpu, blade);
++		printk(KERN_DEBUG "UV: cpu %d, apicid 0x%x, pnode %d, nid %d, lcpu %d, blade %d\n",
++			cpu, apicid, pnode, nid, lcpu, blade);
+ 	}
+ 
+ 	/* Add blade/pnode info for nodes without cpus */
+diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
+index 7b058a2..c06acdd 100644
+--- a/arch/x86/kernel/ptrace.c
++++ b/arch/x86/kernel/ptrace.c
+@@ -408,14 +408,14 @@ static int genregs_get(struct task_struct *target,
+ {
+ 	if (kbuf) {
+ 		unsigned long *k = kbuf;
+-		while (count > 0) {
++		while (count >= sizeof(*k)) {
+ 			*k++ = getreg(target, pos);
+ 			count -= sizeof(*k);
+ 			pos += sizeof(*k);
+ 		}
+ 	} else {
+ 		unsigned long __user *u = ubuf;
+-		while (count > 0) {
++		while (count >= sizeof(*u)) {
+ 			if (__put_user(getreg(target, pos), u++))
+ 				return -EFAULT;
+ 			count -= sizeof(*u);
+@@ -434,14 +434,14 @@ static int genregs_set(struct task_struct *target,
+ 	int ret = 0;
+ 	if (kbuf) {
+ 		const unsigned long *k = kbuf;
+-		while (count > 0 && !ret) {
++		while (count >= sizeof(*k) && !ret) {
+ 			ret = putreg(target, pos, *k++);
+ 			count -= sizeof(*k);
+ 			pos += sizeof(*k);
+ 		}
+ 	} else {
+ 		const unsigned long  __user *u = ubuf;
+-		while (count > 0 && !ret) {
++		while (count >= sizeof(*u) && !ret) {
+ 			unsigned long word;
+ 			ret = __get_user(word, u++);
+ 			if (ret)
+@@ -1219,14 +1219,14 @@ static int genregs32_get(struct task_struct *target,
+ {
+ 	if (kbuf) {
+ 		compat_ulong_t *k = kbuf;
+-		while (count > 0) {
++		while (count >= sizeof(*k)) {
+ 			getreg32(target, pos, k++);
+ 			count -= sizeof(*k);
+ 			pos += sizeof(*k);
+ 		}
+ 	} else {
+ 		compat_ulong_t __user *u = ubuf;
+-		while (count > 0) {
++		while (count >= sizeof(*u)) {
+ 			compat_ulong_t word;
+ 			getreg32(target, pos, &word);
+ 			if (__put_user(word, u++))
+@@ -1247,14 +1247,14 @@ static int genregs32_set(struct task_struct *target,
+ 	int ret = 0;
+ 	if (kbuf) {
+ 		const compat_ulong_t *k = kbuf;
+-		while (count > 0 && !ret) {
++		while (count >= sizeof(*k) && !ret) {
+ 			ret = putreg32(target, pos, *k++);
+ 			count -= sizeof(*k);
+ 			pos += sizeof(*k);
+ 		}
+ 	} else {
+ 		const compat_ulong_t __user *u = ubuf;
+-		while (count > 0 && !ret) {
++		while (count >= sizeof(*u) && !ret) {
+ 			compat_ulong_t word;
+ 			ret = __get_user(word, u++);
+ 			if (ret)
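Every loop in this hunk trades "count > 0" for "count >= sizeof(*k)": when the requested byte count is not a multiple of the register size, the old condition let the final iteration transfer a whole word past the end of the caller's buffer. A user-space sketch of that failure mode (buffer size is made up):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        unsigned char buf[10];              /* caller handed us 10 bytes */
        size_t count = sizeof(buf);
        unsigned long *k = (unsigned long *)buf;

        /* With "count > 0", the last pass would store a full word
         * beyond buf; this test stops while a whole word still fits. */
        while (count >= sizeof(*k)) {
            memset(k++, 0xab, sizeof(*k));
            count -= sizeof(*k);
        }
        printf("trailing bytes left for the caller: %zu\n", count);
        return 0;
    }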
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 23c2176..41659fb 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -1156,6 +1156,7 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu)
+ 	hrtimer_cancel(&apic->lapic_timer.timer);
+ 	update_divide_count(apic);
+ 	start_apic_timer(apic);
++	apic->irr_pending = true;
+ }
+ 
+ void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
+diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
+index 72558f8..85e12cd 100644
+--- a/arch/x86/kvm/paging_tmpl.h
++++ b/arch/x86/kvm/paging_tmpl.h
+@@ -455,8 +455,6 @@ out_unlock:
+ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
+ {
+ 	struct kvm_shadow_walk_iterator iterator;
+-	pt_element_t gpte;
+-	gpa_t pte_gpa = -1;
+ 	int level;
+ 	u64 *sptep;
+ 	int need_flush = 0;
+@@ -471,10 +469,6 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
+ 		if (level == PT_PAGE_TABLE_LEVEL  ||
+ 		    ((level == PT_DIRECTORY_LEVEL && is_large_pte(*sptep))) ||
+ 		    ((level == PT_PDPE_LEVEL && is_large_pte(*sptep)))) {
+-			struct kvm_mmu_page *sp = page_header(__pa(sptep));
+-
+-			pte_gpa = (sp->gfn << PAGE_SHIFT);
+-			pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
+ 
+ 			if (is_shadow_present_pte(*sptep)) {
+ 				rmap_remove(vcpu->kvm, sptep);
+@@ -493,18 +487,6 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
+ 	if (need_flush)
+ 		kvm_flush_remote_tlbs(vcpu->kvm);
+ 	spin_unlock(&vcpu->kvm->mmu_lock);
+-
+-	if (pte_gpa == -1)
+-		return;
+-	if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
+-				  sizeof(pt_element_t)))
+-		return;
+-	if (is_present_gpte(gpte) && (gpte & PT_ACCESSED_MASK)) {
+-		if (mmu_topup_memory_caches(vcpu))
+-			return;
+-		kvm_mmu_pte_write(vcpu, pte_gpa, (const u8 *)&gpte,
+-				  sizeof(pt_element_t), 0);
+-	}
+ }
+ 
+ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
+diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
+index 85f5db9..c2b6f39 100644
+--- a/arch/x86/lib/Makefile
++++ b/arch/x86/lib/Makefile
+@@ -2,14 +2,14 @@
+ # Makefile for x86 specific library files.
+ #
+ 
+-obj-$(CONFIG_SMP) := msr.o
++obj-$(CONFIG_SMP) += msr-smp.o
+ 
+ lib-y := delay.o
+ lib-y += thunk_$(BITS).o
+ lib-y += usercopy_$(BITS).o getuser.o putuser.o
+ lib-y += memcpy_$(BITS).o
+ 
+-obj-y += msr-reg.o msr-reg-export.o
++obj-y += msr.o msr-reg.o msr-reg-export.o
+ 
+ ifeq ($(CONFIG_X86_32),y)
+         obj-y += atomic64_32.o
+diff --git a/arch/x86/lib/msr-smp.c b/arch/x86/lib/msr-smp.c
+new file mode 100644
+index 0000000..a6b1b86
+--- /dev/null
++++ b/arch/x86/lib/msr-smp.c
+@@ -0,0 +1,204 @@
++#include <linux/module.h>
++#include <linux/preempt.h>
++#include <linux/smp.h>
++#include <asm/msr.h>
++
++static void __rdmsr_on_cpu(void *info)
++{
++	struct msr_info *rv = info;
++	struct msr *reg;
++	int this_cpu = raw_smp_processor_id();
++
++	if (rv->msrs)
++		reg = per_cpu_ptr(rv->msrs, this_cpu);
++	else
++		reg = &rv->reg;
++
++	rdmsr(rv->msr_no, reg->l, reg->h);
++}
++
++static void __wrmsr_on_cpu(void *info)
++{
++	struct msr_info *rv = info;
++	struct msr *reg;
++	int this_cpu = raw_smp_processor_id();
++
++	if (rv->msrs)
++		reg = per_cpu_ptr(rv->msrs, this_cpu);
++	else
++		reg = &rv->reg;
++
++	wrmsr(rv->msr_no, reg->l, reg->h);
++}
++
++int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
++{
++	int err;
++	struct msr_info rv;
++
++	memset(&rv, 0, sizeof(rv));
++
++	rv.msr_no = msr_no;
++	err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
++	*l = rv.reg.l;
++	*h = rv.reg.h;
++
++	return err;
++}
++EXPORT_SYMBOL(rdmsr_on_cpu);
++
++int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
++{
++	int err;
++	struct msr_info rv;
++
++	memset(&rv, 0, sizeof(rv));
++
++	rv.msr_no = msr_no;
++	rv.reg.l = l;
++	rv.reg.h = h;
++	err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
++
++	return err;
++}
++EXPORT_SYMBOL(wrmsr_on_cpu);
++
++static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no,
++			    struct msr *msrs,
++			    void (*msr_func) (void *info))
++{
++	struct msr_info rv;
++	int this_cpu;
++
++	memset(&rv, 0, sizeof(rv));
++
++	rv.msrs	  = msrs;
++	rv.msr_no = msr_no;
++
++	this_cpu = get_cpu();
++
++	if (cpumask_test_cpu(this_cpu, mask))
++		msr_func(&rv);
++
++	smp_call_function_many(mask, msr_func, &rv, 1);
++	put_cpu();
++}
++
++/* rdmsr on a bunch of CPUs
++ *
++ * @mask:       which CPUs
++ * @msr_no:     which MSR
++ * @msrs:       array of MSR values
++ *
++ */
++void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
++{
++	__rwmsr_on_cpus(mask, msr_no, msrs, __rdmsr_on_cpu);
++}
++EXPORT_SYMBOL(rdmsr_on_cpus);
++
++/*
++ * wrmsr on a bunch of CPUs
++ *
++ * @mask:       which CPUs
++ * @msr_no:     which MSR
++ * @msrs:       array of MSR values
++ *
++ */
++void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
++{
++	__rwmsr_on_cpus(mask, msr_no, msrs, __wrmsr_on_cpu);
++}
++EXPORT_SYMBOL(wrmsr_on_cpus);
++
++/* These "safe" variants are slower and should be used when the target MSR
++   may not actually exist. */
++static void __rdmsr_safe_on_cpu(void *info)
++{
++	struct msr_info *rv = info;
++
++	rv->err = rdmsr_safe(rv->msr_no, &rv->reg.l, &rv->reg.h);
++}
++
++static void __wrmsr_safe_on_cpu(void *info)
++{
++	struct msr_info *rv = info;
++
++	rv->err = wrmsr_safe(rv->msr_no, rv->reg.l, rv->reg.h);
++}
++
++int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
++{
++	int err;
++	struct msr_info rv;
++
++	memset(&rv, 0, sizeof(rv));
++
++	rv.msr_no = msr_no;
++	err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
++	*l = rv.reg.l;
++	*h = rv.reg.h;
++
++	return err ? err : rv.err;
++}
++EXPORT_SYMBOL(rdmsr_safe_on_cpu);
++
++int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
++{
++	int err;
++	struct msr_info rv;
++
++	memset(&rv, 0, sizeof(rv));
++
++	rv.msr_no = msr_no;
++	rv.reg.l = l;
++	rv.reg.h = h;
++	err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
++
++	return err ? err : rv.err;
++}
++EXPORT_SYMBOL(wrmsr_safe_on_cpu);
++
++/*
++ * These variants are significantly slower, but allow control over
++ * the entire 32-bit GPR set.
++ */
++static void __rdmsr_safe_regs_on_cpu(void *info)
++{
++	struct msr_regs_info *rv = info;
++
++	rv->err = rdmsr_safe_regs(rv->regs);
++}
++
++static void __wrmsr_safe_regs_on_cpu(void *info)
++{
++	struct msr_regs_info *rv = info;
++
++	rv->err = wrmsr_safe_regs(rv->regs);
++}
++
++int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
++{
++	int err;
++	struct msr_regs_info rv;
++
++	rv.regs   = regs;
++	rv.err    = -EIO;
++	err = smp_call_function_single(cpu, __rdmsr_safe_regs_on_cpu, &rv, 1);
++
++	return err ? err : rv.err;
++}
++EXPORT_SYMBOL(rdmsr_safe_regs_on_cpu);
++
++int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
++{
++	int err;
++	struct msr_regs_info rv;
++
++	rv.regs = regs;
++	rv.err  = -EIO;
++	err = smp_call_function_single(cpu, __wrmsr_safe_regs_on_cpu, &rv, 1);
++
++	return err ? err : rv.err;
++}
++EXPORT_SYMBOL(wrmsr_safe_regs_on_cpu);
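__rwmsr_on_cpus() above relies on a standard idiom: smp_call_function_many() runs the callback on every CPU in the mask except the calling one, so the local CPU is covered directly while get_cpu() keeps preemption off. A hedged, generic sketch of the idiom (kernel-style fragment; run_on_cpus is an illustrative name):

    static void run_on_cpus(const struct cpumask *mask,
                            void (*func)(void *info), void *info)
    {
        int this_cpu = get_cpu();       /* also disables preemption */

        if (cpumask_test_cpu(this_cpu, mask))
            func(info);                 /* cover the local CPU here */

        /* runs func on the *other* CPUs in mask; 1 == wait for them */
        smp_call_function_many(mask, func, info, 1);
        put_cpu();
    }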
+diff --git a/arch/x86/lib/msr.c b/arch/x86/lib/msr.c
+index 33a1e3c..8f8eebd 100644
+--- a/arch/x86/lib/msr.c
++++ b/arch/x86/lib/msr.c
+@@ -1,226 +1,23 @@
+ #include <linux/module.h>
+ #include <linux/preempt.h>
+-#include <linux/smp.h>
+ #include <asm/msr.h>
+ 
+-struct msr_info {
+-	u32 msr_no;
+-	struct msr reg;
+-	struct msr *msrs;
+-	int off;
+-	int err;
+-};
+-
+-static void __rdmsr_on_cpu(void *info)
+-{
+-	struct msr_info *rv = info;
+-	struct msr *reg;
+-	int this_cpu = raw_smp_processor_id();
+-
+-	if (rv->msrs)
+-		reg = &rv->msrs[this_cpu - rv->off];
+-	else
+-		reg = &rv->reg;
+-
+-	rdmsr(rv->msr_no, reg->l, reg->h);
+-}
+-
+-static void __wrmsr_on_cpu(void *info)
+-{
+-	struct msr_info *rv = info;
+-	struct msr *reg;
+-	int this_cpu = raw_smp_processor_id();
+-
+-	if (rv->msrs)
+-		reg = &rv->msrs[this_cpu - rv->off];
+-	else
+-		reg = &rv->reg;
+-
+-	wrmsr(rv->msr_no, reg->l, reg->h);
+-}
+-
+-int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
+-{
+-	int err;
+-	struct msr_info rv;
+-
+-	memset(&rv, 0, sizeof(rv));
+-
+-	rv.msr_no = msr_no;
+-	err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
+-	*l = rv.reg.l;
+-	*h = rv.reg.h;
+-
+-	return err;
+-}
+-EXPORT_SYMBOL(rdmsr_on_cpu);
+-
+-int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
+-{
+-	int err;
+-	struct msr_info rv;
+-
+-	memset(&rv, 0, sizeof(rv));
+-
+-	rv.msr_no = msr_no;
+-	rv.reg.l = l;
+-	rv.reg.h = h;
+-	err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
+-
+-	return err;
+-}
+-EXPORT_SYMBOL(wrmsr_on_cpu);
+-
+-/* rdmsr on a bunch of CPUs
+- *
+- * @mask:       which CPUs
+- * @msr_no:     which MSR
+- * @msrs:       array of MSR values
+- *
+- */
+-void rdmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs)
+-{
+-	struct msr_info rv;
+-	int this_cpu;
+-
+-	memset(&rv, 0, sizeof(rv));
+-
+-	rv.off    = cpumask_first(mask);
+-	rv.msrs	  = msrs;
+-	rv.msr_no = msr_no;
+-
+-	this_cpu = get_cpu();
+-
+-	if (cpumask_test_cpu(this_cpu, mask))
+-		__rdmsr_on_cpu(&rv);
+-
+-	smp_call_function_many(mask, __rdmsr_on_cpu, &rv, 1);
+-	put_cpu();
+-}
+-EXPORT_SYMBOL(rdmsr_on_cpus);
+-
+-/*
+- * wrmsr on a bunch of CPUs
+- *
+- * @mask:       which CPUs
+- * @msr_no:     which MSR
+- * @msrs:       array of MSR values
+- *
+- */
+-void wrmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs)
+-{
+-	struct msr_info rv;
+-	int this_cpu;
+-
+-	memset(&rv, 0, sizeof(rv));
+-
+-	rv.off    = cpumask_first(mask);
+-	rv.msrs   = msrs;
+-	rv.msr_no = msr_no;
+-
+-	this_cpu = get_cpu();
+-
+-	if (cpumask_test_cpu(this_cpu, mask))
+-		__wrmsr_on_cpu(&rv);
+-
+-	smp_call_function_many(mask, __wrmsr_on_cpu, &rv, 1);
+-	put_cpu();
+-}
+-EXPORT_SYMBOL(wrmsr_on_cpus);
+-
+-/* These "safe" variants are slower and should be used when the target MSR
+-   may not actually exist. */
+-static void __rdmsr_safe_on_cpu(void *info)
+-{
+-	struct msr_info *rv = info;
+-
+-	rv->err = rdmsr_safe(rv->msr_no, &rv->reg.l, &rv->reg.h);
+-}
+-
+-static void __wrmsr_safe_on_cpu(void *info)
+-{
+-	struct msr_info *rv = info;
+-
+-	rv->err = wrmsr_safe(rv->msr_no, rv->reg.l, rv->reg.h);
+-}
+-
+-int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
++struct msr *msrs_alloc(void)
+ {
+-	int err;
+-	struct msr_info rv;
++	struct msr *msrs = NULL;
+ 
+-	memset(&rv, 0, sizeof(rv));
++	msrs = alloc_percpu(struct msr);
++	if (!msrs) {
++		pr_warning("%s: error allocating msrs\n", __func__);
++		return NULL;
++	}
+ 
+-	rv.msr_no = msr_no;
+-	err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
+-	*l = rv.reg.l;
+-	*h = rv.reg.h;
+-
+-	return err ? err : rv.err;
++	return msrs;
+ }
+-EXPORT_SYMBOL(rdmsr_safe_on_cpu);
++EXPORT_SYMBOL(msrs_alloc);
+ 
+-int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
++void msrs_free(struct msr *msrs)
+ {
+-	int err;
+-	struct msr_info rv;
+-
+-	memset(&rv, 0, sizeof(rv));
+-
+-	rv.msr_no = msr_no;
+-	rv.reg.l = l;
+-	rv.reg.h = h;
+-	err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
+-
+-	return err ? err : rv.err;
+-}
+-EXPORT_SYMBOL(wrmsr_safe_on_cpu);
+-
+-/*
+- * These variants are significantly slower, but allows control over
+- * the entire 32-bit GPR set.
+- */
+-struct msr_regs_info {
+-	u32 *regs;
+-	int err;
+-};
+-
+-static void __rdmsr_safe_regs_on_cpu(void *info)
+-{
+-	struct msr_regs_info *rv = info;
+-
+-	rv->err = rdmsr_safe_regs(rv->regs);
+-}
+-
+-static void __wrmsr_safe_regs_on_cpu(void *info)
+-{
+-	struct msr_regs_info *rv = info;
+-
+-	rv->err = wrmsr_safe_regs(rv->regs);
+-}
+-
+-int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
+-{
+-	int err;
+-	struct msr_regs_info rv;
+-
+-	rv.regs   = regs;
+-	rv.err    = -EIO;
+-	err = smp_call_function_single(cpu, __rdmsr_safe_regs_on_cpu, &rv, 1);
+-
+-	return err ? err : rv.err;
+-}
+-EXPORT_SYMBOL(rdmsr_safe_regs_on_cpu);
+-
+-int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
+-{
+-	int err;
+-	struct msr_regs_info rv;
+-
+-	rv.regs = regs;
+-	rv.err  = -EIO;
+-	err = smp_call_function_single(cpu, __wrmsr_safe_regs_on_cpu, &rv, 1);
+-
+-	return err ? err : rv.err;
++	free_percpu(msrs);
+ }
+-EXPORT_SYMBOL(wrmsr_safe_regs_on_cpu);
++EXPORT_SYMBOL(msrs_free);
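With the header declarations earlier in this patch, the intended calling pattern for the new allocator is: obtain the per-cpu array once, reuse it across bulk rdmsr_on_cpus()/wrmsr_on_cpus() calls, then release it, which is exactly what the amd64_edac conversion further down does. A module-style sketch (illustrative; the example_* names are hypothetical):

    #include <linux/module.h>
    #include <linux/smp.h>
    #include <asm/msr.h>

    static struct msr *msrs;

    static int __init example_init(void)
    {
        msrs = msrs_alloc();            /* per-cpu array of struct msr */
        if (!msrs)
            return -ENOMEM;

        /* each online CPU's read lands in its own slot of msrs */
        rdmsr_on_cpus(cpu_online_mask, MSR_IA32_MCG_CTL, msrs);
        return 0;
    }

    static void __exit example_exit(void)
    {
        msrs_free(msrs);
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");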
+diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
+index 0c9c6a9..8a95e83 100644
+--- a/drivers/acpi/button.c
++++ b/drivers/acpi/button.c
+@@ -282,6 +282,13 @@ static int acpi_lid_send_state(struct acpi_device *device)
+ 	if (ret == NOTIFY_DONE)
+ 		ret = blocking_notifier_call_chain(&acpi_lid_notifier, state,
+ 						   device);
++	if (ret == NOTIFY_DONE || ret == NOTIFY_OK) {
++		/*
++		 * It is also regarded as success if the notifier_chain
++		 * returns NOTIFY_OK or NOTIFY_DONE.
++		 */
++		ret = 0;
++	}
+ 	return ret;
+ }
+ 
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index baef28c..7511029 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -916,6 +916,7 @@ static int ec_validate_ecdt(const struct dmi_system_id *id)
+ /* MSI EC needs special treatment, enable it */
+ static int ec_flag_msi(const struct dmi_system_id *id)
+ {
++	printk(KERN_DEBUG PREFIX "Detected MSI hardware, enabling workarounds.\n");
+ 	EC_FLAGS_MSI = 1;
+ 	EC_FLAGS_VALIDATE_ECDT = 1;
+ 	return 0;
+@@ -928,8 +929,13 @@ static struct dmi_system_id __initdata ec_dmi_table[] = {
+ 	DMI_MATCH(DMI_BOARD_NAME, "JFL92") }, NULL},
+ 	{
+ 	ec_flag_msi, "MSI hardware", {
+-	DMI_MATCH(DMI_BIOS_VENDOR, "Micro-Star"),
+-	DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-Star") }, NULL},
++	DMI_MATCH(DMI_BIOS_VENDOR, "Micro-Star")}, NULL},
++	{
++	ec_flag_msi, "MSI hardware", {
++	DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star")}, NULL},
++	{
++	ec_flag_msi, "MSI hardware", {
++	DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-Star")}, NULL},
+ 	{
+ 	ec_validate_ecdt, "ASUS hardware", {
+ 	DMI_MATCH(DMI_BIOS_VENDOR, "ASUS") }, NULL},
+diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
+index f98dffe..f0bad9b 100644
+--- a/drivers/ata/pata_cmd64x.c
++++ b/drivers/ata/pata_cmd64x.c
+@@ -219,7 +219,7 @@ static void cmd64x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+ 		regU |= udma_data[adev->dma_mode - XFER_UDMA_0] << shift;
+ 		/* Merge the control bits */
+ 		regU |= 1 << adev->devno; /* UDMA on */
+-		if (adev->dma_mode > 2)	/* 15nS timing */
++		if (adev->dma_mode > XFER_UDMA_2) /* 15nS timing */
+ 			regU |= 4 << adev->devno;
+ 	} else {
+ 		regU &= ~ (1 << adev->devno);	/* UDMA off */
+diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
+index 21c5bd6..d16e87e 100644
+--- a/drivers/ata/pata_hpt3x2n.c
++++ b/drivers/ata/pata_hpt3x2n.c
+@@ -8,7 +8,7 @@
+  * Copyright (C) 1999-2003		Andre Hedrick <andre at linux-ide.org>
+  * Portions Copyright (C) 2001	        Sun Microsystems, Inc.
+  * Portions Copyright (C) 2003		Red Hat Inc
+- * Portions Copyright (C) 2005-2007	MontaVista Software, Inc.
++ * Portions Copyright (C) 2005-2009	MontaVista Software, Inc.
+  *
+  *
+  * TODO
+@@ -25,7 +25,7 @@
+ #include <linux/libata.h>
+ 
+ #define DRV_NAME	"pata_hpt3x2n"
+-#define DRV_VERSION	"0.3.7"
++#define DRV_VERSION	"0.3.8"
+ 
+ enum {
+ 	HPT_PCI_FAST	=	(1 << 31),
+@@ -262,7 +262,7 @@ static void hpt3x2n_bmdma_stop(struct ata_queued_cmd *qc)
+ 
+ static void hpt3x2n_set_clock(struct ata_port *ap, int source)
+ {
+-	void __iomem *bmdma = ap->ioaddr.bmdma_addr;
++	void __iomem *bmdma = ap->ioaddr.bmdma_addr - ap->port_no * 8;
+ 
+ 	/* Tristate the bus */
+ 	iowrite8(0x80, bmdma+0x73);
+@@ -272,9 +272,9 @@ static void hpt3x2n_set_clock(struct ata_port *ap, int source)
+ 	iowrite8(source, bmdma+0x7B);
+ 	iowrite8(0xC0, bmdma+0x79);
+ 
+-	/* Reset state machines */
+-	iowrite8(0x37, bmdma+0x70);
+-	iowrite8(0x37, bmdma+0x74);
++	/* Reset state machines, avoid enabling the disabled channels */
++	iowrite8(ioread8(bmdma+0x70) | 0x32, bmdma+0x70);
++	iowrite8(ioread8(bmdma+0x74) | 0x32, bmdma+0x74);
+ 
+ 	/* Complete reset */
+ 	iowrite8(0x00, bmdma+0x79);
+@@ -284,21 +284,10 @@ static void hpt3x2n_set_clock(struct ata_port *ap, int source)
+ 	iowrite8(0x00, bmdma+0x77);
+ }
+ 
+-/* Check if our partner interface is busy */
+-
+-static int hpt3x2n_pair_idle(struct ata_port *ap)
+-{
+-	struct ata_host *host = ap->host;
+-	struct ata_port *pair = host->ports[ap->port_no ^ 1];
+-
+-	if (pair->hsm_task_state == HSM_ST_IDLE)
+-		return 1;
+-	return 0;
+-}
+-
+ static int hpt3x2n_use_dpll(struct ata_port *ap, int writing)
+ {
+ 	long flags = (long)ap->host->private_data;
++
+ 	/* See if we should use the DPLL */
+ 	if (writing)
+ 		return USE_DPLL;	/* Needed for write */
+@@ -307,20 +296,35 @@ static int hpt3x2n_use_dpll(struct ata_port *ap, int writing)
+ 	return 0;
+ }
+ 
++static int hpt3x2n_qc_defer(struct ata_queued_cmd *qc)
++{
++	struct ata_port *ap = qc->ap;
++	struct ata_port *alt = ap->host->ports[ap->port_no ^ 1];
++	int rc, flags = (long)ap->host->private_data;
++	int dpll = hpt3x2n_use_dpll(ap, qc->tf.flags & ATA_TFLAG_WRITE);
++
++	/* First apply the usual rules */
++	rc = ata_std_qc_defer(qc);
++	if (rc != 0)
++		return rc;
++
++	if ((flags & USE_DPLL) != dpll && alt->qc_active)
++		return ATA_DEFER_PORT;
++	return 0;
++}
++
+ static unsigned int hpt3x2n_qc_issue(struct ata_queued_cmd *qc)
+ {
+-	struct ata_taskfile *tf = &qc->tf;
+ 	struct ata_port *ap = qc->ap;
+ 	int flags = (long)ap->host->private_data;
++	int dpll = hpt3x2n_use_dpll(ap, qc->tf.flags & ATA_TFLAG_WRITE);
+ 
+-	if (hpt3x2n_pair_idle(ap)) {
+-		int dpll = hpt3x2n_use_dpll(ap, (tf->flags & ATA_TFLAG_WRITE));
+-		if ((flags & USE_DPLL) != dpll) {
+-			if (dpll == 1)
+-				hpt3x2n_set_clock(ap, 0x21);
+-			else
+-				hpt3x2n_set_clock(ap, 0x23);
+-		}
++	if ((flags & USE_DPLL) != dpll) {
++		flags &= ~USE_DPLL;
++		flags |= dpll;
++		ap->host->private_data = (void *)(long)flags;
++
++		hpt3x2n_set_clock(ap, dpll ? 0x21 : 0x23);
+ 	}
+ 	return ata_sff_qc_issue(qc);
+ }
+@@ -337,6 +341,8 @@ static struct ata_port_operations hpt3x2n_port_ops = {
+ 	.inherits	= &ata_bmdma_port_ops,
+ 
+ 	.bmdma_stop	= hpt3x2n_bmdma_stop,
++
++	.qc_defer	= hpt3x2n_qc_defer,
+ 	.qc_issue	= hpt3x2n_qc_issue,
+ 
+ 	.cable_detect	= hpt3x2n_cable_detect,
+@@ -454,7 +460,7 @@ static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+ 	unsigned int f_low, f_high;
+ 	int adjust;
+ 	unsigned long iobase = pci_resource_start(dev, 4);
+-	void *hpriv = NULL;
++	void *hpriv = (void *)USE_DPLL;
+ 	int rc;
+ 
+ 	rc = pcim_enable_device(dev);
+@@ -542,7 +548,7 @@ static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+ 	/* Set our private data up. We only need a few flags so we use
+ 	   it directly */
+ 	if (pci_mhz > 60) {
+-		hpriv = (void *)PCI66;
++		hpriv = (void *)(PCI66 | USE_DPLL);
+ 		/*
+ 		 * On  HPT371N, if ATA clock is 66 MHz we must set bit 2 in
+ 		 * the MISC. register to stretch the UltraDMA Tss timing.
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 44bc8bb..1be7631 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -307,6 +307,7 @@ static void btusb_bulk_complete(struct urb *urb)
+ 		return;
+ 
+ 	usb_anchor_urb(urb, &data->bulk_anchor);
++	usb_mark_last_busy(data->udev);
+ 
+ 	err = usb_submit_urb(urb, GFP_ATOMIC);
+ 	if (err < 0) {
+diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
+index 7585c41..c558fa1 100644
+--- a/drivers/dma/at_hdmac.c
++++ b/drivers/dma/at_hdmac.c
+@@ -815,7 +815,7 @@ atc_is_tx_complete(struct dma_chan *chan,
+ 	dev_vdbg(chan2dev(chan), "is_tx_complete: %d (d%d, u%d)\n",
+ 			cookie, done ? *done : 0, used ? *used : 0);
+ 
+-	spin_lock_bh(atchan->lock);
++	spin_lock_bh(&atchan->lock);
+ 
+ 	last_complete = atchan->completed_cookie;
+ 	last_used = chan->cookie;
+@@ -830,7 +830,7 @@ atc_is_tx_complete(struct dma_chan *chan,
+ 		ret = dma_async_is_complete(cookie, last_complete, last_used);
+ 	}
+ 
+-	spin_unlock_bh(atchan->lock);
++	spin_unlock_bh(&atchan->lock);
+ 
+ 	if (done)
+ 		*done = last_complete;
+diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
+index c524d36..dcc4ab7 100644
+--- a/drivers/dma/ioat/dma.c
++++ b/drivers/dma/ioat/dma.c
+@@ -1032,7 +1032,7 @@ int __devinit ioat_probe(struct ioatdma_device *device)
+ 	dma->dev = &pdev->dev;
+ 
+ 	if (!dma->chancnt) {
+-		dev_err(dev, "zero channels detected\n");
++		dev_err(dev, "channel enumeration error\n");
+ 		goto err_setup_interrupts;
+ 	}
+ 
+diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
+index 45edde9..bbc3e78 100644
+--- a/drivers/dma/ioat/dma.h
++++ b/drivers/dma/ioat/dma.h
+@@ -60,6 +60,7 @@
+  * @dca: direct cache access context
+  * @intr_quirk: interrupt setup quirk (for ioat_v1 devices)
+  * @enumerate_channels: hw version specific channel enumeration
++ * @reset_hw: hw version specific channel (re)initialization
+  * @cleanup_tasklet: select between the v2 and v3 cleanup routines
+  * @timer_fn: select between the v2 and v3 timer watchdog routines
+  * @self_test: hardware version specific self test for each supported op type
+@@ -78,6 +79,7 @@ struct ioatdma_device {
+ 	struct dca_provider *dca;
+ 	void (*intr_quirk)(struct ioatdma_device *device);
+ 	int (*enumerate_channels)(struct ioatdma_device *device);
++	int (*reset_hw)(struct ioat_chan_common *chan);
+ 	void (*cleanup_tasklet)(unsigned long data);
+ 	void (*timer_fn)(unsigned long data);
+ 	int (*self_test)(struct ioatdma_device *device);
+@@ -264,6 +266,22 @@ static inline void ioat_suspend(struct ioat_chan_common *chan)
+ 	writeb(IOAT_CHANCMD_SUSPEND, chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
+ }
+ 
++static inline void ioat_reset(struct ioat_chan_common *chan)
++{
++	u8 ver = chan->device->version;
++
++	writeb(IOAT_CHANCMD_RESET, chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
++}
++
++static inline bool ioat_reset_pending(struct ioat_chan_common *chan)
++{
++	u8 ver = chan->device->version;
++	u8 cmd;
++
++	cmd = readb(chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
++	return (cmd & IOAT_CHANCMD_RESET) == IOAT_CHANCMD_RESET;
++}
++
+ static inline void ioat_set_chainaddr(struct ioat_dma_chan *ioat, u64 addr)
+ {
+ 	struct ioat_chan_common *chan = &ioat->base;
+diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
+index 8f1f7f0..5f7a500 100644
+--- a/drivers/dma/ioat/dma_v2.c
++++ b/drivers/dma/ioat/dma_v2.c
+@@ -239,20 +239,50 @@ void __ioat2_restart_chan(struct ioat2_dma_chan *ioat)
+ 		__ioat2_start_null_desc(ioat);
+ }
+ 
+-static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
++int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo)
+ {
+-	struct ioat_chan_common *chan = &ioat->base;
+-	unsigned long phys_complete;
++	unsigned long end = jiffies + tmo;
++	int err = 0;
+ 	u32 status;
+ 
+ 	status = ioat_chansts(chan);
+ 	if (is_ioat_active(status) || is_ioat_idle(status))
+ 		ioat_suspend(chan);
+ 	while (is_ioat_active(status) || is_ioat_idle(status)) {
++		if (end && time_after(jiffies, end)) {
++			err = -ETIMEDOUT;
++			break;
++		}
+ 		status = ioat_chansts(chan);
+ 		cpu_relax();
+ 	}
+ 
++	return err;
++}
++
++int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo)
++{
++	unsigned long end = jiffies + tmo;
++	int err = 0;
++
++	ioat_reset(chan);
++	while (ioat_reset_pending(chan)) {
++		if (end && time_after(jiffies, end)) {
++			err = -ETIMEDOUT;
++			break;
++		}
++		cpu_relax();
++	}
++
++	return err;
++}
++
++static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
++{
++	struct ioat_chan_common *chan = &ioat->base;
++	unsigned long phys_complete;
++
++	ioat2_quiesce(chan, 0);
+ 	if (ioat_cleanup_preamble(chan, &phys_complete))
+ 		__cleanup(ioat, phys_complete);
+ 
+@@ -318,6 +348,19 @@ void ioat2_timer_event(unsigned long data)
+ 	spin_unlock_bh(&chan->cleanup_lock);
+ }
+ 
++static int ioat2_reset_hw(struct ioat_chan_common *chan)
++{
++	/* throw away whatever the channel was doing and get it initialized */
++	u32 chanerr;
++
++	ioat2_quiesce(chan, msecs_to_jiffies(100));
++
++	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
++	writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
++
++	return ioat2_reset_sync(chan, msecs_to_jiffies(200));
++}
++
+ /**
+  * ioat2_enumerate_channels - find and initialize the device's channels
+  * @device: the device to be enumerated
+@@ -360,6 +403,10 @@ int ioat2_enumerate_channels(struct ioatdma_device *device)
+ 				  (unsigned long) ioat);
+ 		ioat->xfercap_log = xfercap_log;
+ 		spin_lock_init(&ioat->ring_lock);
++		if (device->reset_hw(&ioat->base)) {
++			i = 0;
++			break;
++		}
+ 	}
+ 	dma->chancnt = i;
+ 	return i;
+@@ -467,7 +514,6 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
+ 	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
+ 	struct ioat_chan_common *chan = &ioat->base;
+ 	struct ioat_ring_ent **ring;
+-	u32 chanerr;
+ 	int order;
+ 
+ 	/* have we already been set up? */
+@@ -477,12 +523,6 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
+ 	/* Setup register to interrupt and write completion status on error */
+ 	writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);
+ 
+-	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+-	if (chanerr) {
+-		dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr);
+-		writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
+-	}
+-
+ 	/* allocate a completion writeback area */
+ 	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
+ 	chan->completion = pci_pool_alloc(chan->device->completion_pool,
+@@ -746,13 +786,7 @@ void ioat2_free_chan_resources(struct dma_chan *c)
+ 	tasklet_disable(&chan->cleanup_task);
+ 	del_timer_sync(&chan->timer);
+ 	device->cleanup_tasklet((unsigned long) ioat);
+-
+-	/* Delay 100ms after reset to allow internal DMA logic to quiesce
+-	 * before removing DMA descriptor resources.
+-	 */
+-	writeb(IOAT_CHANCMD_RESET,
+-	       chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
+-	mdelay(100);
++	device->reset_hw(chan);
+ 
+ 	spin_lock_bh(&ioat->ring_lock);
+ 	descs = ioat2_ring_space(ioat);
+@@ -839,6 +873,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
+ 	int err;
+ 
+ 	device->enumerate_channels = ioat2_enumerate_channels;
++	device->reset_hw = ioat2_reset_hw;
+ 	device->cleanup_tasklet = ioat2_cleanup_tasklet;
+ 	device->timer_fn = ioat2_timer_event;
+ 	device->self_test = ioat_dma_self_test;
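ioat2_quiesce() and ioat2_reset_sync() above share one shape: spin with cpu_relax() until a hardware status clears or a jiffies deadline passes, where a zero timeout means poll indefinitely. A generic sketch of that bounded-poll loop (illustrative helper, not driver code):

    /* returns 0 once the bit clears, -ETIMEDOUT after tmo jiffies;
     * tmo == 0 polls without a deadline */
    static int poll_until_clear(void __iomem *reg, u8 bit,
                                unsigned long tmo)
    {
        unsigned long end = jiffies + tmo;

        while (readb(reg) & bit) {
            if (tmo && time_after(jiffies, end))
                return -ETIMEDOUT;
            cpu_relax();
        }
        return 0;
    }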
+diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
+index 1d849ef..3afad8d 100644
+--- a/drivers/dma/ioat/dma_v2.h
++++ b/drivers/dma/ioat/dma_v2.h
+@@ -185,6 +185,8 @@ bool reshape_ring(struct ioat2_dma_chan *ioat, int order);
+ void __ioat2_issue_pending(struct ioat2_dma_chan *ioat);
+ void ioat2_cleanup_tasklet(unsigned long data);
+ void ioat2_timer_event(unsigned long data);
++int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo);
++int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo);
+ extern struct kobj_type ioat2_ktype;
+ extern struct kmem_cache *ioat2_cache;
+ #endif /* IOATDMA_V2_H */
+diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
+index 42f6f10..9908c9e 100644
+--- a/drivers/dma/ioat/dma_v3.c
++++ b/drivers/dma/ioat/dma_v3.c
+@@ -650,9 +650,11 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
+ 
+ 	num_descs = ioat2_xferlen_to_descs(ioat, len);
+ 	/* we need 2x the number of descriptors to cover greater than 3
+-	 * sources
++	 * sources (we need 1 extra source in the q-only continuation
++	 * case and 3 extra sources in the p+q continuation case).
+ 	 */
+-	if (src_cnt > 3 || flags & DMA_PREP_CONTINUE) {
++	if (src_cnt + dmaf_p_disabled_continue(flags) > 3 ||
++	    (dmaf_continue(flags) && !dmaf_p_disabled_continue(flags))) {
+ 		with_ext = 1;
+ 		num_descs *= 2;
+ 	} else
+@@ -1128,6 +1130,45 @@ static int __devinit ioat3_dma_self_test(struct ioatdma_device *device)
+ 	return 0;
+ }
+ 
++static int ioat3_reset_hw(struct ioat_chan_common *chan)
++{
++	/* throw away whatever the channel was doing and get it
++	 * initialized, with ioat3 specific workarounds
++	 */
++	struct ioatdma_device *device = chan->device;
++	struct pci_dev *pdev = device->pdev;
++	u32 chanerr;
++	u16 dev_id;
++	int err;
++
++	ioat2_quiesce(chan, msecs_to_jiffies(100));
++
++	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
++	writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
++
++	/* -= IOAT ver.3 workarounds =- */
++	/* Write CHANERRMSK_INT with 3E07h to mask out the errors
++	 * that can cause stability issues for IOAT ver.3, and clear any
++	 * pending errors
++	 */
++	pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07);
++	err = pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
++	if (err) {
++		dev_err(&pdev->dev, "channel error register unreachable\n");
++		return err;
++	}
++	pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr);
++
++	/* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
++	 * (workaround for spurious config parity error after restart)
++	 */
++	pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
++	if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0)
++		pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10);
++
++	return ioat2_reset_sync(chan, msecs_to_jiffies(200));
++}
++
+ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
+ {
+ 	struct pci_dev *pdev = device->pdev;
+@@ -1137,10 +1178,10 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
+ 	struct ioat_chan_common *chan;
+ 	bool is_raid_device = false;
+ 	int err;
+-	u16 dev_id;
+ 	u32 cap;
+ 
+ 	device->enumerate_channels = ioat2_enumerate_channels;
++	device->reset_hw = ioat3_reset_hw;
+ 	device->self_test = ioat3_dma_self_test;
+ 	dma = &device->common;
+ 	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
+@@ -1216,19 +1257,6 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
+ 	dma->device_prep_dma_xor_val = NULL;
+ 	#endif
+ 
+-	/* -= IOAT ver.3 workarounds =- */
+-	/* Write CHANERRMSK_INT with 3E07h to mask out the errors
+-	 * that can cause stability issues for IOAT ver.3
+-	 */
+-	pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07);
+-
+-	/* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
+-	 * (workaround for spurious config parity error after restart)
+-	 */
+-	pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
+-	if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0)
+-		pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10);
+-
+ 	err = ioat_probe(device);
+ 	if (err)
+ 		return err;
+diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h
+index f015ec1..e8ae63b 100644
+--- a/drivers/dma/ioat/registers.h
++++ b/drivers/dma/ioat/registers.h
+@@ -27,6 +27,7 @@
+ 
+ #define IOAT_PCI_DEVICE_ID_OFFSET		0x02
+ #define IOAT_PCI_DMAUNCERRSTS_OFFSET		0x148
++#define IOAT_PCI_CHANERR_INT_OFFSET		0x180
+ #define IOAT_PCI_CHANERRMASK_INT_OFFSET		0x184
+ 
+ /* MMIO Device Registers */
+diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
+index a38831c..a0bcfba 100644
+--- a/drivers/edac/amd64_edac.c
++++ b/drivers/edac/amd64_edac.c
+@@ -13,6 +13,8 @@ module_param(report_gart_errors, int, 0644);
+ static int ecc_enable_override;
+ module_param(ecc_enable_override, int, 0644);
+ 
++static struct msr *msrs;
++
+ /* Lookup table for all possible MC control instances */
+ struct amd64_pvt;
+ static struct mem_ctl_info *mci_lookup[EDAC_MAX_NUMNODES];
+@@ -2618,6 +2620,90 @@ static int amd64_init_csrows(struct mem_ctl_info *mci)
+ 	return empty;
+ }
+ 
++/* get all cores on this DCT */
++static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid)
++{
++	int cpu;
++
++	for_each_online_cpu(cpu)
++		if (amd_get_nb_id(cpu) == nid)
++			cpumask_set_cpu(cpu, mask);
++}
++
++/* check MCG_CTL on all the cpus on this node */
++static bool amd64_nb_mce_bank_enabled_on_node(int nid)
++{
++	cpumask_var_t mask;
++	int cpu, nbe;
++	bool ret = false;
++
++	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
++		amd64_printk(KERN_WARNING, "%s: error allocating mask\n",
++			     __func__);
++		return false;
++	}
++
++	get_cpus_on_this_dct_cpumask(mask, nid);
++
++	rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
++
++	for_each_cpu(cpu, mask) {
++		struct msr *reg = per_cpu_ptr(msrs, cpu);
++		nbe = reg->l & K8_MSR_MCGCTL_NBE;
++
++		debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
++			cpu, reg->q,
++			(nbe ? "enabled" : "disabled"));
++
++		if (!nbe)
++			goto out;
++	}
++	ret = true;
++
++out:
++	free_cpumask_var(mask);
++	return ret;
++}
++
++static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on)
++{
++	cpumask_var_t cmask;
++	int cpu;
++
++	if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
++		amd64_printk(KERN_WARNING, "%s: error allocating mask\n",
++			     __func__);
++		return false;
++	}
++
++	get_cpus_on_this_dct_cpumask(cmask, pvt->mc_node_id);
++
++	rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
++
++	for_each_cpu(cpu, cmask) {
++
++		struct msr *reg = per_cpu_ptr(msrs, cpu);
++
++		if (on) {
++			if (reg->l & K8_MSR_MCGCTL_NBE)
++				pvt->flags.ecc_report = 1;
++
++			reg->l |= K8_MSR_MCGCTL_NBE;
++		} else {
++			/*
++			 * Turn off ECC reporting only when it was off before
++			 */
++			if (!pvt->flags.ecc_report)
++				reg->l &= ~K8_MSR_MCGCTL_NBE;
++		}
++	}
++	wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
++
++	free_cpumask_var(cmask);
++
++	return 0;
++}
++
+ /*
+  * Only if 'ecc_enable_override' is set AND BIOS had ECC disabled, do "we"
+  * enable it.
+@@ -2625,17 +2711,12 @@ static int amd64_init_csrows(struct mem_ctl_info *mci)
+ static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
+ {
+ 	struct amd64_pvt *pvt = mci->pvt_info;
+-	const cpumask_t *cpumask = cpumask_of_node(pvt->mc_node_id);
+-	int cpu, idx = 0, err = 0;
+-	struct msr msrs[cpumask_weight(cpumask)];
+-	u32 value;
+-	u32 mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
++	int err = 0;
++	u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
+ 
+ 	if (!ecc_enable_override)
+ 		return;
+ 
+-	memset(msrs, 0, sizeof(msrs));
+-
+ 	amd64_printk(KERN_WARNING,
+ 		"'ecc_enable_override' parameter is active, "
+ 		"Enabling AMD ECC hardware now: CAUTION\n");
+@@ -2651,16 +2732,9 @@ static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
+ 	value |= mask;
+ 	pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);
+ 
+-	rdmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
+-
+-	for_each_cpu(cpu, cpumask) {
+-		if (msrs[idx].l & K8_MSR_MCGCTL_NBE)
+-			set_bit(idx, &pvt->old_mcgctl);
+-
+-		msrs[idx].l |= K8_MSR_MCGCTL_NBE;
+-		idx++;
+-	}
+-	wrmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
++	if (amd64_toggle_ecc_err_reporting(pvt, ON))
++		amd64_printk(KERN_WARNING, "Error enabling ECC reporting over "
++					   "MCGCTL!\n");
+ 
+ 	err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &value);
+ 	if (err)
+@@ -2701,17 +2775,12 @@ static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
+ 
+ static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt)
+ {
+-	const cpumask_t *cpumask = cpumask_of_node(pvt->mc_node_id);
+-	int cpu, idx = 0, err = 0;
+-	struct msr msrs[cpumask_weight(cpumask)];
+-	u32 value;
+-	u32 mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
++	int err = 0;
++	u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
+ 
+ 	if (!pvt->nbctl_mcgctl_saved)
+ 		return;
+ 
+-	memset(msrs, 0, sizeof(msrs));
+-
+ 	err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCTL, &value);
+ 	if (err)
+ 		debugf0("Reading K8_NBCTL failed\n");
+@@ -2721,66 +2790,9 @@ static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt)
+ 	/* restore the NB Enable MCGCTL bit */
+ 	pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);
+ 
+-	rdmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
+-
+-	for_each_cpu(cpu, cpumask) {
+-		msrs[idx].l &= ~K8_MSR_MCGCTL_NBE;
+-		msrs[idx].l |=
+-			test_bit(idx, &pvt->old_mcgctl) << K8_MSR_MCGCTL_NBE;
+-		idx++;
+-	}
+-
+-	wrmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
+-}
+-
+-/* get all cores on this DCT */
+-static void get_cpus_on_this_dct_cpumask(cpumask_t *mask, int nid)
+-{
+-	int cpu;
+-
+-	for_each_online_cpu(cpu)
+-		if (amd_get_nb_id(cpu) == nid)
+-			cpumask_set_cpu(cpu, mask);
+-}
+-
+-/* check MCG_CTL on all the cpus on this node */
+-static bool amd64_nb_mce_bank_enabled_on_node(int nid)
+-{
+-	cpumask_t mask;
+-	struct msr *msrs;
+-	int cpu, nbe, idx = 0;
+-	bool ret = false;
+-
+-	cpumask_clear(&mask);
+-
+-	get_cpus_on_this_dct_cpumask(&mask, nid);
+-
+-	msrs = kzalloc(sizeof(struct msr) * cpumask_weight(&mask), GFP_KERNEL);
+-	if (!msrs) {
+-		amd64_printk(KERN_WARNING, "%s: error allocating msrs\n",
+-			      __func__);
+-		 return false;
+-	}
+-
+-	rdmsr_on_cpus(&mask, MSR_IA32_MCG_CTL, msrs);
+-
+-	for_each_cpu(cpu, &mask) {
+-		nbe = msrs[idx].l & K8_MSR_MCGCTL_NBE;
+-
+-		debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
+-			cpu, msrs[idx].q,
+-			(nbe ? "enabled" : "disabled"));
+-
+-		if (!nbe)
+-			goto out;
+-
+-		idx++;
+-	}
+-	ret = true;
+-
+-out:
+-	kfree(msrs);
+-	return ret;
++	if (amd64_toggle_ecc_err_reporting(pvt, OFF))
++		amd64_printk(KERN_WARNING, "Error restoring ECC reporting over "
++					   "MCGCTL!\n");
+ }
+ 
+ /*
+@@ -2824,9 +2836,8 @@ static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
+ 			amd64_printk(KERN_WARNING, "%s", ecc_warning);
+ 			return -ENODEV;
+ 		}
+-	} else
+-		/* CLEAR the override, since BIOS controlled it */
+ 		ecc_enable_override = 0;
++	}
+ 
+ 	return 0;
+ }
+@@ -2909,7 +2920,6 @@ static int amd64_probe_one_instance(struct pci_dev *dram_f2_ctl,
+ 	pvt->ext_model		= boot_cpu_data.x86_model >> 4;
+ 	pvt->mc_type_index	= mc_type_index;
+ 	pvt->ops		= family_ops(mc_type_index);
+-	pvt->old_mcgctl		= 0;
+ 
+ 	/*
+ 	 * We have the dram_f2_ctl device as an argument, now go reserve its
+@@ -3071,16 +3081,15 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
+ 
+ 	amd64_free_mc_sibling_devices(pvt);
+ 
+-	kfree(pvt);
+-	mci->pvt_info = NULL;
+-
+-	mci_lookup[pvt->mc_node_id] = NULL;
+-
+ 	/* unregister from EDAC MCE */
+ 	amd_report_gart_errors(false);
+ 	amd_unregister_ecc_decoder(amd64_decode_bus_error);
+ 
+ 	/* Free the EDAC CORE resources */
++	mci->pvt_info = NULL;
++	mci_lookup[pvt->mc_node_id] = NULL;
++
++	kfree(pvt);
+ 	edac_mc_free(mci);
+ }
+ 
+@@ -3157,23 +3166,29 @@ static void amd64_setup_pci_device(void)
+ static int __init amd64_edac_init(void)
+ {
+ 	int nb, err = -ENODEV;
++	bool load_ok = false;
+ 
+ 	edac_printk(KERN_INFO, EDAC_MOD_STR, EDAC_AMD64_VERSION "\n");
+ 
+ 	opstate_init();
+ 
+ 	if (cache_k8_northbridges() < 0)
+-		return err;
++		goto err_ret;
++
++	msrs = msrs_alloc();
++	if (!msrs)
++		goto err_ret;
+ 
+ 	err = pci_register_driver(&amd64_pci_driver);
+ 	if (err)
+-		return err;
++		goto err_pci;
+ 
+ 	/*
+ 	 * At this point, the array 'pvt_lookup[]' contains pointers to alloc'd
+ 	 * amd64_pvt structs. These will be used in the 2nd stage init function
+ 	 * to finish initialization of the MC instances.
+ 	 */
++	err = -ENODEV;
+ 	for (nb = 0; nb < num_k8_northbridges; nb++) {
+ 		if (!pvt_lookup[nb])
+ 			continue;
+@@ -3181,16 +3196,21 @@ static int __init amd64_edac_init(void)
+ 		err = amd64_init_2nd_stage(pvt_lookup[nb]);
+ 		if (err)
+ 			goto err_2nd_stage;
+-	}
+ 
+-	amd64_setup_pci_device();
++		load_ok = true;
++	}
+ 
+-	return 0;
++	if (load_ok) {
++		amd64_setup_pci_device();
++		return 0;
++	}
+ 
+ err_2nd_stage:
+-	debugf0("2nd stage failed\n");
+ 	pci_unregister_driver(&amd64_pci_driver);
+-
++err_pci:
++	msrs_free(msrs);
++	msrs = NULL;
++err_ret:
+ 	return err;
+ }
+ 
+@@ -3200,6 +3220,9 @@ static void __exit amd64_edac_exit(void)
+ 		edac_pci_release_generic_ctl(amd64_ctl_pci);
+ 
+ 	pci_unregister_driver(&amd64_pci_driver);
++
++	msrs_free(msrs);
++	msrs = NULL;
+ }
+ 
+ module_init(amd64_edac_init);
+diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
+index c6f359a..bba6c94 100644
+--- a/drivers/edac/amd64_edac.h
++++ b/drivers/edac/amd64_edac.h
+@@ -147,6 +147,8 @@
+ #define MAX_CS_COUNT			8
+ #define DRAM_REG_COUNT			8
+ 
++#define ON true
++#define OFF false
+ 
+ /*
+  * PCI-defined configuration space registers
+@@ -386,10 +388,7 @@ enum {
+ #define K8_NBCAP_DUAL_NODE		BIT(1)
+ #define K8_NBCAP_DCT_DUAL		BIT(0)
+ 
+-/*
+- * MSR Regs
+- */
+-#define K8_MSR_MCGCTL			0x017b
++/* MSRs */
+ #define K8_MSR_MCGCTL_NBE		BIT(4)
+ 
+ #define K8_MSR_MC4CTL			0x0410
+@@ -487,7 +486,6 @@ struct amd64_pvt {
+ 	/* Save old hw registers' values before we modified them */
+ 	u32 nbctl_mcgctl_saved;		/* When true, following 2 are valid */
+ 	u32 old_nbctl;
+-	unsigned long old_mcgctl;	/* per core on this node */
+ 
+ 	/* MC Type Index value: socket F vs Family 10h */
+ 	u32 mc_type_index;
+@@ -495,6 +493,7 @@ struct amd64_pvt {
+ 	/* misc settings */
+ 	struct flags {
+ 		unsigned long cf8_extcfg:1;
++		unsigned long ecc_report:1;
+ 	} flags;
+ };
+ 
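[ For orientation: the open-coded rdmsr_on_cpus()/wrmsr_on_cpus() loops removed
above are folded into one helper operating on a single module-wide MSR array
(allocated with msrs_alloc() in amd64_edac_init(), freed in the exit/error
paths). A minimal sketch of the shape such a helper could take -- toggle_nbe()
and its body are illustrative, not the actual amd64_toggle_ecc_err_reporting(),
which presumably also tracks prior state via the new ecc_report flag:

	/*
	 * Set or clear the NB machine-check-enable bit on every core
	 * in cmask, reusing the module-wide "msrs" array instead of a
	 * per-call variable-length array on the stack.
	 */
	static void toggle_nbe(const struct cpumask *cmask, bool on)
	{
		int cpu, idx = 0;

		rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
		for_each_cpu(cpu, cmask) {
			if (on)
				msrs[idx].l |= K8_MSR_MCGCTL_NBE;
			else
				msrs[idx].l &= ~K8_MSR_MCGCTL_NBE;
			idx++;
		}
		wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
	}
]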
+diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
+index bbfd110..afed886 100644
+--- a/drivers/gpu/drm/drm_crtc_helper.c
++++ b/drivers/gpu/drm/drm_crtc_helper.c
+@@ -1020,6 +1020,9 @@ bool drm_helper_initial_config(struct drm_device *dev)
+ {
+ 	int count = 0;
+ 
++	/* disable all the possible outputs/crtcs before entering KMS mode */
++	drm_helper_disable_unused_functions(dev);
++
+ 	drm_fb_helper_parse_command_line(dev);
+ 
+ 	count = drm_helper_probe_connector_modes(dev,
+diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
+index c6777cb..19f93f2 100644
+--- a/drivers/gpu/drm/radeon/atombios_crtc.c
++++ b/drivers/gpu/drm/radeon/atombios_crtc.c
+@@ -249,13 +249,15 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
+ 		if (ASIC_IS_DCE3(rdev))
+ 			atombios_enable_crtc_memreq(crtc, 1);
+ 		atombios_blank_crtc(crtc, 0);
+-		drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
++		if (rdev->family < CHIP_R600)
++			drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
+ 		radeon_crtc_load_lut(crtc);
+ 		break;
+ 	case DRM_MODE_DPMS_STANDBY:
+ 	case DRM_MODE_DPMS_SUSPEND:
+ 	case DRM_MODE_DPMS_OFF:
+-		drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
++		if (rdev->family < CHIP_R600)
++			drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
+ 		atombios_blank_crtc(crtc, 1);
+ 		if (ASIC_IS_DCE3(rdev))
+ 			atombios_enable_crtc_memreq(crtc, 0);
+diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
+index f8a465d..c8942ca 100644
+--- a/drivers/gpu/drm/radeon/radeon_test.c
++++ b/drivers/gpu/drm/radeon/radeon_test.c
+@@ -42,8 +42,8 @@ void radeon_test_moves(struct radeon_device *rdev)
+ 	/* Number of tests =
+ 	 * (Total GTT - IB pool - writeback page - ring buffer) / test size
+ 	 */
+-	n = (rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - RADEON_GPU_PAGE_SIZE -
+-	     rdev->cp.ring_size) / size;
++	n = ((u32)(rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - RADEON_GPU_PAGE_SIZE -
++	     rdev->cp.ring_size)) / size;
+ 
+ 	gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
+ 	if (!gtt_obj) {
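[ For orientation: if mc.gtt_size is a 64-bit quantity here, an open-coded
64-bit division on 32-bit kernels either needs do_div() or drags in libgcc
helpers; since the GTT working size is known to fit in 32 bits, the cast keeps
a plain division. A sketch of the two options, with illustrative names
(bytes, overhead, size, n):

	u64 bytes = gtt_size - overhead;	/* fits in 32 bits by construction */
	u32 n;

	n = (u32)bytes / size;			/* plain 32-bit division */

	/* the general-purpose alternative when the value may not fit: */
	do_div(bytes, size);			/* divides in place, returns remainder */
	n = bytes;
]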
+diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
+index ebe38b6..864a371 100644
+--- a/drivers/hwmon/sht15.c
++++ b/drivers/hwmon/sht15.c
+@@ -305,7 +305,7 @@ static inline int sht15_calc_temp(struct sht15_data *data)
+ 	int d1 = 0;
+ 	int i;
+ 
+-	for (i = 1; i < ARRAY_SIZE(temppoints) - 1; i++)
++	for (i = 1; i < ARRAY_SIZE(temppoints); i++)
+ 		/* Find pointer to interpolate */
+ 		if (data->supply_uV > temppoints[i - 1].vdd) {
+ 			d1 = (data->supply_uV/1000 - temppoints[i - 1].vdd)
+@@ -332,12 +332,12 @@ static inline int sht15_calc_humid(struct sht15_data *data)
+ 
+ 	const int c1 = -4;
+ 	const int c2 = 40500; /* x 10 ^ -6 */
+-	const int c3 = 2800; /* x10 ^ -9 */
++	const int c3 = -2800; /* x10 ^ -9 */
+ 
+ 	RHlinear = c1*1000
+ 		+ c2 * data->val_humid/1000
+ 		+ (data->val_humid * data->val_humid * c3)/1000000;
+-	return (temp - 25000) * (10000 + 800 * data->val_humid)
++	return (temp - 25000) * (10000 + 80 * data->val_humid)
+ 		/ 1000000 + RHlinear;
+ }
+ 
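[ For reference, the three constant fixes match the SHT1x datasheet
conversion: RH_linear = c1 + c2*SO_RH + c3*SO_RH^2 with c1 = -4, c2 = 0.0405,
c3 = -2.8e-6 (hence the sign fix), then RH_true = (T - 25) * (t1 + t2*SO_RH)
+ RH_linear with t1 = 0.01, t2 = 0.00008 (hence 80, not 800, under the
driver's milli-unit scaling). A floating-point restatement for clarity; the
driver itself uses scaled integer arithmetic:

	/*
	 * so_rh is the raw humidity reading, temp_c the temperature in
	 * degrees Celsius; returns relative humidity in percent.
	 */
	static double sht1x_rh_true(double so_rh, double temp_c)
	{
		double rh_linear = -4.0 + 0.0405 * so_rh
				   - 2.8e-6 * so_rh * so_rh;

		return (temp_c - 25.0) * (0.01 + 8.0e-5 * so_rh) + rh_linear;
	}
]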
+diff --git a/drivers/lguest/segments.c b/drivers/lguest/segments.c
+index 951c57b..ede4658 100644
+--- a/drivers/lguest/segments.c
++++ b/drivers/lguest/segments.c
+@@ -179,8 +179,10 @@ void load_guest_gdt_entry(struct lg_cpu *cpu, u32 num, u32 lo, u32 hi)
+ 	 * We assume the Guest has the same number of GDT entries as the
+ 	 * Host, otherwise we'd have to dynamically allocate the Guest GDT.
+ 	 */
+-	if (num >= ARRAY_SIZE(cpu->arch.gdt))
++	if (num >= ARRAY_SIZE(cpu->arch.gdt)) {
+ 		kill_guest(cpu, "too many gdt entries %i", num);
++		return;
++	}
+ 
+ 	/* Set it up, then fix it. */
+ 	cpu->arch.gdt[num].a = lo;
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 02e4551..c6a6685 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -282,7 +282,9 @@ static void mddev_put(mddev_t *mddev)
+ 	if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
+ 		return;
+ 	if (!mddev->raid_disks && list_empty(&mddev->disks) &&
+-	    !mddev->hold_active) {
++	    mddev->ctime == 0 && !mddev->hold_active) {
++		/* Array is not configured at all, and not held active,
++		 * so destroy it */
+ 		list_del(&mddev->all_mddevs);
+ 		if (mddev->gendisk) {
+ 			/* we did a probe so need to clean up.
+@@ -5071,6 +5073,10 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
+ 		mddev->minor_version = info->minor_version;
+ 		mddev->patch_version = info->patch_version;
+ 		mddev->persistent = !info->not_persistent;
++		/* ensure mddev_put doesn't delete this now that there
++		 * is some minimal configuration.
++		 */
++		mddev->ctime         = get_seconds();
+ 		return 0;
+ 	}
+ 	mddev->major_version = MD_MAJOR_VERSION;
+diff --git a/drivers/media/video/ov511.c b/drivers/media/video/ov511.c
+index 0bc2cf5..2bed9e2 100644
+--- a/drivers/media/video/ov511.c
++++ b/drivers/media/video/ov511.c
+@@ -5878,7 +5878,7 @@ ov51x_probe(struct usb_interface *intf, const struct usb_device_id *id)
+ 		goto error;
+ 	}
+ 
+-	mutex_lock(&ov->lock);
++	mutex_unlock(&ov->lock);
+ 
+ 	return 0;
+ 
+diff --git a/drivers/net/e100.c b/drivers/net/e100.c
+index d269a68..0c53c92 100644
+--- a/drivers/net/e100.c
++++ b/drivers/net/e100.c
+@@ -1817,6 +1817,7 @@ static int e100_alloc_cbs(struct nic *nic)
+ 				  &nic->cbs_dma_addr);
+ 	if (!nic->cbs)
+ 		return -ENOMEM;
++	memset(nic->cbs, 0, count * sizeof(struct cb));
+ 
+ 	for (cb = nic->cbs, i = 0; i < count; cb++, i++) {
+ 		cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
+@@ -1825,7 +1826,6 @@ static int e100_alloc_cbs(struct nic *nic)
+ 		cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
+ 		cb->link = cpu_to_le32(nic->cbs_dma_addr +
+ 			((i+1) % count) * sizeof(struct cb));
+-		cb->skb = NULL;
+ 	}
+ 
+ 	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
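[ For orientation: pci_alloc_consistent() is not guaranteed to return zeroed
memory, and the removed assignment cleared only cb->skb, so any other field
read before first use would hold whatever the allocator left behind. Zeroing
the whole ring once, before linking it, is the safer pattern:

	/* allocate, zero, then link the ring -- no stale fields anywhere */
	nic->cbs = pci_alloc_consistent(nic->pdev, count * sizeof(struct cb),
					&nic->cbs_dma_addr);
	if (!nic->cbs)
		return -ENOMEM;
	memset(nic->cbs, 0, count * sizeof(struct cb));
]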
+diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
+index b091e20..f14d225 100644
+--- a/drivers/net/usb/rtl8150.c
++++ b/drivers/net/usb/rtl8150.c
+@@ -324,7 +324,7 @@ static int rtl8150_set_mac_address(struct net_device *netdev, void *p)
+ 		dbg("%02X:", netdev->dev_addr[i]);
+ 	dbg("%02X\n", netdev->dev_addr[i]);
+ 	/* Set the IDR registers. */
+-	set_registers(dev, IDR, sizeof(netdev->dev_addr), netdev->dev_addr);
++	set_registers(dev, IDR, netdev->addr_len, netdev->dev_addr);
+ #ifdef EEPROM_WRITE
+ 	{
+ 	u8 cr;
+diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
+index 95a8e23..8a82c75 100644
+--- a/drivers/net/wireless/ath/ath5k/base.c
++++ b/drivers/net/wireless/ath/ath5k/base.c
+@@ -2349,6 +2349,9 @@ ath5k_init(struct ath5k_softc *sc)
+ 	 */
+ 	ath5k_stop_locked(sc);
+ 
++	/* Set PHY calibration interval */
++	ah->ah_cal_intval = ath5k_calinterval;
++
+ 	/*
+ 	 * The basic interface to setting the hardware in a good
+ 	 * state is ``reset''.  On return the hardware is known to
+@@ -2376,10 +2379,6 @@ ath5k_init(struct ath5k_softc *sc)
+ 
+ 	/* Set ack to be sent at low bit-rates */
+ 	ath5k_hw_set_ack_bitrate_high(ah, false);
+-
+-	/* Set PHY calibration inteval */
+-	ah->ah_cal_intval = ath5k_calinterval;
+-
+ 	ret = 0;
+ done:
+ 	mmiowb();
+diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
+index 57f1463..ff4383b 100644
+--- a/drivers/net/wireless/ath/ath9k/hw.h
++++ b/drivers/net/wireless/ath/ath9k/hw.h
+@@ -408,7 +408,7 @@ struct ath9k_hw_version {
+  * Using a de Bruijn sequence to look up 1's index in a 32 bit number
+  * debruijn32 = 0000 0111 0111 1100 1011 0101 0011 0001
+  */
+-#define debruijn32 0x077CB531UL
++#define debruijn32 0x077CB531U
+ 
+ struct ath_gen_timer_configuration {
+ 	u32 next_addr;
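[ For orientation: the UL->U change matters on 64-bit builds, where the
multiply-and-shift trick relies on the product wrapping modulo 2^32; an
unsigned long constant keeps the high bits and the >> then indexes past the
table. The lookup itself is the classic construction; a self-contained sketch
(the table is the standard one published for the 0x077CB531 sequence):

	static const unsigned char index32[32] = {
		 0,  1, 28,  2, 29, 14, 24,  3, 30, 22, 20, 15, 25, 17,  4,  8,
		31, 27, 13, 23, 21, 19, 16,  7, 26, 12, 18,  6, 11,  5, 10,  9
	};

	/* index of the lowest set bit of v (v must be non-zero) */
	static unsigned int lsb_index(unsigned int v)
	{
		/* v & -v isolates the lowest set bit; the multiply must
		 * truncate to 32 bits for the 5-bit window to be valid */
		return index32[((v & -v) * 0x077CB531U) >> 27];
	}
]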
+diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
+index d4d9d82..110c16d 100644
+--- a/drivers/net/wireless/ath/ath9k/mac.c
++++ b/drivers/net/wireless/ath/ath9k/mac.c
+@@ -155,7 +155,7 @@ bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q)
+ 		wait = wait_time;
+ 		while (ath9k_hw_numtxpending(ah, q)) {
+ 			if ((--wait) == 0) {
+-				DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
++				DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
+ 					"Failed to stop TX DMA in 100 "
+ 					"msec after killing last frame\n");
+ 				break;
+diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
+index ff65f85..9720c4d 100644
+--- a/drivers/net/wireless/ath/ath9k/mac.h
++++ b/drivers/net/wireless/ath/ath9k/mac.h
+@@ -77,6 +77,9 @@
+ #define ATH9K_TXERR_XTXOP          0x08
+ #define ATH9K_TXERR_TIMER_EXPIRED  0x10
+ #define ATH9K_TX_ACKED		   0x20
++#define ATH9K_TXERR_MASK						\
++	(ATH9K_TXERR_XRETRY | ATH9K_TXERR_FILT | ATH9K_TXERR_FIFO |	\
++	 ATH9K_TXERR_XTXOP | ATH9K_TXERR_TIMER_EXPIRED)
+ 
+ #define ATH9K_TX_BA                0x01
+ #define ATH9K_TX_PWRMGMT           0x02
+diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
+index 59359e3..80df8f3 100644
+--- a/drivers/net/wireless/ath/ath9k/main.c
++++ b/drivers/net/wireless/ath/ath9k/main.c
+@@ -2147,6 +2147,9 @@ static void ath9k_stop(struct ieee80211_hw *hw)
+ 		return; /* another wiphy still in use */
+ 	}
+ 
++	/* Ensure HW is awake when we try to shut it down. */
++	ath9k_ps_wakeup(sc);
++
+ 	if (sc->sc_flags & SC_OP_BTCOEX_ENABLED) {
+ 		ath9k_hw_btcoex_disable(sc->sc_ah);
+ 		if (sc->btcoex_info.btcoex_scheme == ATH_BTCOEX_CFG_3WIRE)
+@@ -2167,6 +2170,9 @@ static void ath9k_stop(struct ieee80211_hw *hw)
+ 	/* disable HAL and put h/w to sleep */
+ 	ath9k_hw_disable(sc->sc_ah);
+ 	ath9k_hw_configpcipowersave(sc->sc_ah, 1, 1);
++	ath9k_ps_restore(sc);
++
++	/* Finally, put the chip in FULL SLEEP mode */
+ 	ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP);
+ 
+ 	sc->sc_flags |= SC_OP_INVALID;
+@@ -2277,8 +2283,10 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
+ 	if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
+ 	    (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) ||
+ 	    (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT)) {
++		ath9k_ps_wakeup(sc);
+ 		ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
+ 		ath_beacon_return(sc, avp);
++		ath9k_ps_restore(sc);
+ 	}
+ 
+ 	sc->sc_flags &= ~SC_OP_BEACONS;
+@@ -2724,15 +2732,21 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
+ 	case IEEE80211_AMPDU_RX_STOP:
+ 		break;
+ 	case IEEE80211_AMPDU_TX_START:
++		ath9k_ps_wakeup(sc);
+ 		ath_tx_aggr_start(sc, sta, tid, ssn);
+ 		ieee80211_start_tx_ba_cb_irqsafe(hw, sta->addr, tid);
++		ath9k_ps_restore(sc);
+ 		break;
+ 	case IEEE80211_AMPDU_TX_STOP:
++		ath9k_ps_wakeup(sc);
+ 		ath_tx_aggr_stop(sc, sta, tid);
+ 		ieee80211_stop_tx_ba_cb_irqsafe(hw, sta->addr, tid);
++		ath9k_ps_restore(sc);
+ 		break;
+ 	case IEEE80211_AMPDU_TX_OPERATIONAL:
++		ath9k_ps_wakeup(sc);
+ 		ath_tx_aggr_resume(sc, sta, tid);
++		ath9k_ps_restore(sc);
+ 		break;
+ 	default:
+ 		DPRINTF(sc, ATH_DBG_FATAL, "Unknown AMPDU action\n");
+diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
+index d83b77f..c0d7e65 100644
+--- a/drivers/net/wireless/ath/ath9k/reg.h
++++ b/drivers/net/wireless/ath/ath9k/reg.h
+@@ -969,10 +969,10 @@ enum {
+ #define AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_S         4
+ #define AR_GPIO_INPUT_EN_VAL_RFSILENT_DEF        0x00000080
+ #define AR_GPIO_INPUT_EN_VAL_RFSILENT_DEF_S      7
++#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB      0x00000400
++#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB_S    10
+ #define AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_BB        0x00001000
+ #define AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_BB_S      12
+-#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB      0x00001000
+-#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB_S    1
+ #define AR_GPIO_INPUT_EN_VAL_RFSILENT_BB         0x00008000
+ #define AR_GPIO_INPUT_EN_VAL_RFSILENT_BB_S       15
+ #define AR_GPIO_RTC_RESET_OVERRIDE_ENABLE        0x00010000
+diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
+index 4753909..2c6b063 100644
+--- a/drivers/net/wireless/ath/ath9k/xmit.c
++++ b/drivers/net/wireless/ath/ath9k/xmit.c
+@@ -1076,10 +1076,10 @@ void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
+ 	if (npend) {
+ 		int r;
+ 
+-		DPRINTF(sc, ATH_DBG_XMIT, "Unable to stop TxDMA. Reset HAL!\n");
++		DPRINTF(sc, ATH_DBG_FATAL, "Unable to stop TxDMA. Reset HAL!\n");
+ 
+ 		spin_lock_bh(&sc->sc_resetlock);
+-		r = ath9k_hw_reset(ah, sc->sc_ah->curchan, true);
++		r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false);
+ 		if (r)
+ 			DPRINTF(sc, ATH_DBG_FATAL,
+ 				"Unable to reset hardware; reset status %d\n",
+@@ -2020,7 +2020,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
+ 		if (bf_isaggr(bf))
+ 			txq->axq_aggr_depth--;
+ 
+-		txok = !(ds->ds_txstat.ts_status & ATH9K_TXERR_FILT);
++		txok = !(ds->ds_txstat.ts_status & ATH9K_TXERR_MASK);
+ 		txq->axq_tx_inprogress = false;
+ 		spin_unlock_bh(&txq->axq_lock);
+ 
+diff --git a/drivers/net/wireless/b43/rfkill.c b/drivers/net/wireless/b43/rfkill.c
+index ffdce6f..78016ae 100644
+--- a/drivers/net/wireless/b43/rfkill.c
++++ b/drivers/net/wireless/b43/rfkill.c
+@@ -33,8 +33,14 @@ bool b43_is_hw_radio_enabled(struct b43_wldev *dev)
+ 		      & B43_MMIO_RADIO_HWENABLED_HI_MASK))
+ 			return 1;
+ 	} else {
+-		if (b43_status(dev) >= B43_STAT_STARTED &&
+-		    b43_read16(dev, B43_MMIO_RADIO_HWENABLED_LO)
++		/* To prevent CPU fault on PPC, do not read a register
++		 * unless the interface is started; however, on resume
++		 * for hibernation, this routine is entered early. When
++		 * that happens, unconditionally return TRUE.
++		 */
++		if (b43_status(dev) < B43_STAT_STARTED)
++			return 1;
++		if (b43_read16(dev, B43_MMIO_RADIO_HWENABLED_LO)
+ 		    & B43_MMIO_RADIO_HWENABLED_LO_MASK)
+ 			return 1;
+ 	}
+diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
+index f059b49..9d60f6c 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
++++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
+@@ -2895,6 +2895,7 @@ static struct iwl_cfg iwl3945_bg_cfg = {
+ 	.mod_params = &iwl3945_mod_params,
+ 	.use_isr_legacy = true,
+ 	.ht_greenfield_support = false,
++	.broken_powersave = true,
+ };
+ 
+ static struct iwl_cfg iwl3945_abg_cfg = {
+@@ -2909,6 +2910,7 @@ static struct iwl_cfg iwl3945_abg_cfg = {
+ 	.mod_params = &iwl3945_mod_params,
+ 	.use_isr_legacy = true,
+ 	.ht_greenfield_support = false,
++	.broken_powersave = true,
+ };
+ 
+ struct pci_device_id iwl3945_hw_card_ids[] = {
+diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
+index 6f703a0..f4e2e84 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
++++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
+@@ -1337,7 +1337,7 @@ static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
+ 	iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info);
+ 
+ 	/* calculate tx gain adjustment based on power supply voltage */
+-	voltage = priv->calib_info->voltage;
++	voltage = le16_to_cpu(priv->calib_info->voltage);
+ 	init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage);
+ 	voltage_compensation =
+ 	    iwl4965_get_voltage_compensation(voltage, init_voltage);
+diff --git a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
+index 4ef6804..bc056e9 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
++++ b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
+@@ -92,11 +92,15 @@
+ 
+ static inline s32 iwl_temp_calib_to_offset(struct iwl_priv *priv)
+ {
+-	u16 *temp_calib = (u16 *)iwl_eeprom_query_addr(priv,
+-						       EEPROM_5000_TEMPERATURE);
+-	/* offset =  temperature -  voltage / coef */
+-	s32 offset = (s32)(temp_calib[0] - temp_calib[1] / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF);
+-	return offset;
++	u16 temperature, voltage;
++	__le16 *temp_calib =
++		(__le16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_TEMPERATURE);
++
++	temperature = le16_to_cpu(temp_calib[0]);
++	voltage = le16_to_cpu(temp_calib[1]);
++
++	/* offset = temp - volt / coeff */
++	return (s32)(temperature - voltage / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF);
+ }
+ 
+ /* Fixed (non-configurable) rx data from phy */
+diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
+index 6e6f516..94a1225 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
++++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
+@@ -460,14 +460,15 @@ static void iwl5000_set_ct_threshold(struct iwl_priv *priv)
+ static int iwl5000_set_Xtal_calib(struct iwl_priv *priv)
+ {
+ 	struct iwl_calib_xtal_freq_cmd cmd;
+-	u16 *xtal_calib = (u16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_XTAL);
++	__le16 *xtal_calib =
++		(__le16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_XTAL);
+ 
+ 	cmd.hdr.op_code = IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD;
+ 	cmd.hdr.first_group = 0;
+ 	cmd.hdr.groups_num = 1;
+ 	cmd.hdr.data_valid = 1;
+-	cmd.cap_pin1 = (u8)xtal_calib[0];
+-	cmd.cap_pin2 = (u8)xtal_calib[1];
++	cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
++	cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]);
+ 	return iwl_calib_set(&priv->calib_results[IWL_CALIB_XTAL],
+ 			     (u8 *)&cmd, sizeof(cmd));
+ }
+diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
+index 028d505..c2d9b7a 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
++++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
+@@ -1149,7 +1149,7 @@ struct iwl_priv {
+ 	u32 last_beacon_time;
+ 	u64 last_tsf;
+ 
+-	/* eeprom */
++	/* eeprom -- this is in the card's little endian byte order */
+ 	u8 *eeprom;
+ 	int    nvm_device_type;
+ 	struct iwl_eeprom_calib_info *calib_info;
+diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+index e14c995..18dc3a4 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
++++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+@@ -337,7 +337,7 @@ static int iwl_init_otp_access(struct iwl_priv *priv)
+ 	return ret;
+ }
+ 
+-static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, u16 *eeprom_data)
++static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, __le16 *eeprom_data)
+ {
+ 	int ret = 0;
+ 	u32 r;
+@@ -370,7 +370,7 @@ static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, u16 *eeprom_data)
+ 				CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK);
+ 		IWL_ERR(priv, "Correctable OTP ECC error, continue read\n");
+ 	}
+-	*eeprom_data = le16_to_cpu((__force __le16)(r >> 16));
++	*eeprom_data = cpu_to_le16(r >> 16);
+ 	return 0;
+ }
+ 
+@@ -379,7 +379,8 @@ static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, u16 *eeprom_data)
+  */
+ static bool iwl_is_otp_empty(struct iwl_priv *priv)
+ {
+-	u16 next_link_addr = 0, link_value;
++	u16 next_link_addr = 0;
++	__le16 link_value;
+ 	bool is_empty = false;
+ 
+ 	/* locate the beginning of OTP link list */
+@@ -409,7 +410,8 @@ static bool iwl_is_otp_empty(struct iwl_priv *priv)
+ static int iwl_find_otp_image(struct iwl_priv *priv,
+ 					u16 *validblockaddr)
+ {
+-	u16 next_link_addr = 0, link_value = 0, valid_addr;
++	u16 next_link_addr = 0, valid_addr;
++	__le16 link_value = 0;
+ 	int usedblocks = 0;
+ 
+ 	/* set addressing mode to absolute to traverse the link list */
+@@ -429,7 +431,7 @@ static int iwl_find_otp_image(struct iwl_priv *priv,
+ 		 * check for more block on the link list
+ 		 */
+ 		valid_addr = next_link_addr;
+-		next_link_addr = link_value * sizeof(u16);
++		next_link_addr = le16_to_cpu(link_value) * sizeof(u16);
+ 		IWL_DEBUG_INFO(priv, "OTP blocks %d addr 0x%x\n",
+ 			       usedblocks, next_link_addr);
+ 		if (iwl_read_otp_word(priv, next_link_addr, &link_value))
+@@ -463,7 +465,7 @@ static int iwl_find_otp_image(struct iwl_priv *priv,
+  */
+ int iwl_eeprom_init(struct iwl_priv *priv)
+ {
+-	u16 *e;
++	__le16 *e;
+ 	u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
+ 	int sz;
+ 	int ret;
+@@ -482,7 +484,7 @@ int iwl_eeprom_init(struct iwl_priv *priv)
+ 		ret = -ENOMEM;
+ 		goto alloc_err;
+ 	}
+-	e = (u16 *)priv->eeprom;
++	e = (__le16 *)priv->eeprom;
+ 
+ 	ret = priv->cfg->ops->lib->eeprom_ops.verify_signature(priv);
+ 	if (ret < 0) {
+@@ -521,7 +523,7 @@ int iwl_eeprom_init(struct iwl_priv *priv)
+ 		}
+ 		for (addr = validblockaddr; addr < validblockaddr + sz;
+ 		     addr += sizeof(u16)) {
+-			u16 eeprom_data;
++			__le16 eeprom_data;
+ 
+ 			ret = iwl_read_otp_word(priv, addr, &eeprom_data);
+ 			if (ret)
+@@ -545,7 +547,7 @@ int iwl_eeprom_init(struct iwl_priv *priv)
+ 				goto done;
+ 			}
+ 			r = _iwl_read_direct32(priv, CSR_EEPROM_REG);
+-			e[addr / 2] = le16_to_cpu((__force __le16)(r >> 16));
++			e[addr / 2] = cpu_to_le16(r >> 16);
+ 		}
+ 	}
+ 	ret = 0;
+@@ -709,7 +711,8 @@ static int iwl_mod_ht40_chan_info(struct iwl_priv *priv,
+ 	ch_info->ht40_min_power = 0;
+ 	ch_info->ht40_scan_power = eeprom_ch->max_power_avg;
+ 	ch_info->ht40_flags = eeprom_ch->flags;
+-	ch_info->ht40_extension_channel &= ~clear_ht40_extension_channel;
++	if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
++		ch_info->ht40_extension_channel &= ~clear_ht40_extension_channel;
+ 
+ 	return 0;
+ }
+diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+index 80b9e45..fc93f12 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
++++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+@@ -133,7 +133,7 @@ struct iwl_eeprom_channel {
+  *
+  */
+ struct iwl_eeprom_enhanced_txpwr {
+-	u16 reserved;
++	__le16 common;
+ 	s8 chain_a_max;
+ 	s8 chain_b_max;
+ 	s8 chain_c_max;
+@@ -347,7 +347,7 @@ struct iwl_eeprom_calib_subband_info {
+ struct iwl_eeprom_calib_info {
+ 	u8 saturation_power24;	/* half-dBm (e.g. "34" = 17 dBm) */
+ 	u8 saturation_power52;	/* half-dBm */
+-	s16 voltage;		/* signed */
++	__le16 voltage;		/* signed */
+ 	struct iwl_eeprom_calib_subband_info
+ 		band_info[EEPROM_TX_POWER_BANDS];
+ } __attribute__ ((packed));
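[ For orientation: this series converts raw EEPROM/OTP storage from u16 to
__le16 so that sparse can flag missed byte swaps; data stays in card
(little-endian) order until a single le16_to_cpu() at the point of use, which
is correct on both little- and big-endian hosts. The annotation pattern, with
hypothetical names:

	struct example_calib {
		__le16 voltage;		/* card order; signed on the wire */
	} __attribute__ ((packed));

	static s32 example_voltage(const struct example_calib *c)
	{
		/* convert once, at the point of use; the s16 cast
		 * preserves the sign of the stored value */
		return (s32)(s16)le16_to_cpu(c->voltage);
	}
]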
+diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
+index d00a803..5f26c93 100644
+--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
++++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
+@@ -562,6 +562,9 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
+ 	txq = &priv->txq[txq_id];
+ 	q = &txq->q;
+ 
++	if ((iwl_queue_space(q) < q->high_mark))
++		goto drop;
++
+ 	spin_lock_irqsave(&priv->lock, flags);
+ 
+ 	idx = get_cmd_index(q, q->write_ptr, 0);
+@@ -3854,9 +3857,11 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
+ 	/* Tell mac80211 our characteristics */
+ 	hw->flags = IEEE80211_HW_SIGNAL_DBM |
+ 		    IEEE80211_HW_NOISE_DBM |
+-		    IEEE80211_HW_SPECTRUM_MGMT |
+-		    IEEE80211_HW_SUPPORTS_PS |
+-		    IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
++		    IEEE80211_HW_SPECTRUM_MGMT;
++
++	if (!priv->cfg->broken_powersave)
++		hw->flags |= IEEE80211_HW_SUPPORTS_PS |
++			     IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
+ 
+ 	hw->wiphy->interface_modes =
+ 		BIT(NL80211_IFTYPE_STATION) |
+diff --git a/drivers/net/wireless/iwmc3200wifi/iwm.h b/drivers/net/wireless/iwmc3200wifi/iwm.h
+index 1b02a4e..93c8989 100644
+--- a/drivers/net/wireless/iwmc3200wifi/iwm.h
++++ b/drivers/net/wireless/iwmc3200wifi/iwm.h
+@@ -258,7 +258,7 @@ struct iwm_priv {
+ 
+ 	struct sk_buff_head rx_list;
+ 	struct list_head rx_tickets;
+-	struct list_head rx_packets[IWM_RX_ID_HASH];
++	struct list_head rx_packets[IWM_RX_ID_HASH + 1];
+ 	struct workqueue_struct *rx_wq;
+ 	struct work_struct rx_worker;
+ 
+diff --git a/drivers/net/wireless/libertas/wext.c b/drivers/net/wireless/libertas/wext.c
+index be837a0..01c738b 100644
+--- a/drivers/net/wireless/libertas/wext.c
++++ b/drivers/net/wireless/libertas/wext.c
+@@ -1953,10 +1953,8 @@ static int lbs_get_essid(struct net_device *dev, struct iw_request_info *info,
+ 	if (priv->connect_status == LBS_CONNECTED) {
+ 		memcpy(extra, priv->curbssparams.ssid,
+ 		       priv->curbssparams.ssid_len);
+-		extra[priv->curbssparams.ssid_len] = '\0';
+ 	} else {
+ 		memset(extra, 0, 32);
+-		extra[priv->curbssparams.ssid_len] = '\0';
+ 	}
+ 	/*
+ 	 * If none, we may want to get the one that was set
+diff --git a/drivers/net/wireless/orinoco/wext.c b/drivers/net/wireless/orinoco/wext.c
+index 7698fdd..31ca241 100644
+--- a/drivers/net/wireless/orinoco/wext.c
++++ b/drivers/net/wireless/orinoco/wext.c
+@@ -23,7 +23,7 @@
+ #define MAX_RID_LEN 1024
+ 
+ /* Helper routine to record keys
+- * Do not call from interrupt context */
++ * It is called under orinoco_lock so it may not sleep */
+ static int orinoco_set_key(struct orinoco_private *priv, int index,
+ 			   enum orinoco_alg alg, const u8 *key, int key_len,
+ 			   const u8 *seq, int seq_len)
+@@ -32,14 +32,14 @@ static int orinoco_set_key(struct orinoco_private *priv, int index,
+ 	kzfree(priv->keys[index].seq);
+ 
+ 	if (key_len) {
+-		priv->keys[index].key = kzalloc(key_len, GFP_KERNEL);
++		priv->keys[index].key = kzalloc(key_len, GFP_ATOMIC);
+ 		if (!priv->keys[index].key)
+ 			goto nomem;
+ 	} else
+ 		priv->keys[index].key = NULL;
+ 
+ 	if (seq_len) {
+-		priv->keys[index].seq = kzalloc(seq_len, GFP_KERNEL);
++		priv->keys[index].seq = kzalloc(seq_len, GFP_ATOMIC);
+ 		if (!priv->keys[index].seq)
+ 			goto free_key;
+ 	} else
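[ For orientation: per the updated comment, orinoco_set_key() runs under
orinoco_lock, and GFP_KERNEL allocations may sleep on memory reclaim, which is
forbidden while holding a spinlock; hence GFP_ATOMIC. The general rule, as a
sketch with illustrative names (lock, flags, key_len):

	spin_lock_irqsave(&priv->lock, flags);
	/* in atomic context: the allocation must not sleep */
	key = kzalloc(key_len, GFP_ATOMIC);	/* GFP_KERNEL could sleep here */
	if (key)
		install_key(priv, key);
	spin_unlock_irqrestore(&priv->lock, flags);
]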
+diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
+index b20e3ea..9a6ceb4 100644
+--- a/drivers/net/wireless/rt2x00/rt61pci.c
++++ b/drivers/net/wireless/rt2x00/rt61pci.c
+@@ -2538,6 +2538,11 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
+ 	unsigned int i;
+ 
+ 	/*
++	 * Disable powersaving as default.
++	 */
++	rt2x00dev->hw->wiphy->ps_default = false;
++
++	/*
+ 	 * Initialize all hw fields.
+ 	 */
+ 	rt2x00dev->hw->flags =
+diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
+index d8b4229..4d922e4 100644
+--- a/drivers/platform/x86/acerhdf.c
++++ b/drivers/platform/x86/acerhdf.c
+@@ -640,9 +640,10 @@ static void __exit acerhdf_exit(void)
+ MODULE_LICENSE("GPL");
+ MODULE_AUTHOR("Peter Feuerer");
+ MODULE_DESCRIPTION("Aspire One temperature and fan driver");
+-MODULE_ALIAS("dmi:*:*Acer*:*:");
+-MODULE_ALIAS("dmi:*:*Gateway*:*:");
+-MODULE_ALIAS("dmi:*:*Packard Bell*:*:");
++MODULE_ALIAS("dmi:*:*Acer*:pnAOA*:");
++MODULE_ALIAS("dmi:*:*Gateway*:pnAOA*:");
++MODULE_ALIAS("dmi:*:*Packard Bell*:pnAOA*:");
++MODULE_ALIAS("dmi:*:*Packard Bell*:pnDOA*:");
+ 
+ module_init(acerhdf_init);
+ module_exit(acerhdf_exit);
+diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
+index 4e49b4a..8174ec9 100644
+--- a/drivers/s390/block/dasd_diag.c
++++ b/drivers/s390/block/dasd_diag.c
+@@ -145,6 +145,15 @@ dasd_diag_erp(struct dasd_device *device)
+ 
+ 	mdsk_term_io(device);
+ 	rc = mdsk_init_io(device, device->block->bp_block, 0, NULL);
++	if (rc == 4) {
++		if (!(device->features & DASD_FEATURE_READONLY)) {
++			dev_warn(&device->cdev->dev,
++				 "The access mode of a DIAG device changed"
++				 " to read-only");
++			device->features |= DASD_FEATURE_READONLY;
++		}
++		rc = 0;
++	}
+ 	if (rc)
+ 		dev_warn(&device->cdev->dev, "DIAG ERP failed with "
+ 			    "rc=%d\n", rc);
+@@ -433,16 +442,20 @@ dasd_diag_check_device(struct dasd_device *device)
+ 	for (sb = 512; sb < bsize; sb = sb << 1)
+ 		block->s2b_shift++;
+ 	rc = mdsk_init_io(device, block->bp_block, 0, NULL);
+-	if (rc) {
++	if (rc && (rc != 4)) {
+ 		dev_warn(&device->cdev->dev, "DIAG initialization "
+ 			"failed with rc=%d\n", rc);
+ 		rc = -EIO;
+ 	} else {
++		if (rc == 4)
++			device->features |= DASD_FEATURE_READONLY;
+ 		dev_info(&device->cdev->dev,
+-			 "New DASD with %ld byte/block, total size %ld KB\n",
++			 "New DASD with %ld byte/block, total size %ld KB%s\n",
+ 			 (unsigned long) block->bp_block,
+ 			 (unsigned long) (block->blocks <<
+-					  block->s2b_shift) >> 1);
++					  block->s2b_shift) >> 1,
++			 (rc == 4) ? ", read-only device" : "");
++		rc = 0;
+ 	}
+ out_label:
+ 	free_page((long) label);
+diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
+index 76d294f..c3ff9a6 100644
+--- a/drivers/scsi/ipr.c
++++ b/drivers/scsi/ipr.c
+@@ -6516,6 +6516,7 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
+ 	int rc;
+ 
+ 	ENTER;
++	ioa_cfg->pdev->state_saved = true;
+ 	rc = pci_restore_state(ioa_cfg->pdev);
+ 
+ 	if (rc != PCIBIOS_SUCCESSFUL) {
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index b79fca7..06bbe0d 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -2016,13 +2016,13 @@ skip_dpc:
+ 	DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n",
+ 	    base_vha->host_no, ha));
+ 
+-	base_vha->flags.init_done = 1;
+-	base_vha->flags.online = 1;
+-
+ 	ret = scsi_add_host(host, &pdev->dev);
+ 	if (ret)
+ 		goto probe_failed;
+ 
++	base_vha->flags.init_done = 1;
++	base_vha->flags.online = 1;
++
+ 	ha->isp_ops->enable_intrs(ha);
+ 
+ 	scsi_scan_host(host);
+diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
+index c6f70da..45be82f 100644
+--- a/drivers/scsi/scsi_transport_fc.c
++++ b/drivers/scsi/scsi_transport_fc.c
+@@ -648,11 +648,22 @@ static __init int fc_transport_init(void)
+ 		return error;
+ 	error = transport_class_register(&fc_vport_class);
+ 	if (error)
+-		return error;
++		goto unreg_host_class;
+ 	error = transport_class_register(&fc_rport_class);
+ 	if (error)
+-		return error;
+-	return transport_class_register(&fc_transport_class);
++		goto unreg_vport_class;
++	error = transport_class_register(&fc_transport_class);
++	if (error)
++		goto unreg_rport_class;
++	return 0;
++
++unreg_rport_class:
++	transport_class_unregister(&fc_rport_class);
++unreg_vport_class:
++	transport_class_unregister(&fc_vport_class);
++unreg_host_class:
++	transport_class_unregister(&fc_host_class);
++	return error;
+ }
+ 
+ static void __exit fc_transport_exit(void)
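[ For orientation: previously a failure in a later transport_class_register()
returned without unregistering the classes registered earlier. The fix is the
usual kernel goto-unwind idiom; a generic sketch with placeholder names
(register_a/b/c and their unregister counterparts):

	static int __init example_init(void)
	{
		int err;

		err = register_a();
		if (err)
			return err;
		err = register_b();
		if (err)
			goto unreg_a;
		err = register_c();
		if (err)
			goto unreg_b;
		return 0;

	unreg_b:		/* unwind in reverse order of setup */
		unregister_b();
	unreg_a:
		unregister_a();
		return err;
	}
]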
+diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
+index 12d58a7..5081f97 100644
+--- a/drivers/scsi/st.c
++++ b/drivers/scsi/st.c
+@@ -552,13 +552,15 @@ st_do_scsi(struct st_request * SRpnt, struct scsi_tape * STp, unsigned char *cmd
+ 	SRpnt->waiting = waiting;
+ 
+ 	if (STp->buffer->do_dio) {
++		mdata->page_order = 0;
+ 		mdata->nr_entries = STp->buffer->sg_segs;
+ 		mdata->pages = STp->buffer->mapped_pages;
+ 	} else {
++		mdata->page_order = STp->buffer->reserved_page_order;
+ 		mdata->nr_entries =
+ 			DIV_ROUND_UP(bytes, PAGE_SIZE << mdata->page_order);
+-		STp->buffer->map_data.pages = STp->buffer->reserved_pages;
+-		STp->buffer->map_data.offset = 0;
++		mdata->pages = STp->buffer->reserved_pages;
++		mdata->offset = 0;
+ 	}
+ 
+ 	memcpy(SRpnt->cmd, cmd, sizeof(SRpnt->cmd));
+@@ -3718,7 +3720,7 @@ static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dm
+ 		priority |= __GFP_ZERO;
+ 
+ 	if (STbuffer->frp_segs) {
+-		order = STbuffer->map_data.page_order;
++		order = STbuffer->reserved_page_order;
+ 		b_size = PAGE_SIZE << order;
+ 	} else {
+ 		for (b_size = PAGE_SIZE, order = 0;
+@@ -3751,7 +3753,7 @@ static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dm
+ 		segs++;
+ 	}
+ 	STbuffer->b_data = page_address(STbuffer->reserved_pages[0]);
+-	STbuffer->map_data.page_order = order;
++	STbuffer->reserved_page_order = order;
+ 
+ 	return 1;
+ }
+@@ -3764,7 +3766,7 @@ static void clear_buffer(struct st_buffer * st_bp)
+ 
+ 	for (i=0; i < st_bp->frp_segs; i++)
+ 		memset(page_address(st_bp->reserved_pages[i]), 0,
+-		       PAGE_SIZE << st_bp->map_data.page_order);
++		       PAGE_SIZE << st_bp->reserved_page_order);
+ 	st_bp->cleared = 1;
+ }
+ 
+@@ -3772,7 +3774,7 @@ static void clear_buffer(struct st_buffer * st_bp)
+ /* Release the extra buffer */
+ static void normalize_buffer(struct st_buffer * STbuffer)
+ {
+-	int i, order = STbuffer->map_data.page_order;
++	int i, order = STbuffer->reserved_page_order;
+ 
+ 	for (i = 0; i < STbuffer->frp_segs; i++) {
+ 		__free_pages(STbuffer->reserved_pages[i], order);
+@@ -3780,7 +3782,7 @@ static void normalize_buffer(struct st_buffer * STbuffer)
+ 	}
+ 	STbuffer->frp_segs = 0;
+ 	STbuffer->sg_segs = 0;
+-	STbuffer->map_data.page_order = 0;
++	STbuffer->reserved_page_order = 0;
+ 	STbuffer->map_data.offset = 0;
+ }
+ 
+@@ -3790,7 +3792,7 @@ static void normalize_buffer(struct st_buffer * STbuffer)
+ static int append_to_buffer(const char __user *ubp, struct st_buffer * st_bp, int do_count)
+ {
+ 	int i, cnt, res, offset;
+-	int length = PAGE_SIZE << st_bp->map_data.page_order;
++	int length = PAGE_SIZE << st_bp->reserved_page_order;
+ 
+ 	for (i = 0, offset = st_bp->buffer_bytes;
+ 	     i < st_bp->frp_segs && offset >= length; i++)
+@@ -3822,7 +3824,7 @@ static int append_to_buffer(const char __user *ubp, struct st_buffer * st_bp, in
+ static int from_buffer(struct st_buffer * st_bp, char __user *ubp, int do_count)
+ {
+ 	int i, cnt, res, offset;
+-	int length = PAGE_SIZE << st_bp->map_data.page_order;
++	int length = PAGE_SIZE << st_bp->reserved_page_order;
+ 
+ 	for (i = 0, offset = st_bp->read_pointer;
+ 	     i < st_bp->frp_segs && offset >= length; i++)
+@@ -3855,7 +3857,7 @@ static void move_buffer_data(struct st_buffer * st_bp, int offset)
+ {
+ 	int src_seg, dst_seg, src_offset = 0, dst_offset;
+ 	int count, total;
+-	int length = PAGE_SIZE << st_bp->map_data.page_order;
++	int length = PAGE_SIZE << st_bp->reserved_page_order;
+ 
+ 	if (offset == 0)
+ 		return;
+@@ -4577,7 +4579,6 @@ static int sgl_map_user_pages(struct st_buffer *STbp,
+         }
+ 
+ 	mdata->offset = uaddr & ~PAGE_MASK;
+-	mdata->page_order = 0;
+ 	STbp->mapped_pages = pages;
+ 
+ 	return nr_pages;
+diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h
+index 544dc6b..f91a67c 100644
+--- a/drivers/scsi/st.h
++++ b/drivers/scsi/st.h
+@@ -46,6 +46,7 @@ struct st_buffer {
+ 	struct st_request *last_SRpnt;
+ 	struct st_cmdstatus cmdstat;
+ 	struct page **reserved_pages;
++	int reserved_page_order;
+ 	struct page **mapped_pages;
+ 	struct rq_map_data map_data;
+ 	unsigned char *b_data;
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 0f857e6..8b0c235 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -1612,12 +1612,12 @@ static inline void announce_device(struct usb_device *udev) { }
+ #endif
+ 
+ /**
+- * usb_configure_device_otg - FIXME (usbcore-internal)
++ * usb_enumerate_device_otg - FIXME (usbcore-internal)
+  * @udev: newly addressed device (in ADDRESS state)
+  *
+- * Do configuration for On-The-Go devices
++ * Finish enumeration for On-The-Go devices
+  */
+-static int usb_configure_device_otg(struct usb_device *udev)
++static int usb_enumerate_device_otg(struct usb_device *udev)
+ {
+ 	int err = 0;
+ 
+@@ -1688,7 +1688,7 @@ fail:
+ 
+ 
+ /**
+- * usb_configure_device - Detect and probe device intfs/otg (usbcore-internal)
++ * usb_enumerate_device - Read device configs/intfs/otg (usbcore-internal)
+  * @udev: newly addressed device (in ADDRESS state)
+  *
+  * This is only called by usb_new_device() and usb_authorize_device()
+@@ -1699,7 +1699,7 @@ fail:
+  * the string descriptors, as they will be errored out by the device
+  * until it has been authorized.
+  */
+-static int usb_configure_device(struct usb_device *udev)
++static int usb_enumerate_device(struct usb_device *udev)
+ {
+ 	int err;
+ 
+@@ -1723,7 +1723,7 @@ static int usb_configure_device(struct usb_device *udev)
+ 						      udev->descriptor.iManufacturer);
+ 		udev->serial = usb_cache_string(udev, udev->descriptor.iSerialNumber);
+ 	}
+-	err = usb_configure_device_otg(udev);
++	err = usb_enumerate_device_otg(udev);
+ fail:
+ 	return err;
+ }
+@@ -1733,8 +1733,8 @@ fail:
+  * usb_new_device - perform initial device setup (usbcore-internal)
+  * @udev: newly addressed device (in ADDRESS state)
+  *
+- * This is called with devices which have been enumerated, but not yet
+- * configured.  The device descriptor is available, but not descriptors
++ * This is called with devices which have been detected but not fully
++ * enumerated.  The device descriptor is available, but not descriptors
+  * for any device configuration.  The caller must have locked either
+  * the parent hub (if udev is a normal device) or else the
+  * usb_bus_list_lock (if udev is a root hub).  The parent's pointer to
+@@ -1757,8 +1757,8 @@ int usb_new_device(struct usb_device *udev)
+ 	if (udev->parent)
+ 		usb_autoresume_device(udev->parent);
+ 
+-	usb_detect_quirks(udev);		/* Determine quirks */
+-	err = usb_configure_device(udev);	/* detect & probe dev/intfs */
++	usb_detect_quirks(udev);
++	err = usb_enumerate_device(udev);	/* Read descriptors */
+ 	if (err < 0)
+ 		goto fail;
+ 	dev_dbg(&udev->dev, "udev %d, busnum %d, minor = %d\n",
+@@ -1803,21 +1803,23 @@ fail:
+  */
+ int usb_deauthorize_device(struct usb_device *usb_dev)
+ {
+-	unsigned cnt;
+ 	usb_lock_device(usb_dev);
+ 	if (usb_dev->authorized == 0)
+ 		goto out_unauthorized;
++
+ 	usb_dev->authorized = 0;
+ 	usb_set_configuration(usb_dev, -1);
++
++	kfree(usb_dev->product);
+ 	usb_dev->product = kstrdup("n/a (unauthorized)", GFP_KERNEL);
++	kfree(usb_dev->manufacturer);
+ 	usb_dev->manufacturer = kstrdup("n/a (unauthorized)", GFP_KERNEL);
++	kfree(usb_dev->serial);
+ 	usb_dev->serial = kstrdup("n/a (unauthorized)", GFP_KERNEL);
+-	kfree(usb_dev->config);
+-	usb_dev->config = NULL;
+-	for (cnt = 0; cnt < usb_dev->descriptor.bNumConfigurations; cnt++)
+-		kfree(usb_dev->rawdescriptors[cnt]);
++
++	usb_destroy_configuration(usb_dev);
+ 	usb_dev->descriptor.bNumConfigurations = 0;
+-	kfree(usb_dev->rawdescriptors);
++
+ out_unauthorized:
+ 	usb_unlock_device(usb_dev);
+ 	return 0;
+@@ -1827,15 +1829,11 @@ out_unauthorized:
+ int usb_authorize_device(struct usb_device *usb_dev)
+ {
+ 	int result = 0, c;
++
+ 	usb_lock_device(usb_dev);
+ 	if (usb_dev->authorized == 1)
+ 		goto out_authorized;
+-	kfree(usb_dev->product);
+-	usb_dev->product = NULL;
+-	kfree(usb_dev->manufacturer);
+-	usb_dev->manufacturer = NULL;
+-	kfree(usb_dev->serial);
+-	usb_dev->serial = NULL;
++
+ 	result = usb_autoresume_device(usb_dev);
+ 	if (result < 0) {
+ 		dev_err(&usb_dev->dev,
+@@ -1848,10 +1846,18 @@ int usb_authorize_device(struct usb_device *usb_dev)
+ 			"authorization: %d\n", result);
+ 		goto error_device_descriptor;
+ 	}
++
++	kfree(usb_dev->product);
++	usb_dev->product = NULL;
++	kfree(usb_dev->manufacturer);
++	usb_dev->manufacturer = NULL;
++	kfree(usb_dev->serial);
++	usb_dev->serial = NULL;
++
+ 	usb_dev->authorized = 1;
+-	result = usb_configure_device(usb_dev);
++	result = usb_enumerate_device(usb_dev);
+ 	if (result < 0)
+-		goto error_configure;
++		goto error_enumerate;
+ 	/* Choose and set the configuration.  This registers the interfaces
+ 	 * with the driver core and lets interface drivers bind to them.
+ 	 */
+@@ -1866,8 +1872,10 @@ int usb_authorize_device(struct usb_device *usb_dev)
+ 		}
+ 	}
+ 	dev_info(&usb_dev->dev, "authorized to connect\n");
+-error_configure:
++
++error_enumerate:
+ error_device_descriptor:
++	usb_autosuspend_device(usb_dev);
+ error_autoresume:
+ out_authorized:
+ 	usb_unlock_device(usb_dev);	// complements locktree
+diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
+index 7ec3041..8752e55 100644
+--- a/drivers/usb/core/sysfs.c
++++ b/drivers/usb/core/sysfs.c
+@@ -82,9 +82,13 @@ static ssize_t  show_##name(struct device *dev,				\
+ 		struct device_attribute *attr, char *buf)		\
+ {									\
+ 	struct usb_device *udev;					\
++	int retval;							\
+ 									\
+ 	udev = to_usb_device(dev);					\
+-	return sprintf(buf, "%s\n", udev->name);			\
++	usb_lock_device(udev);						\
++	retval = sprintf(buf, "%s\n", udev->name);			\
++	usb_unlock_device(udev);					\
++	return retval;							\
+ }									\
+ static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL);
+ 
+diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
+index 1d8e39a..62ff5e7 100644
+--- a/drivers/usb/misc/appledisplay.c
++++ b/drivers/usb/misc/appledisplay.c
+@@ -72,8 +72,8 @@ struct appledisplay {
+ 	struct usb_device *udev;	/* usb device */
+ 	struct urb *urb;		/* usb request block */
+ 	struct backlight_device *bd;	/* backlight device */
+-	char *urbdata;			/* interrupt URB data buffer */
+-	char *msgdata;			/* control message data buffer */
++	u8 *urbdata;			/* interrupt URB data buffer */
++	u8 *msgdata;			/* control message data buffer */
+ 
+ 	struct delayed_work work;
+ 	int button_pressed;
+diff --git a/drivers/usb/misc/emi62.c b/drivers/usb/misc/emi62.c
+index 602ee05..59860b3 100644
+--- a/drivers/usb/misc/emi62.c
++++ b/drivers/usb/misc/emi62.c
+@@ -167,7 +167,7 @@ static int emi62_load_firmware (struct usb_device *dev)
+ 			err("%s - error loading firmware: error = %d", __func__, err);
+ 			goto wraperr;
+ 		}
+-	} while (i > 0);
++	} while (rec);
+ 
+ 	/* Assert reset (stop the CPU in the EMI) */
+ 	err = emi62_set_reset(dev,1);
+diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
+index 1c44b97..067e5a9 100644
+--- a/drivers/usb/musb/musb_gadget_ep0.c
++++ b/drivers/usb/musb/musb_gadget_ep0.c
+@@ -647,7 +647,7 @@ irqreturn_t musb_g_ep0_irq(struct musb *musb)
+ 			musb->ep0_state = MUSB_EP0_STAGE_STATUSIN;
+ 			break;
+ 		default:
+-			ERR("SetupEnd came in a wrong ep0stage %s",
++			ERR("SetupEnd came in a wrong ep0stage %s\n",
+ 			    decode_ep0stage(musb->ep0_state));
+ 		}
+ 		csr = musb_readw(regs, MUSB_CSR0);
+@@ -770,12 +770,18 @@ setup:
+ 				handled = service_zero_data_request(
+ 						musb, &setup);
+ 
++				/*
++				 * We're expecting no data in any case, so
++				 * always set the DATAEND bit -- doing this
++				 * here helps avoid SetupEnd interrupt coming
++				 * in the idle stage when we're stalling...
++				 */
++				musb->ackpend |= MUSB_CSR0_P_DATAEND;
++
+ 				/* status stage might be immediate */
+-				if (handled > 0) {
+-					musb->ackpend |= MUSB_CSR0_P_DATAEND;
++				if (handled > 0)
+ 					musb->ep0_state =
+ 						MUSB_EP0_STAGE_STATUSIN;
+-				}
+ 				break;
+ 
+ 			/* sequence #1 (IN to host), includes GET_STATUS
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index dffc8a1..be3dff1 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -340,6 +340,10 @@ static int  option_resume(struct usb_serial *serial);
+ #define FOUR_G_SYSTEMS_VENDOR_ID		0x1c9e
+ #define FOUR_G_SYSTEMS_PRODUCT_W14		0x9603
+ 
++/* Haier products */
++#define HAIER_VENDOR_ID				0x201e
++#define HAIER_PRODUCT_CE100			0x2009
++
+ static struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
+ 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
+@@ -641,6 +645,7 @@ static struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
+ 	{ USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
+ 	{ USB_DEVICE(FOUR_G_SYSTEMS_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14) },
++	{ USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
+ 	{ } /* Terminating entry */
+ };
+ MODULE_DEVICE_TABLE(usb, option_ids);
+diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
+index 3800da7..649fcdf 100644
+--- a/drivers/xen/xenbus/xenbus_probe.c
++++ b/drivers/xen/xenbus/xenbus_probe.c
+@@ -843,7 +843,7 @@ postcore_initcall(xenbus_probe_init);
+ 
+ MODULE_LICENSE("GPL");
+ 
+-static int is_disconnected_device(struct device *dev, void *data)
++static int is_device_connecting(struct device *dev, void *data)
+ {
+ 	struct xenbus_device *xendev = to_xenbus_device(dev);
+ 	struct device_driver *drv = data;
+@@ -861,14 +861,15 @@ static int is_disconnected_device(struct device *dev, void *data)
+ 		return 0;
+ 
+ 	xendrv = to_xenbus_driver(dev->driver);
+-	return (xendev->state != XenbusStateConnected ||
+-		(xendrv->is_ready && !xendrv->is_ready(xendev)));
++	return (xendev->state < XenbusStateConnected ||
++		(xendev->state == XenbusStateConnected &&
++		 xendrv->is_ready && !xendrv->is_ready(xendev)));
+ }
+ 
+-static int exists_disconnected_device(struct device_driver *drv)
++static int exists_connecting_device(struct device_driver *drv)
+ {
+ 	return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
+-				is_disconnected_device);
++				is_device_connecting);
+ }
+ 
+ static int print_device_status(struct device *dev, void *data)
+@@ -884,10 +885,13 @@ static int print_device_status(struct device *dev, void *data)
+ 		/* Information only: is this too noisy? */
+ 		printk(KERN_INFO "XENBUS: Device with no driver: %s\n",
+ 		       xendev->nodename);
+-	} else if (xendev->state != XenbusStateConnected) {
++	} else if (xendev->state < XenbusStateConnected) {
++		enum xenbus_state rstate = XenbusStateUnknown;
++		if (xendev->otherend)
++			rstate = xenbus_read_driver_state(xendev->otherend);
+ 		printk(KERN_WARNING "XENBUS: Timeout connecting "
+-		       "to device: %s (state %d)\n",
+-		       xendev->nodename, xendev->state);
++		       "to device: %s (local state %d, remote state %d)\n",
++		       xendev->nodename, xendev->state, rstate);
+ 	}
+ 
+ 	return 0;
+@@ -897,7 +901,7 @@ static int print_device_status(struct device *dev, void *data)
+ static int ready_to_wait_for_devices;
+ 
+ /*
+- * On a 10 second timeout, wait for all devices currently configured.  We need
++ * On a 5-minute timeout, wait for all devices currently configured.  We need
+  * to do this to guarantee that the filesystems and / or network devices
+  * needed for boot are available, before we can allow the boot to proceed.
+  *
+@@ -912,18 +916,30 @@ static int ready_to_wait_for_devices;
+  */
+ static void wait_for_devices(struct xenbus_driver *xendrv)
+ {
+-	unsigned long timeout = jiffies + 10*HZ;
++	unsigned long start = jiffies;
+ 	struct device_driver *drv = xendrv ? &xendrv->driver : NULL;
++	unsigned int seconds_waited = 0;
+ 
+ 	if (!ready_to_wait_for_devices || !xen_domain())
+ 		return;
+ 
+-	while (exists_disconnected_device(drv)) {
+-		if (time_after(jiffies, timeout))
+-			break;
++	while (exists_connecting_device(drv)) {
++		if (time_after(jiffies, start + (seconds_waited+5)*HZ)) {
++			if (!seconds_waited)
++				printk(KERN_WARNING "XENBUS: Waiting for "
++				       "devices to initialise: ");
++			seconds_waited += 5;
++			printk("%us...", 300 - seconds_waited);
++			if (seconds_waited == 300)
++				break;
++		}
++
+ 		schedule_timeout_interruptible(HZ/10);
+ 	}
+ 
++	if (seconds_waited)
++		printk("\n");
++
+ 	bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
+ 			 print_device_status);
+ }
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 63ea83f..3bbcaa7 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -2287,12 +2287,12 @@ int
+ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
+ 		char *mount_data_global, const char *devname)
+ {
+-	int rc = 0;
++	int rc;
+ 	int xid;
+ 	struct smb_vol *volume_info;
+-	struct cifsSesInfo *pSesInfo = NULL;
+-	struct cifsTconInfo *tcon = NULL;
+-	struct TCP_Server_Info *srvTcp = NULL;
++	struct cifsSesInfo *pSesInfo;
++	struct cifsTconInfo *tcon;
++	struct TCP_Server_Info *srvTcp;
+ 	char   *full_path;
+ 	char *mount_data = mount_data_global;
+ #ifdef CONFIG_CIFS_DFS_UPCALL
+@@ -2301,6 +2301,10 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
+ 	int referral_walks_count = 0;
+ try_mount_again:
+ #endif
++	rc = 0;
++	tcon = NULL;
++	pSesInfo = NULL;
++	srvTcp = NULL;
+ 	full_path = NULL;
+ 
+ 	xid = GetXid();
+@@ -2597,6 +2601,7 @@ remote_path_check:
+ 
+ 			cleanup_volume_info(&volume_info);
+ 			referral_walks_count++;
++			FreeXid(xid);
+ 			goto try_mount_again;
+ 		}
+ #else /* No DFS support, return error on mount */
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index bd2a9dd..d0a2afb 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -698,6 +698,10 @@ struct ext4_inode_info {
+ 	__u16 i_extra_isize;
+ 
+ 	spinlock_t i_block_reservation_lock;
++#ifdef CONFIG_QUOTA
++	/* quota space reservation, managed internally by quota code */
++	qsize_t i_reserved_quota;
++#endif
+ 
+ 	/* completed async DIOs that might need unwritten extents handling */
+ 	struct list_head i_aio_dio_complete_list;
+@@ -1432,7 +1436,7 @@ extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks);
+ extern int ext4_block_truncate_page(handle_t *handle,
+ 		struct address_space *mapping, loff_t from);
+ extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
+-extern qsize_t ext4_get_reserved_space(struct inode *inode);
++extern qsize_t *ext4_get_reserved_space(struct inode *inode);
+ extern int flush_aio_dio_completed_IO(struct inode *inode);
+ /* ioctl.c */
+ extern long ext4_ioctl(struct file *, unsigned int, unsigned long);
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 1dae9a4..e233879 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -1045,17 +1045,12 @@ out:
+ 	return err;
+ }
+ 
+-qsize_t ext4_get_reserved_space(struct inode *inode)
++#ifdef CONFIG_QUOTA
++qsize_t *ext4_get_reserved_space(struct inode *inode)
+ {
+-	unsigned long long total;
+-
+-	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
+-	total = EXT4_I(inode)->i_reserved_data_blocks +
+-		EXT4_I(inode)->i_reserved_meta_blocks;
+-	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
+-
+-	return (total << inode->i_blkbits);
++	return &EXT4_I(inode)->i_reserved_quota;
+ }
++#endif
+ /*
+  * Calculate the number of metadata blocks that need to be reserved
+  * to allocate @blocks for a non-extent-based file
+@@ -1858,19 +1853,17 @@ repeat:
+ 
+ 	md_needed = mdblocks - EXT4_I(inode)->i_reserved_meta_blocks;
+ 	total = md_needed + nrblocks;
++	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
+ 
+ 	/*
+ 	 * Make quota reservation here to prevent quota overflow
+ 	 * later. Real quota accounting is done at pages writeout
+ 	 * time.
+ 	 */
+-	if (vfs_dq_reserve_block(inode, total)) {
+-		spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
++	if (vfs_dq_reserve_block(inode, total))
+ 		return -EDQUOT;
+-	}
+ 
+ 	if (ext4_claim_free_blocks(sbi, total)) {
+-		spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
+ 		vfs_dq_release_reservation_block(inode, total);
+ 		if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
+ 			yield();
+@@ -1878,10 +1871,11 @@ repeat:
+ 		}
+ 		return -ENOSPC;
+ 	}
++	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
+ 	EXT4_I(inode)->i_reserved_data_blocks += nrblocks;
+-	EXT4_I(inode)->i_reserved_meta_blocks = mdblocks;
+-
++	EXT4_I(inode)->i_reserved_meta_blocks += md_needed;
+ 	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
++
+ 	return 0;       /* success */
+ }
+ 
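[ For orientation: vfs_dq_reserve_block() may sleep on quota I/O, so it cannot
be called under i_block_reservation_lock; the reordering drops the lock first,
then retakes it and accumulates with += since the counters may have moved in
the meantime. The resulting shape, sketched with an illustrative
metadata_needed() helper:

	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
	md_needed = metadata_needed(inode, nrblocks);	/* compute under lock */
	total = md_needed + nrblocks;
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	if (vfs_dq_reserve_block(inode, total))		/* may sleep */
		return -EDQUOT;

	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
	EXT4_I(inode)->i_reserved_data_blocks += nrblocks;	/* "+=", not "=" */
	EXT4_I(inode)->i_reserved_meta_blocks += md_needed;
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
]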
+@@ -4850,6 +4844,9 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
+ 			((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
+ 	inode->i_size = ext4_isize(raw_inode);
+ 	ei->i_disksize = inode->i_size;
++#ifdef CONFIG_QUOTA
++	ei->i_reserved_quota = 0;
++#endif
+ 	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
+ 	ei->i_block_group = iloc.block_group;
+ 	ei->i_last_alloc_group = ~0;
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 9ae5217..92943f2 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -704,6 +704,9 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
+ 	ei->i_allocated_meta_blocks = 0;
+ 	ei->i_delalloc_reserved_flag = 0;
+ 	spin_lock_init(&(ei->i_block_reservation_lock));
++#ifdef CONFIG_QUOTA
++	ei->i_reserved_quota = 0;
++#endif
+ 	INIT_LIST_HEAD(&ei->i_aio_dio_complete_list);
+ 	ei->cur_aio_dio = NULL;
+ 	ei->i_sync_tid = 0;
+@@ -1001,7 +1004,9 @@ static const struct dquot_operations ext4_quota_operations = {
+ 	.reserve_space	= dquot_reserve_space,
+ 	.claim_space	= dquot_claim_space,
+ 	.release_rsv	= dquot_release_reserved_space,
++#ifdef CONFIG_QUOTA
+ 	.get_reserved_space = ext4_get_reserved_space,
++#endif
+ 	.alloc_inode	= dquot_alloc_inode,
+ 	.free_space	= dquot_free_space,
+ 	.free_inode	= dquot_free_inode,
+diff --git a/fs/namei.c b/fs/namei.c
+index d11f404..a2b3c28 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -234,6 +234,7 @@ int generic_permission(struct inode *inode, int mask,
+ 	/*
+ 	 * Searching includes executable on directories, else just read.
+ 	 */
++	mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
+ 	if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
+ 		if (capable(CAP_DAC_READ_SEARCH))
+ 			return 0;
+diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
+index 39b49c4..c4d07a8 100644
+--- a/fs/quota/dquot.c
++++ b/fs/quota/dquot.c
+@@ -1388,6 +1388,67 @@ void vfs_dq_drop(struct inode *inode)
+ EXPORT_SYMBOL(vfs_dq_drop);
+ 
+ /*
++ * inode_reserved_space is managed internally by quota, and protected by
++ * i_lock similar to i_blocks+i_bytes.
++ */
++static qsize_t *inode_reserved_space(struct inode * inode)
++{
++	/* Filesystem must explicitly define its own method in order to use
++	 * quota reservation interface */
++	BUG_ON(!inode->i_sb->dq_op->get_reserved_space);
++	return inode->i_sb->dq_op->get_reserved_space(inode);
++}
++
++static void inode_add_rsv_space(struct inode *inode, qsize_t number)
++{
++	spin_lock(&inode->i_lock);
++	*inode_reserved_space(inode) += number;
++	spin_unlock(&inode->i_lock);
++}
++
++
++static void inode_claim_rsv_space(struct inode *inode, qsize_t number)
++{
++	spin_lock(&inode->i_lock);
++	*inode_reserved_space(inode) -= number;
++	__inode_add_bytes(inode, number);
++	spin_unlock(&inode->i_lock);
++}
++
++static void inode_sub_rsv_space(struct inode *inode, qsize_t number)
++{
++	spin_lock(&inode->i_lock);
++	*inode_reserved_space(inode) -= number;
++	spin_unlock(&inode->i_lock);
++}
++
++static qsize_t inode_get_rsv_space(struct inode *inode)
++{
++	qsize_t ret;
++	spin_lock(&inode->i_lock);
++	ret = *inode_reserved_space(inode);
++	spin_unlock(&inode->i_lock);
++	return ret;
++}
++
++static void inode_incr_space(struct inode *inode, qsize_t number,
++				int reserve)
++{
++	if (reserve)
++		inode_add_rsv_space(inode, number);
++	else
++		inode_add_bytes(inode, number);
++}
++
++static void inode_decr_space(struct inode *inode, qsize_t number, int reserve)
++{
++	if (reserve)
++		inode_sub_rsv_space(inode, number);
++	else
++		inode_sub_bytes(inode, number);
++}
++
++/*
+  * Following four functions update i_blocks+i_bytes fields and
+  * quota information (together with appropriate checks)
+  * NOTE: We absolutely rely on the fact that caller dirties
+@@ -1405,6 +1466,21 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number,
+ 	int cnt, ret = QUOTA_OK;
+ 	char warntype[MAXQUOTAS];
+ 
++	/*
++	 * First test before acquiring mutex - solves deadlocks when we
++	 * re-enter the quota code and are already holding the mutex
++	 */
++	if (IS_NOQUOTA(inode)) {
++		inode_incr_space(inode, number, reserve);
++		goto out;
++	}
++
++	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
++	if (IS_NOQUOTA(inode)) {
++		inode_incr_space(inode, number, reserve);
++		goto out_unlock;
++	}
++
+ 	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+ 		warntype[cnt] = QUOTA_NL_NOWARN;
+ 
+@@ -1415,7 +1491,8 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number,
+ 		if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt)
+ 		    == NO_QUOTA) {
+ 			ret = NO_QUOTA;
+-			goto out_unlock;
++			spin_unlock(&dq_data_lock);
++			goto out_flush_warn;
+ 		}
+ 	}
+ 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+@@ -1426,64 +1503,32 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number,
+ 		else
+ 			dquot_incr_space(inode->i_dquot[cnt], number);
+ 	}
+-	if (!reserve)
+-		inode_add_bytes(inode, number);
+-out_unlock:
++	inode_incr_space(inode, number, reserve);
+ 	spin_unlock(&dq_data_lock);
+-	flush_warnings(inode->i_dquot, warntype);
+-	return ret;
+-}
+-
+-int dquot_alloc_space(struct inode *inode, qsize_t number, int warn)
+-{
+-	int cnt, ret = QUOTA_OK;
+-
+-	/*
+-	 * First test before acquiring mutex - solves deadlocks when we
+-	 * re-enter the quota code and are already holding the mutex
+-	 */
+-	if (IS_NOQUOTA(inode)) {
+-		inode_add_bytes(inode, number);
+-		goto out;
+-	}
+-
+-	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+-	if (IS_NOQUOTA(inode)) {
+-		inode_add_bytes(inode, number);
+-		goto out_unlock;
+-	}
+-
+-	ret = __dquot_alloc_space(inode, number, warn, 0);
+-	if (ret == NO_QUOTA)
+-		goto out_unlock;
+ 
++	if (reserve)
++		goto out_flush_warn;
+ 	/* Dirtify all the dquots - this can block when journalling */
+ 	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+ 		if (inode->i_dquot[cnt])
+ 			mark_dquot_dirty(inode->i_dquot[cnt]);
++out_flush_warn:
++	flush_warnings(inode->i_dquot, warntype);
+ out_unlock:
+ 	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+ out:
+ 	return ret;
+ }
++
++int dquot_alloc_space(struct inode *inode, qsize_t number, int warn)
++{
++	return __dquot_alloc_space(inode, number, warn, 0);
++}
+ EXPORT_SYMBOL(dquot_alloc_space);
+ 
+ int dquot_reserve_space(struct inode *inode, qsize_t number, int warn)
+ {
+-	int ret = QUOTA_OK;
+-
+-	if (IS_NOQUOTA(inode))
+-		goto out;
+-
+-	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+-	if (IS_NOQUOTA(inode))
+-		goto out_unlock;
+-
+-	ret = __dquot_alloc_space(inode, number, warn, 1);
+-out_unlock:
+-	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+-out:
+-	return ret;
++	return __dquot_alloc_space(inode, number, warn, 1);
+ }
+ EXPORT_SYMBOL(dquot_reserve_space);
+ 
+@@ -1540,14 +1585,14 @@ int dquot_claim_space(struct inode *inode, qsize_t number)
+ 	int ret = QUOTA_OK;
+ 
+ 	if (IS_NOQUOTA(inode)) {
+-		inode_add_bytes(inode, number);
++		inode_claim_rsv_space(inode, number);
+ 		goto out;
+ 	}
+ 
+ 	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+ 	if (IS_NOQUOTA(inode))	{
+ 		up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+-		inode_add_bytes(inode, number);
++		inode_claim_rsv_space(inode, number);
+ 		goto out;
+ 	}
+ 
+@@ -1559,7 +1604,7 @@ int dquot_claim_space(struct inode *inode, qsize_t number)
+ 							number);
+ 	}
+ 	/* Update inode bytes */
+-	inode_add_bytes(inode, number);
++	inode_claim_rsv_space(inode, number);
+ 	spin_unlock(&dq_data_lock);
+ 	/* Dirtify all the dquots - this can block when journalling */
+ 	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+@@ -1572,38 +1617,9 @@ out:
+ EXPORT_SYMBOL(dquot_claim_space);
+ 
+ /*
+- * Release reserved quota space
+- */
+-void dquot_release_reserved_space(struct inode *inode, qsize_t number)
+-{
+-	int cnt;
+-
+-	if (IS_NOQUOTA(inode))
+-		goto out;
+-
+-	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+-	if (IS_NOQUOTA(inode))
+-		goto out_unlock;
+-
+-	spin_lock(&dq_data_lock);
+-	/* Release reserved dquots */
+-	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+-		if (inode->i_dquot[cnt])
+-			dquot_free_reserved_space(inode->i_dquot[cnt], number);
+-	}
+-	spin_unlock(&dq_data_lock);
+-
+-out_unlock:
+-	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+-out:
+-	return;
+-}
+-EXPORT_SYMBOL(dquot_release_reserved_space);
+-
+-/*
+  * This operation can block, but only after everything is updated
+  */
+-int dquot_free_space(struct inode *inode, qsize_t number)
++int __dquot_free_space(struct inode *inode, qsize_t number, int reserve)
+ {
+ 	unsigned int cnt;
+ 	char warntype[MAXQUOTAS];
+@@ -1612,7 +1628,7 @@ int dquot_free_space(struct inode *inode, qsize_t number)
+          * re-enter the quota code and are already holding the mutex */
+ 	if (IS_NOQUOTA(inode)) {
+ out_sub:
+-		inode_sub_bytes(inode, number);
++		inode_decr_space(inode, number, reserve);
+ 		return QUOTA_OK;
+ 	}
+ 
+@@ -1627,21 +1643,43 @@ out_sub:
+ 		if (!inode->i_dquot[cnt])
+ 			continue;
+ 		warntype[cnt] = info_bdq_free(inode->i_dquot[cnt], number);
+-		dquot_decr_space(inode->i_dquot[cnt], number);
++		if (reserve)
++			dquot_free_reserved_space(inode->i_dquot[cnt], number);
++		else
++			dquot_decr_space(inode->i_dquot[cnt], number);
+ 	}
+-	inode_sub_bytes(inode, number);
++	inode_decr_space(inode, number, reserve);
+ 	spin_unlock(&dq_data_lock);
++
++	if (reserve)
++		goto out_unlock;
+ 	/* Dirtify all the dquots - this can block when journalling */
+ 	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+ 		if (inode->i_dquot[cnt])
+ 			mark_dquot_dirty(inode->i_dquot[cnt]);
++out_unlock:
+ 	flush_warnings(inode->i_dquot, warntype);
+ 	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+ 	return QUOTA_OK;
+ }
++
++int dquot_free_space(struct inode *inode, qsize_t number)
++{
++	return __dquot_free_space(inode, number, 0);
++}
+ EXPORT_SYMBOL(dquot_free_space);
+ 
+ /*
++ * Release reserved quota space
++ */
++void dquot_release_reserved_space(struct inode *inode, qsize_t number)
++{
++	__dquot_free_space(inode, number, 1);
++}
++EXPORT_SYMBOL(dquot_release_reserved_space);
++
++/*
+  * This operation can block, but only after everything is updated
+  */
+ int dquot_free_inode(const struct inode *inode, qsize_t number)
+@@ -1679,19 +1717,6 @@ int dquot_free_inode(const struct inode *inode, qsize_t number)
+ EXPORT_SYMBOL(dquot_free_inode);
+ 
+ /*
+- * call back function, get reserved quota space from underlying fs
+- */
+-qsize_t dquot_get_reserved_space(struct inode *inode)
+-{
+-	qsize_t reserved_space = 0;
+-
+-	if (sb_any_quota_active(inode->i_sb) &&
+-	    inode->i_sb->dq_op->get_reserved_space)
+-		reserved_space = inode->i_sb->dq_op->get_reserved_space(inode);
+-	return reserved_space;
+-}
+-
+-/*
+  * Transfer the number of inode and blocks from one diskquota to an other.
+  *
+  * This operation can block, but only after everything is updated
+@@ -1734,7 +1759,7 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
+ 	}
+ 	spin_lock(&dq_data_lock);
+ 	cur_space = inode_get_bytes(inode);
+-	rsv_space = dquot_get_reserved_space(inode);
++	rsv_space = inode_get_rsv_space(inode);
+ 	space = cur_space + rsv_space;
+ 	/* Build the transfer_from list and check the limits */
+ 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
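
Taken together, the reworked dquot helpers above give delayed-allocation
filesystems a three-step lifecycle: dquot_reserve_space() accounts space as
reserved, dquot_claim_space() converts a reservation into real usage through
inode_claim_rsv_space(), and dquot_release_reserved_space() drops a reservation
that was never used. A single-threaded toy model of that accounting (no
locking, plain long instead of qsize_t, all names illustrative):

#include <assert.h>
#include <stdio.h>

struct toy_inode {
	long bytes;		/* stands in for i_blocks + i_bytes */
	long reserved;		/* stands in for *get_reserved_space() */
};

static void reserve(struct toy_inode *i, long n) { i->reserved += n; }
static void release(struct toy_inode *i, long n) { i->reserved -= n; }
static void claim(struct toy_inode *i, long n)
{
	i->reserved -= n;	/* mirrors inode_claim_rsv_space() */
	i->bytes    += n;
}

int main(void)
{
	struct toy_inode ino = { 0, 0 };

	reserve(&ino, 4096);	/* delayed allocation reserves a block */
	claim(&ino, 4096);	/* writeback allocates it for real */
	reserve(&ino, 4096);
	release(&ino, 4096);	/* e.g. the page was truncated instead */
	assert(ino.bytes == 4096 && ino.reserved == 0);
	printf("bytes=%ld reserved=%ld\n", ino.bytes, ino.reserved);
	return 0;
}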
+diff --git a/fs/stat.c b/fs/stat.c
+index 075694e..c4ecd52 100644
+--- a/fs/stat.c
++++ b/fs/stat.c
+@@ -401,9 +401,9 @@ SYSCALL_DEFINE4(fstatat64, int, dfd, char __user *, filename,
+ }
+ #endif /* __ARCH_WANT_STAT64 */
+ 
+-void inode_add_bytes(struct inode *inode, loff_t bytes)
++/* Caller is responsible for sufficient locking here (i.e. inode->i_lock) */
++void __inode_add_bytes(struct inode *inode, loff_t bytes)
+ {
+-	spin_lock(&inode->i_lock);
+ 	inode->i_blocks += bytes >> 9;
+ 	bytes &= 511;
+ 	inode->i_bytes += bytes;
+@@ -411,6 +411,12 @@ void inode_add_bytes(struct inode *inode, loff_t bytes)
+ 		inode->i_blocks++;
+ 		inode->i_bytes -= 512;
+ 	}
++}
++
++void inode_add_bytes(struct inode *inode, loff_t bytes)
++{
++	spin_lock(&inode->i_lock);
++	__inode_add_bytes(inode, bytes);
+ 	spin_unlock(&inode->i_lock);
+ }
+ 
+diff --git a/fs/udf/super.c b/fs/udf/super.c
+index 9d1b8c2..1e4543c 100644
+--- a/fs/udf/super.c
++++ b/fs/udf/super.c
+@@ -1078,21 +1078,39 @@ static int udf_fill_partdesc_info(struct super_block *sb,
+ 	return 0;
+ }
+ 
+-static int udf_load_vat(struct super_block *sb, int p_index, int type1_index)
++static void udf_find_vat_block(struct super_block *sb, int p_index,
++			       int type1_index, sector_t start_block)
+ {
+ 	struct udf_sb_info *sbi = UDF_SB(sb);
+ 	struct udf_part_map *map = &sbi->s_partmaps[p_index];
++	sector_t vat_block;
+ 	struct kernel_lb_addr ino;
++
++	/*
++	 * VAT file entry is in the last recorded block. Some broken disks have
++	 * it a few blocks before, so try a bit harder...
++	 */
++	ino.partitionReferenceNum = type1_index;
++	for (vat_block = start_block;
++	     vat_block >= map->s_partition_root &&
++	     vat_block >= start_block - 3 &&
++	     !sbi->s_vat_inode; vat_block--) {
++		ino.logicalBlockNum = vat_block - map->s_partition_root;
++		sbi->s_vat_inode = udf_iget(sb, &ino);
++	}
++}
++
++static int udf_load_vat(struct super_block *sb, int p_index, int type1_index)
++{
++	struct udf_sb_info *sbi = UDF_SB(sb);
++	struct udf_part_map *map = &sbi->s_partmaps[p_index];
+ 	struct buffer_head *bh = NULL;
+ 	struct udf_inode_info *vati;
+ 	uint32_t pos;
+ 	struct virtualAllocationTable20 *vat20;
+ 	sector_t blocks = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits;
+ 
+-	/* VAT file entry is in the last recorded block */
+-	ino.partitionReferenceNum = type1_index;
+-	ino.logicalBlockNum = sbi->s_last_block - map->s_partition_root;
+-	sbi->s_vat_inode = udf_iget(sb, &ino);
++	udf_find_vat_block(sb, p_index, type1_index, sbi->s_last_block);
+ 	if (!sbi->s_vat_inode &&
+ 	    sbi->s_last_block != blocks - 1) {
+ 		printk(KERN_NOTICE "UDF-fs: Failed to read VAT inode from the"
+@@ -1100,9 +1118,7 @@ static int udf_load_vat(struct super_block *sb, int p_index, int type1_index)
+ 		       "block of the device (%lu).\n",
+ 		       (unsigned long)sbi->s_last_block,
+ 		       (unsigned long)blocks - 1);
+-		ino.partitionReferenceNum = type1_index;
+-		ino.logicalBlockNum = blocks - 1 - map->s_partition_root;
+-		sbi->s_vat_inode = udf_iget(sb, &ino);
++		udf_find_vat_block(sb, p_index, type1_index, blocks - 1);
+ 	}
+ 	if (!sbi->s_vat_inode)
+ 		return 1;
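
The search udf_find_vat_block() now performs is a bounded backward probe:
start at the last recorded block and step back at most three blocks, never
past the partition root, stopping at the first block whose inode reads
successfully. A condensed sketch, where probe() stands in for udf_iget() and
is assumed to return nonzero on success:

/* Returns the block holding the VAT file entry, or -1 if none found. */
static long find_vat(long start, long root, int (*probe)(long))
{
	long blk;

	for (blk = start; blk >= root && blk >= start - 3; blk--)
		if (probe(blk))
			return blk;	/* found the VAT file entry */
	return -1;			/* give up, no VAT on this disc */
}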
+diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
+index 789cf5f..d77b547 100644
+--- a/include/linux/cpumask.h
++++ b/include/linux/cpumask.h
+@@ -84,6 +84,7 @@ extern const struct cpumask *const cpu_active_mask;
+ #define num_online_cpus()	cpumask_weight(cpu_online_mask)
+ #define num_possible_cpus()	cpumask_weight(cpu_possible_mask)
+ #define num_present_cpus()	cpumask_weight(cpu_present_mask)
++#define num_active_cpus()	cpumask_weight(cpu_active_mask)
+ #define cpu_online(cpu)		cpumask_test_cpu((cpu), cpu_online_mask)
+ #define cpu_possible(cpu)	cpumask_test_cpu((cpu), cpu_possible_mask)
+ #define cpu_present(cpu)	cpumask_test_cpu((cpu), cpu_present_mask)
+@@ -92,6 +93,7 @@ extern const struct cpumask *const cpu_active_mask;
+ #define num_online_cpus()	1
+ #define num_possible_cpus()	1
+ #define num_present_cpus()	1
++#define num_active_cpus()	1
+ #define cpu_online(cpu)		((cpu) == 0)
+ #define cpu_possible(cpu)	((cpu) == 0)
+ #define cpu_present(cpu)	((cpu) == 0)
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 2620a8c..98ea200 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -2314,6 +2314,7 @@ extern const struct inode_operations page_symlink_inode_operations;
+ extern int generic_readlink(struct dentry *, char __user *, int);
+ extern void generic_fillattr(struct inode *, struct kstat *);
+ extern int vfs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
++void __inode_add_bytes(struct inode *inode, loff_t bytes);
+ void inode_add_bytes(struct inode *inode, loff_t bytes);
+ void inode_sub_bytes(struct inode *inode, loff_t bytes);
+ loff_t inode_get_bytes(struct inode *inode);
+diff --git a/include/linux/quota.h b/include/linux/quota.h
+index 78c4889..8fd8efc 100644
+--- a/include/linux/quota.h
++++ b/include/linux/quota.h
+@@ -313,8 +313,9 @@ struct dquot_operations {
+ 	int (*claim_space) (struct inode *, qsize_t);
+ 	/* release rsved quota for delayed alloc */
+ 	void (*release_rsv) (struct inode *, qsize_t);
+-	/* get reserved quota for delayed alloc */
+-	qsize_t (*get_reserved_space) (struct inode *);
++	/* get reserved quota for delayed alloc, value returned is managed by
++	 * quota code only */
++	qsize_t *(*get_reserved_space) (struct inode *);
+ };
+ 
+ /* Operations handling requests from userspace */
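
With the new signature, the filesystem hands the quota core a pointer to
per-inode storage, and only the quota code reads and writes the value through
inode_reserved_space(). Modelled on the ext4 hunks earlier in this patch, a
filesystem-side implementation reduces to returning that pointer (a sketch,
not necessarily the verbatim ext4 function):

static qsize_t *ext4_get_reserved_space(struct inode *inode)
{
	/* i_reserved_quota is the per-inode field this patch introduces;
	 * only fs/quota/dquot.c ever updates it */
	return &EXT4_I(inode)->i_reserved_quota;
}

The method is then wired into the superblock's dquot_operations as
.get_reserved_space, as the fs/ext4/super.c hunk above shows.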
+diff --git a/include/linux/security.h b/include/linux/security.h
+index 239e40d..d40d23f 100644
+--- a/include/linux/security.h
++++ b/include/linux/security.h
+@@ -95,8 +95,13 @@ struct seq_file;
+ extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
+ extern int cap_netlink_recv(struct sk_buff *skb, int cap);
+ 
++#ifdef CONFIG_MMU
+ extern unsigned long mmap_min_addr;
+ extern unsigned long dac_mmap_min_addr;
++#else
++#define dac_mmap_min_addr	0UL
++#endif
++
+ /*
+  * Values used in the task_security_ops calls
+  */
+@@ -121,6 +126,7 @@ struct request_sock;
+ #define LSM_UNSAFE_PTRACE	2
+ #define LSM_UNSAFE_PTRACE_CAP	4
+ 
++#ifdef CONFIG_MMU
+ /*
+  * If a hint addr is less than mmap_min_addr change hint to be as
+  * low as possible but still greater than mmap_min_addr
+@@ -135,6 +141,7 @@ static inline unsigned long round_hint_to_min(unsigned long hint)
+ }
+ extern int mmap_min_addr_handler(struct ctl_table *table, int write,
+ 				 void __user *buffer, size_t *lenp, loff_t *ppos);
++#endif
+ 
+ #ifdef CONFIG_SECURITY
+ 
+diff --git a/include/net/ip.h b/include/net/ip.h
+index 2f47e54..69db943 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -342,6 +342,7 @@ enum ip_defrag_users
+ 	IP_DEFRAG_CALL_RA_CHAIN,
+ 	IP_DEFRAG_CONNTRACK_IN,
+ 	IP_DEFRAG_CONNTRACK_OUT,
++	IP_DEFRAG_CONNTRACK_BRIDGE_IN,
+ 	IP_DEFRAG_VS_IN,
+ 	IP_DEFRAG_VS_OUT,
+ 	IP_DEFRAG_VS_FWD
+diff --git a/include/net/ipv6.h b/include/net/ipv6.h
+index 8c31d8a..639bbf0 100644
+--- a/include/net/ipv6.h
++++ b/include/net/ipv6.h
+@@ -354,8 +354,16 @@ static inline int ipv6_prefix_equal(const struct in6_addr *a1,
+ 
+ struct inet_frag_queue;
+ 
++enum ip6_defrag_users {
++	IP6_DEFRAG_LOCAL_DELIVER,
++	IP6_DEFRAG_CONNTRACK_IN,
++	IP6_DEFRAG_CONNTRACK_OUT,
++	IP6_DEFRAG_CONNTRACK_BRIDGE_IN,
++};
++
+ struct ip6_create_arg {
+ 	__be32 id;
++	u32 user;
+ 	struct in6_addr *src;
+ 	struct in6_addr *dst;
+ };
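
The new user field becomes part of the fragment-queue lookup key, so fragments
seen by conntrack and fragments queued for local delivery can no longer land
in the same reassembly queue even when (id, src, dst) collide. A toy
comparison in the spirit of the ip6_frag_match() change further down in this
patch (types simplified):

#include <string.h>

struct frag_key {
	unsigned int id;
	unsigned int user;	/* enum ip6_defrag_users */
	unsigned char src[16], dst[16];
};

static int frag_match(const struct frag_key *q, const struct frag_key *arg)
{
	/* same id and addresses but a different user: no match,
	 * so each defrag context gets its own queue */
	return q->id == arg->id && q->user == arg->user &&
	       !memcmp(q->src, arg->src, 16) && !memcmp(q->dst, arg->dst, 16);
}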
+diff --git a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
+index abc55ad..1ee717e 100644
+--- a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
++++ b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
+@@ -9,7 +9,7 @@ extern struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6;
+ 
+ extern int nf_ct_frag6_init(void);
+ extern void nf_ct_frag6_cleanup(void);
+-extern struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb);
++extern struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user);
+ extern void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb,
+ 			       struct net_device *in,
+ 			       struct net_device *out,
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index 6ba0f1e..b216886 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -212,6 +212,8 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
+ 	err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
+ 					hcpu, -1, &nr_calls);
+ 	if (err == NOTIFY_BAD) {
++		set_cpu_active(cpu, true);
++
+ 		nr_calls--;
+ 		__raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
+ 					  hcpu, nr_calls, NULL);
+@@ -223,11 +225,11 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
+ 
+ 	/* Ensure that we are not runnable on dying cpu */
+ 	cpumask_copy(old_allowed, &current->cpus_allowed);
+-	set_cpus_allowed_ptr(current,
+-			     cpumask_of(cpumask_any_but(cpu_online_mask, cpu)));
++	set_cpus_allowed_ptr(current, cpu_active_mask);
+ 
+ 	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
+ 	if (err) {
++		set_cpu_active(cpu, true);
+ 		/* CPU didn't die: tell everyone.  Can't complain. */
+ 		if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
+ 					    hcpu) == NOTIFY_BAD)
+@@ -292,9 +294,6 @@ int __ref cpu_down(unsigned int cpu)
+ 
+ 	err = _cpu_down(cpu, 0);
+ 
+-	if (cpu_online(cpu))
+-		set_cpu_active(cpu, true);
+-
+ out:
+ 	cpu_maps_update_done();
+ 	stop_machine_destroy();
+@@ -387,6 +386,15 @@ int disable_nonboot_cpus(void)
+ 	 * with the userspace trying to use the CPU hotplug at the same time
+ 	 */
+ 	cpumask_clear(frozen_cpus);
++
++	for_each_online_cpu(cpu) {
++		if (cpu == first_cpu)
++			continue;
++		set_cpu_active(cpu, false);
++	}
++
++	synchronize_sched();
++
+ 	printk("Disabling non-boot CPUs ...\n");
+ 	for_each_online_cpu(cpu) {
+ 		if (cpu == first_cpu)
+diff --git a/kernel/cpuset.c b/kernel/cpuset.c
+index b5cb469..39e5121 100644
+--- a/kernel/cpuset.c
++++ b/kernel/cpuset.c
+@@ -873,7 +873,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
+ 		if (retval < 0)
+ 			return retval;
+ 
+-		if (!cpumask_subset(trialcs->cpus_allowed, cpu_online_mask))
++		if (!cpumask_subset(trialcs->cpus_allowed, cpu_active_mask))
+ 			return -EINVAL;
+ 	}
+ 	retval = validate_change(cs, trialcs);
+@@ -2011,7 +2011,7 @@ static void scan_for_empty_cpusets(struct cpuset *root)
+ 		}
+ 
+ 		/* Continue past cpusets with all cpus, mems online */
+-		if (cpumask_subset(cp->cpus_allowed, cpu_online_mask) &&
++		if (cpumask_subset(cp->cpus_allowed, cpu_active_mask) &&
+ 		    nodes_subset(cp->mems_allowed, node_states[N_HIGH_MEMORY]))
+ 			continue;
+ 
+@@ -2020,7 +2020,7 @@ static void scan_for_empty_cpusets(struct cpuset *root)
+ 		/* Remove offline cpus and mems from this cpuset. */
+ 		mutex_lock(&callback_mutex);
+ 		cpumask_and(cp->cpus_allowed, cp->cpus_allowed,
+-			    cpu_online_mask);
++			    cpu_active_mask);
+ 		nodes_and(cp->mems_allowed, cp->mems_allowed,
+ 						node_states[N_HIGH_MEMORY]);
+ 		mutex_unlock(&callback_mutex);
+@@ -2058,8 +2058,10 @@ static int cpuset_track_online_cpus(struct notifier_block *unused_nb,
+ 	switch (phase) {
+ 	case CPU_ONLINE:
+ 	case CPU_ONLINE_FROZEN:
+-	case CPU_DEAD:
+-	case CPU_DEAD_FROZEN:
++	case CPU_DOWN_PREPARE:
++	case CPU_DOWN_PREPARE_FROZEN:
++	case CPU_DOWN_FAILED:
++	case CPU_DOWN_FAILED_FROZEN:
+ 		break;
+ 
+ 	default:
+@@ -2068,7 +2070,7 @@ static int cpuset_track_online_cpus(struct notifier_block *unused_nb,
+ 
+ 	cgroup_lock();
+ 	mutex_lock(&callback_mutex);
+-	cpumask_copy(top_cpuset.cpus_allowed, cpu_online_mask);
++	cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask);
+ 	mutex_unlock(&callback_mutex);
+ 	scan_for_empty_cpusets(&top_cpuset);
+ 	ndoms = generate_sched_domains(&doms, &attr);
+@@ -2115,7 +2117,7 @@ static int cpuset_track_online_nodes(struct notifier_block *self,
+ 
+ void __init cpuset_init_smp(void)
+ {
+-	cpumask_copy(top_cpuset.cpus_allowed, cpu_online_mask);
++	cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask);
+ 	top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
+ 
+ 	hotcpu_notifier(cpuset_track_online_cpus, 0);
+diff --git a/kernel/sched.c b/kernel/sched.c
+index d079a9f..dd0dccd 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -2036,6 +2036,9 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
+ {
+ 	s64 delta;
+ 
++	if (p->sched_class != &fair_sched_class)
++		return 0;
++
+ 	/*
+ 	 * Buddy candidates are cache hot:
+ 	 */
+@@ -2044,9 +2047,6 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
+ 			 &p->se == cfs_rq_of(&p->se)->last))
+ 		return 1;
+ 
+-	if (p->sched_class != &fair_sched_class)
+-		return 0;
+-
+ 	if (sysctl_sched_migration_cost == -1)
+ 		return 1;
+ 	if (sysctl_sched_migration_cost == 0)
+@@ -4139,7 +4139,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
+ 	unsigned long flags;
+ 	struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
+ 
+-	cpumask_copy(cpus, cpu_online_mask);
++	cpumask_copy(cpus, cpu_active_mask);
+ 
+ 	/*
+ 	 * When power savings policy is enabled for the parent domain, idle
+@@ -4302,7 +4302,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
+ 	int all_pinned = 0;
+ 	struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
+ 
+-	cpumask_copy(cpus, cpu_online_mask);
++	cpumask_copy(cpus, cpu_active_mask);
+ 
+ 	/*
+ 	 * When power savings policy is enabled for the parent domain, idle
+@@ -4699,7 +4699,7 @@ int select_nohz_load_balancer(int stop_tick)
+ 		cpumask_set_cpu(cpu, nohz.cpu_mask);
+ 
+ 		/* time for ilb owner also to sleep */
+-		if (cpumask_weight(nohz.cpu_mask) == num_online_cpus()) {
++		if (cpumask_weight(nohz.cpu_mask) == num_active_cpus()) {
+ 			if (atomic_read(&nohz.load_balancer) == cpu)
+ 				atomic_set(&nohz.load_balancer, -1);
+ 			return 0;
+@@ -7075,7 +7075,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
+ 	int ret = 0;
+ 
+ 	rq = task_rq_lock(p, &flags);
+-	if (!cpumask_intersects(new_mask, cpu_online_mask)) {
++	if (!cpumask_intersects(new_mask, cpu_active_mask)) {
+ 		ret = -EINVAL;
+ 		goto out;
+ 	}
+@@ -7097,7 +7097,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
+ 	if (cpumask_test_cpu(task_cpu(p), new_mask))
+ 		goto out;
+ 
+-	if (migrate_task(p, cpumask_any_and(cpu_online_mask, new_mask), &req)) {
++	if (migrate_task(p, cpumask_any_and(cpu_active_mask, new_mask), &req)) {
+ 		/* Need help from migration thread: drop lock and wait. */
+ 		struct task_struct *mt = rq->migration_thread;
+ 
+@@ -7251,19 +7251,19 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
+ 
+ again:
+ 	/* Look for allowed, online CPU in same node. */
+-	for_each_cpu_and(dest_cpu, nodemask, cpu_online_mask)
++	for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
+ 		if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
+ 			goto move;
+ 
+ 	/* Any allowed, online CPU? */
+-	dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_online_mask);
++	dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
+ 	if (dest_cpu < nr_cpu_ids)
+ 		goto move;
+ 
+ 	/* No more Mr. Nice Guy. */
+ 	if (dest_cpu >= nr_cpu_ids) {
+ 		cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
+-		dest_cpu = cpumask_any_and(cpu_online_mask, &p->cpus_allowed);
++		dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
+ 
+ 		/*
+ 		 * Don't tell them about moving exiting tasks or
+@@ -7292,7 +7292,7 @@ move:
+  */
+ static void migrate_nr_uninterruptible(struct rq *rq_src)
+ {
+-	struct rq *rq_dest = cpu_rq(cpumask_any(cpu_online_mask));
++	struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask));
+ 	unsigned long flags;
+ 
+ 	local_irq_save(flags);
+@@ -7546,7 +7546,7 @@ static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
+ static struct ctl_table_header *sd_sysctl_header;
+ static void register_sched_domain_sysctl(void)
+ {
+-	int i, cpu_num = num_online_cpus();
++	int i, cpu_num = num_possible_cpus();
+ 	struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
+ 	char buf[32];
+ 
+@@ -7556,7 +7556,7 @@ static void register_sched_domain_sysctl(void)
+ 	if (entry == NULL)
+ 		return;
+ 
+-	for_each_online_cpu(i) {
++	for_each_possible_cpu(i) {
+ 		snprintf(buf, 32, "cpu%d", i);
+ 		entry->procname = kstrdup(buf, GFP_KERNEL);
+ 		entry->mode = 0555;
+@@ -7925,6 +7925,8 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
+ 
+ static void free_rootdomain(struct root_domain *rd)
+ {
++	synchronize_sched();
++
+ 	cpupri_cleanup(&rd->cpupri);
+ 
+ 	free_cpumask_var(rd->rto_mask);
+@@ -9042,7 +9044,7 @@ match1:
+ 	if (doms_new == NULL) {
+ 		ndoms_cur = 0;
+ 		doms_new = fallback_doms;
+-		cpumask_andnot(&doms_new[0], cpu_online_mask, cpu_isolated_map);
++		cpumask_andnot(&doms_new[0], cpu_active_mask, cpu_isolated_map);
+ 		WARN_ON_ONCE(dattr_new);
+ 	}
+ 
+@@ -9173,8 +9175,10 @@ static int update_sched_domains(struct notifier_block *nfb,
+ 	switch (action) {
+ 	case CPU_ONLINE:
+ 	case CPU_ONLINE_FROZEN:
+-	case CPU_DEAD:
+-	case CPU_DEAD_FROZEN:
++	case CPU_DOWN_PREPARE:
++	case CPU_DOWN_PREPARE_FROZEN:
++	case CPU_DOWN_FAILED:
++	case CPU_DOWN_FAILED_FROZEN:
+ 		partition_sched_domains(1, NULL, NULL);
+ 		return NOTIFY_OK;
+ 
+@@ -9221,7 +9225,7 @@ void __init sched_init_smp(void)
+ #endif
+ 	get_online_cpus();
+ 	mutex_lock(&sched_domains_mutex);
+-	arch_init_sched_domains(cpu_online_mask);
++	arch_init_sched_domains(cpu_active_mask);
+ 	cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
+ 	if (cpumask_empty(non_isolated_cpus))
+ 		cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
+diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
+index 5488a5d..199228b 100644
+--- a/kernel/sched_fair.c
++++ b/kernel/sched_fair.c
+@@ -1374,6 +1374,9 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag
+ 
+ 	rcu_read_lock();
+ 	for_each_domain(cpu, tmp) {
++		if (!(tmp->flags & SD_LOAD_BALANCE))
++			continue;
++
+ 		/*
+ 		 * If power savings logic is enabled for a domain, see if we
+ 		 * are not overloaded, if so, don't balance wider.
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index 0d949c5..dd84be9 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -1200,6 +1200,7 @@ static struct ctl_table vm_table[] = {
+ 		.extra2		= (void *)&hugetlb_infinity,
+ 	},
+ #endif
++#ifdef CONFIG_MMU
+ 	{
+ 		.ctl_name	= VM_LOWMEM_RESERVE_RATIO,
+ 		.procname	= "lowmem_reserve_ratio",
+@@ -1353,6 +1354,7 @@ static struct ctl_table vm_table[] = {
+ 		.mode		= 0644,
+ 		.proc_handler	= &mmap_min_addr_handler,
+ 	},
++#endif
+ #ifdef CONFIG_NUMA
+ 	{
+ 		.ctl_name	= CTL_UNNUMBERED,
+@@ -1605,7 +1607,8 @@ static struct ctl_table debug_table[] = {
+ 		.data		= &show_unhandled_signals,
+ 		.maxlen		= sizeof(int),
+ 		.mode		= 0644,
+-		.proc_handler	= proc_dointvec
++		.proc_handler	= proc_dointvec_minmax,
++		.extra1		= &zero,
+ 	},
+ #endif
+ 	{ .ctl_name = 0 }
+diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
+index 620b58a..9484be4 100644
+--- a/kernel/time/clockevents.c
++++ b/kernel/time/clockevents.c
+@@ -237,8 +237,9 @@ void clockevents_exchange_device(struct clock_event_device *old,
+  */
+ void clockevents_notify(unsigned long reason, void *arg)
+ {
+-	struct list_head *node, *tmp;
++	struct clock_event_device *dev, *tmp;
+ 	unsigned long flags;
++	int cpu;
+ 
+ 	spin_lock_irqsave(&clockevents_lock, flags);
+ 	clockevents_do_notify(reason, arg);
+@@ -249,8 +250,19 @@ void clockevents_notify(unsigned long reason, void *arg)
+ 		 * Unregister the clock event devices which were
+ 		 * released from the users in the notify chain.
+ 		 */
+-		list_for_each_safe(node, tmp, &clockevents_released)
+-			list_del(node);
++		list_for_each_entry_safe(dev, tmp, &clockevents_released, list)
++			list_del(&dev->list);
++		/*
++		 * Now check whether the CPU has left unused per cpu devices
++		 */
++		cpu = *((int *)arg);
++		list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) {
++			if (cpumask_test_cpu(cpu, dev->cpumask) &&
++			    cpumask_weight(dev->cpumask) == 1) {
++				BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
++				list_del(&dev->list);
++			}
++		}
+ 		break;
+ 	default:
+ 		break;
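
The rewritten notify handler above deletes entries from the lists it is
walking, which is why it uses list_for_each_entry_safe(): the _safe variant
keeps a lookahead cursor so list_del() cannot break the traversal. A
kernel-side sketch of the same delete-while-iterating pattern, with an
illustrative entry type:

#include <linux/list.h>

struct dev_entry {
	struct list_head list;
	int cpu;
};

/* Drop every entry belonging to a dead CPU. */
static void drop_devices_for_cpu(struct list_head *head, int cpu)
{
	struct dev_entry *dev, *tmp;

	list_for_each_entry_safe(dev, tmp, head, list)
		if (dev->cpu == cpu)
			list_del(&dev->list);	/* safe: tmp already holds the next node */
}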
+diff --git a/lib/dma-debug.c b/lib/dma-debug.c
+index ce6b7ea..5a77c7c 100644
+--- a/lib/dma-debug.c
++++ b/lib/dma-debug.c
+@@ -670,12 +670,13 @@ static int device_dma_allocations(struct device *dev)
+ 	return count;
+ }
+ 
+-static int dma_debug_device_change(struct notifier_block *nb,
+-				    unsigned long action, void *data)
++static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
+ {
+ 	struct device *dev = data;
+ 	int count;
+ 
++	if (global_disable)
++		return 0;
+ 
+ 	switch (action) {
+ 	case BUS_NOTIFY_UNBOUND_DRIVER:
+@@ -697,6 +698,9 @@ void dma_debug_add_bus(struct bus_type *bus)
+ {
+ 	struct notifier_block *nb;
+ 
++	if (global_disable)
++		return;
++
+ 	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
+ 	if (nb == NULL) {
+ 		pr_err("dma_debug_add_bus: out of memory\n");
+diff --git a/mm/Kconfig b/mm/Kconfig
+index 44cf6f0..2c19c0b 100644
+--- a/mm/Kconfig
++++ b/mm/Kconfig
+@@ -227,6 +227,7 @@ config KSM
+ 
+ config DEFAULT_MMAP_MIN_ADDR
+         int "Low address space to protect from user allocation"
++	depends on MMU
+         default 4096
+         help
+ 	  This is the portion of low virtual memory which should be protected
+diff --git a/mm/internal.h b/mm/internal.h
+index 22ec8d2..17bc0df 100644
+--- a/mm/internal.h
++++ b/mm/internal.h
+@@ -107,9 +107,10 @@ static inline int is_mlocked_vma(struct vm_area_struct *vma, struct page *page)
+ }
+ 
+ /*
+- * must be called with vma's mmap_sem held for read, and page locked.
++ * must be called with vma's mmap_sem held for read or write, and page locked.
+  */
+ extern void mlock_vma_page(struct page *page);
++extern void munlock_vma_page(struct page *page);
+ 
+ /*
+  * Clear the page's PageMlocked().  This can be useful in a situation where
+diff --git a/mm/ksm.c b/mm/ksm.c
+index 5575f86..e9501f8 100644
+--- a/mm/ksm.c
++++ b/mm/ksm.c
+@@ -34,6 +34,7 @@
+ #include <linux/ksm.h>
+ 
+ #include <asm/tlbflush.h>
++#include "internal.h"
+ 
+ /*
+  * A few notes about the KSM scanning process,
+@@ -767,15 +768,14 @@ static int try_to_merge_one_page(struct vm_area_struct *vma,
+ 	 * ptes are necessarily already write-protected.  But in either
+ 	 * case, we need to lock and check page_count is not raised.
+ 	 */
+-	if (write_protect_page(vma, oldpage, &orig_pte)) {
+-		unlock_page(oldpage);
+-		goto out_putpage;
+-	}
+-	unlock_page(oldpage);
+-
+-	if (pages_identical(oldpage, newpage))
++	if (write_protect_page(vma, oldpage, &orig_pte) == 0 &&
++	    pages_identical(oldpage, newpage))
+ 		err = replace_page(vma, oldpage, newpage, orig_pte);
+ 
++	if ((vma->vm_flags & VM_LOCKED) && !err)
++		munlock_vma_page(oldpage);
++
++	unlock_page(oldpage);
+ out_putpage:
+ 	put_page(oldpage);
+ 	put_page(newpage);
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 6314015..5dc1037 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -758,7 +758,13 @@ int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
+ 	task_unlock(task);
+ 	if (!curr)
+ 		return 0;
+-	if (curr->use_hierarchy)
++	/*
++	 * We should check use_hierarchy of "mem", not "curr". Checking
++	 * use_hierarchy of "curr" here would make this function return true if
++	 * hierarchy is enabled in "curr" and "curr" is a child of "mem" in the
++	 * *cgroup* hierarchy (even if use_hierarchy is disabled in "mem").
++	 */
++	if (mem->use_hierarchy)
+ 		ret = css_is_ancestor(&curr->css, &mem->css);
+ 	else
+ 		ret = (curr == mem);
+diff --git a/mm/mlock.c b/mm/mlock.c
+index bd6f0e4..2e05c97 100644
+--- a/mm/mlock.c
++++ b/mm/mlock.c
+@@ -99,14 +99,14 @@ void mlock_vma_page(struct page *page)
+  * not get another chance to clear PageMlocked.  If we successfully
+  * isolate the page and try_to_munlock() detects other VM_LOCKED vmas
+  * mapping the page, it will restore the PageMlocked state, unless the page
+- * is mapped in a non-linear vma.  So, we go ahead and SetPageMlocked(),
++ * is mapped in a non-linear vma.  So, we go ahead and ClearPageMlocked(),
+  * perhaps redundantly.
+  * If we lose the isolation race, and the page is mapped by other VM_LOCKED
+  * vmas, we'll detect this in vmscan--via try_to_munlock() or try_to_unmap()
+  * either of which will restore the PageMlocked state by calling
+  * mlock_vma_page() above, if it can grab the vma's mmap sem.
+  */
+-static void munlock_vma_page(struct page *page)
++void munlock_vma_page(struct page *page)
+ {
+ 	BUG_ON(!PageLocked(page));
+ 
+diff --git a/mm/oom_kill.c b/mm/oom_kill.c
+index ea2147d..9092b43 100644
+--- a/mm/oom_kill.c
++++ b/mm/oom_kill.c
+@@ -404,7 +404,7 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
+ 		cpuset_print_task_mems_allowed(current);
+ 		task_unlock(current);
+ 		dump_stack();
+-		mem_cgroup_print_oom_info(mem, current);
++		mem_cgroup_print_oom_info(mem, p);
+ 		show_mem();
+ 		if (sysctl_oom_dump_tasks)
+ 			dump_tasks(mem);
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 777af57..692807f 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -1464,20 +1464,26 @@ static int inactive_file_is_low(struct zone *zone, struct scan_control *sc)
+ 	return low;
+ }
+ 
++static int inactive_list_is_low(struct zone *zone, struct scan_control *sc,
++				int file)
++{
++	if (file)
++		return inactive_file_is_low(zone, sc);
++	else
++		return inactive_anon_is_low(zone, sc);
++}
++
+ static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
+ 	struct zone *zone, struct scan_control *sc, int priority)
+ {
+ 	int file = is_file_lru(lru);
+ 
+-	if (lru == LRU_ACTIVE_FILE && inactive_file_is_low(zone, sc)) {
+-		shrink_active_list(nr_to_scan, zone, sc, priority, file);
++	if (is_active_lru(lru)) {
++		if (inactive_list_is_low(zone, sc, file))
++			shrink_active_list(nr_to_scan, zone, sc, priority, file);
+ 		return 0;
+ 	}
+ 
+-	if (lru == LRU_ACTIVE_ANON && inactive_anon_is_low(zone, sc)) {
+-		shrink_active_list(nr_to_scan, zone, sc, priority, file);
+-		return 0;
+-	}
+ 	return shrink_inactive_list(nr_to_scan, zone, sc, priority, file);
+ }
+ 
+diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
+index fa2d6b6..331ead3 100644
+--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
++++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
+@@ -14,6 +14,7 @@
+ #include <net/route.h>
+ #include <net/ip.h>
+ 
++#include <linux/netfilter_bridge.h>
+ #include <linux/netfilter_ipv4.h>
+ #include <net/netfilter/ipv4/nf_defrag_ipv4.h>
+ 
+@@ -34,6 +35,20 @@ static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
+ 	return err;
+ }
+ 
++static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum,
++					      struct sk_buff *skb)
++{
++#ifdef CONFIG_BRIDGE_NETFILTER
++	if (skb->nf_bridge &&
++	    skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)
++		return IP_DEFRAG_CONNTRACK_BRIDGE_IN;
++#endif
++	if (hooknum == NF_INET_PRE_ROUTING)
++		return IP_DEFRAG_CONNTRACK_IN;
++	else
++		return IP_DEFRAG_CONNTRACK_OUT;
++}
++
+ static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
+ 					  struct sk_buff *skb,
+ 					  const struct net_device *in,
+@@ -50,10 +65,8 @@ static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
+ #endif
+ 	/* Gather fragments. */
+ 	if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) {
+-		if (nf_ct_ipv4_gather_frags(skb,
+-					    hooknum == NF_INET_PRE_ROUTING ?
+-					    IP_DEFRAG_CONNTRACK_IN :
+-					    IP_DEFRAG_CONNTRACK_OUT))
++		enum ip_defrag_users user = nf_ct_defrag_user(hooknum, skb);
++		if (nf_ct_ipv4_gather_frags(skb, user))
+ 			return NF_STOLEN;
+ 	}
+ 	return NF_ACCEPT;
+diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+index 5f2ec20..0956eba 100644
+--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
++++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+@@ -20,6 +20,7 @@
+ #include <net/ipv6.h>
+ #include <net/inet_frag.h>
+ 
++#include <linux/netfilter_bridge.h>
+ #include <linux/netfilter_ipv6.h>
+ #include <net/netfilter/nf_conntrack.h>
+ #include <net/netfilter/nf_conntrack_helper.h>
+@@ -187,6 +188,21 @@ out:
+ 	return nf_conntrack_confirm(skb);
+ }
+ 
++static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum,
++						struct sk_buff *skb)
++{
++#ifdef CONFIG_BRIDGE_NETFILTER
++	if (skb->nf_bridge &&
++	    skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)
++		return IP6_DEFRAG_CONNTRACK_BRIDGE_IN;
++#endif
++	if (hooknum == NF_INET_PRE_ROUTING)
++		return IP6_DEFRAG_CONNTRACK_IN;
++	else
++		return IP6_DEFRAG_CONNTRACK_OUT;
++}
++
+ static unsigned int ipv6_defrag(unsigned int hooknum,
+ 				struct sk_buff *skb,
+ 				const struct net_device *in,
+@@ -199,8 +215,7 @@ static unsigned int ipv6_defrag(unsigned int hooknum,
+ 	if (skb->nfct)
+ 		return NF_ACCEPT;
+ 
+-	reasm = nf_ct_frag6_gather(skb);
+-
++	reasm = nf_ct_frag6_gather(skb, nf_ct6_defrag_user(hooknum, skb));
+ 	/* queued */
+ 	if (reasm == NULL)
+ 		return NF_STOLEN;
+diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
+index f3aba25..4b6a539 100644
+--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
+@@ -170,13 +170,14 @@ out:
+ /* Creation primitives. */
+ 
+ static __inline__ struct nf_ct_frag6_queue *
+-fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst)
++fq_find(__be32 id, u32 user, struct in6_addr *src, struct in6_addr *dst)
+ {
+ 	struct inet_frag_queue *q;
+ 	struct ip6_create_arg arg;
+ 	unsigned int hash;
+ 
+ 	arg.id = id;
++	arg.user = user;
+ 	arg.src = src;
+ 	arg.dst = dst;
+ 
+@@ -561,7 +562,7 @@ find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff)
+ 	return 0;
+ }
+ 
+-struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb)
++struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user)
+ {
+ 	struct sk_buff *clone;
+ 	struct net_device *dev = skb->dev;
+@@ -607,7 +608,7 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb)
+ 	if (atomic_read(&nf_init_frags.mem) > nf_init_frags.high_thresh)
+ 		nf_ct_frag6_evictor();
+ 
+-	fq = fq_find(fhdr->identification, &hdr->saddr, &hdr->daddr);
++	fq = fq_find(fhdr->identification, user, &hdr->saddr, &hdr->daddr);
+ 	if (fq == NULL) {
+ 		pr_debug("Can't find and can't create new queue\n");
+ 		goto ret_orig;
+diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
+index da5bd0e..4d18699 100644
+--- a/net/ipv6/reassembly.c
++++ b/net/ipv6/reassembly.c
+@@ -72,6 +72,7 @@ struct frag_queue
+ 	struct inet_frag_queue	q;
+ 
+ 	__be32			id;		/* fragment id		*/
++	u32			user;
+ 	struct in6_addr		saddr;
+ 	struct in6_addr		daddr;
+ 
+@@ -141,7 +142,7 @@ int ip6_frag_match(struct inet_frag_queue *q, void *a)
+ 	struct ip6_create_arg *arg = a;
+ 
+ 	fq = container_of(q, struct frag_queue, q);
+-	return (fq->id == arg->id &&
++	return (fq->id == arg->id && fq->user == arg->user &&
+ 			ipv6_addr_equal(&fq->saddr, arg->src) &&
+ 			ipv6_addr_equal(&fq->daddr, arg->dst));
+ }
+@@ -163,6 +164,7 @@ void ip6_frag_init(struct inet_frag_queue *q, void *a)
+ 	struct ip6_create_arg *arg = a;
+ 
+ 	fq->id = arg->id;
++	fq->user = arg->user;
+ 	ipv6_addr_copy(&fq->saddr, arg->src);
+ 	ipv6_addr_copy(&fq->daddr, arg->dst);
+ }
+@@ -244,6 +246,7 @@ fq_find(struct net *net, __be32 id, struct in6_addr *src, struct in6_addr *dst,
+ 	unsigned int hash;
+ 
+ 	arg.id = id;
++	arg.user = IP6_DEFRAG_LOCAL_DELIVER;
+ 	arg.src = src;
+ 	arg.dst = dst;
+ 
+diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
+index f1362f3..fbffce9 100644
+--- a/net/mac80211/ibss.c
++++ b/net/mac80211/ibss.c
+@@ -455,6 +455,10 @@ static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata)
+ 
+ 	ieee80211_sta_expire(sdata, IEEE80211_IBSS_INACTIVITY_LIMIT);
+ 
++	if (time_before(jiffies, ifibss->last_scan_completed +
++		       IEEE80211_IBSS_MERGE_INTERVAL))
++		return;
++
+ 	if (ieee80211_sta_active_ibss(sdata))
+ 		return;
+ 
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index dc5049d..f13d181 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -904,6 +904,14 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
+ 	sdata->u.mgd.flags &= ~(IEEE80211_STA_CONNECTION_POLL |
+ 				IEEE80211_STA_BEACON_POLL);
+ 
++	/*
++	 * Always handle WMM once after association regardless
++	 * of the first value the AP uses. Setting -1 here has
++	 * that effect because the AP's value is an unsigned
++	 * 4-bit value.
++	 */
++	sdata->u.mgd.wmm_last_param_set = -1;
++
+ 	ieee80211_led_assoc(local, 1);
+ 
+ 	sdata->vif.bss_conf.assoc = 1;
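
The sentinel in the hunk above works because the AP's parameter-set count is a
4-bit unsigned field: the cached copy is a plain int, so -1 can never equal a
received value in 0..15, and the first frame after (re)association always
triggers WMM handling. A trivial illustration of the pattern:

#include <stdio.h>

int main(void)
{
	int last_param_set = -1;		/* sentinel: outside 0..15 */
	unsigned int from_ap = 0;		/* 4-bit count from the AP */

	if ((int)from_ap != last_param_set) {	/* always true on first pass */
		puts("handling WMM parameters");
		last_param_set = from_ap;
	}
	return 0;
}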
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index eaa4118..d398197 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -1401,6 +1401,7 @@ static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
+ 
+ 	if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) &&
+ 	    local->hw.conf.dynamic_ps_timeout > 0 &&
++	    !local->quiescing &&
+ 	    !(local->scanning) && local->ps_sdata) {
+ 		if (local->hw.conf.flags & IEEE80211_CONF_PS) {
+ 			ieee80211_stop_queues_by_reason(&local->hw,
+diff --git a/net/mac80211/util.c b/net/mac80211/util.c
+index cbc5d20..51e0bd2 100644
+--- a/net/mac80211/util.c
++++ b/net/mac80211/util.c
+@@ -1031,7 +1031,19 @@ int ieee80211_reconfig(struct ieee80211_local *local)
+ 
+ 	/* restart hardware */
+ 	if (local->open_count) {
++		/*
++		 * Upon resume hardware can sometimes be goofy due to
++		 * various platform / driver / bus issues, so restarting
++		 * the device may at times not work immediately. Propagate
++		 * the error.
++		 */
+ 		res = drv_start(local);
++		if (res) {
++			WARN(local->suspended, "Harware became unavailable "
++			     "upon resume. This is could be a software issue"
++			     "prior to suspend or a harware issue\n");
++			return res;
++		}
+ 
+ 		ieee80211_led_radio(local, true);
+ 	}
+diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
+index 0a6b7a0..0d86248 100644
+--- a/net/wireless/mlme.c
++++ b/net/wireless/mlme.c
+@@ -94,7 +94,18 @@ void cfg80211_send_rx_assoc(struct net_device *dev, const u8 *buf, size_t len)
+ 			}
+ 		}
+ 
+-		WARN_ON(!bss);
++		/*
++		 * We might be coming here because the driver reported
++		 * a successful association at the same time as the
++		 * user requested a deauth. In that case, we will have
++		 * removed the BSS from the auth_bsses list due to the
++		 * deauth request when the assoc response makes it. If
++		 * the two code paths acquire the lock the other way
++		 * around, that's just the standard situation of a
++		 * deauth being requested while connected.
++		 */
++		if (!bss)
++			goto out;
+ 	} else if (wdev->conn) {
+ 		cfg80211_sme_failed_assoc(wdev);
+ 		need_connect_result = false;
+diff --git a/security/Makefile b/security/Makefile
+index 95ecc06..510bbc8 100644
+--- a/security/Makefile
++++ b/security/Makefile
+@@ -8,7 +8,8 @@ subdir-$(CONFIG_SECURITY_SMACK)		+= smack
+ subdir-$(CONFIG_SECURITY_TOMOYO)        += tomoyo
+ 
+ # always enable default capabilities
+-obj-y		+= commoncap.o min_addr.o
++obj-y					+= commoncap.o
++obj-$(CONFIG_MMU)			+= min_addr.o
+ 
+ # Object file lists
+ obj-$(CONFIG_SECURITY)			+= security.o capability.o
+diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
+index 06ec722..1cad4c7 100644
+--- a/security/keys/keyctl.c
++++ b/security/keys/keyctl.c
+@@ -1236,6 +1236,7 @@ long keyctl_get_security(key_serial_t keyid,
+  */
+ long keyctl_session_to_parent(void)
+ {
++#ifdef TIF_NOTIFY_RESUME
+ 	struct task_struct *me, *parent;
+ 	const struct cred *mycred, *pcred;
+ 	struct cred *cred, *oldcred;
+@@ -1326,6 +1327,15 @@ not_permitted:
+ error_keyring:
+ 	key_ref_put(keyring_r);
+ 	return ret;
++
++#else /* !TIF_NOTIFY_RESUME */
++	/*
++	 * To be removed when TIF_NOTIFY_RESUME has been implemented on
++	 * m68k/xtensa
++	 */
++#warning TIF_NOTIFY_RESUME not implemented
++	return -EOPNOTSUPP;
++#endif /* !TIF_NOTIFY_RESUME */
+ }
+ 
+ /*****************************************************************************/
+diff --git a/sound/mips/sgio2audio.c b/sound/mips/sgio2audio.c
+index 8691f4c..f1d9d16 100644
+--- a/sound/mips/sgio2audio.c
++++ b/sound/mips/sgio2audio.c
+@@ -609,7 +609,7 @@ static int snd_sgio2audio_pcm_hw_params(struct snd_pcm_substream *substream,
+ 	/* alloc virtual 'dma' area */
+ 	if (runtime->dma_area)
+ 		vfree(runtime->dma_area);
+-	runtime->dma_area = vmalloc(size);
++	runtime->dma_area = vmalloc_user(size);
+ 	if (runtime->dma_area == NULL)
+ 		return -ENOMEM;
+ 	runtime->dma_bytes = size;
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 7058371..e40d31f 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -9141,6 +9141,7 @@ static struct alc_config_preset alc882_presets[] = {
+ 		.dac_nids = alc883_dac_nids,
+ 		.num_adc_nids = ARRAY_SIZE(alc889_adc_nids),
+ 		.adc_nids = alc889_adc_nids,
++		.capsrc_nids = alc889_capsrc_nids,
+ 		.dig_out_nid = ALC883_DIGOUT_NID,
+ 		.dig_in_nid = ALC883_DIGIN_NID,
+ 		.slave_dig_outs = alc883_slave_dig_outs,
+@@ -9187,6 +9189,7 @@ static struct alc_config_preset alc882_presets[] = {
+ 		.dac_nids = alc883_dac_nids,
+ 		.adc_nids = alc883_adc_nids_alt,
+ 		.num_adc_nids = ARRAY_SIZE(alc883_adc_nids_alt),
++		.capsrc_nids = alc883_capsrc_nids,
+ 		.dig_out_nid = ALC883_DIGOUT_NID,
+ 		.num_channel_mode = ARRAY_SIZE(alc883_3ST_2ch_modes),
+ 		.channel_mode = alc883_3ST_2ch_modes,
+@@ -9333,6 +9336,7 @@ static struct alc_config_preset alc882_presets[] = {
+ 		.dac_nids = alc883_dac_nids,
+ 		.adc_nids = alc883_adc_nids_alt,
+ 		.num_adc_nids = ARRAY_SIZE(alc883_adc_nids_alt),
++		.capsrc_nids = alc883_capsrc_nids,
+ 		.num_channel_mode = ARRAY_SIZE(alc883_sixstack_modes),
+ 		.channel_mode = alc883_sixstack_modes,
+ 		.input_mux = &alc883_capture_source,
+@@ -9394,6 +9398,7 @@ static struct alc_config_preset alc882_presets[] = {
+ 		.dac_nids = alc883_dac_nids,
+ 		.adc_nids = alc883_adc_nids_alt,
+ 		.num_adc_nids = ARRAY_SIZE(alc883_adc_nids_alt),
++		.capsrc_nids = alc883_capsrc_nids,
+ 		.num_channel_mode = ARRAY_SIZE(alc883_3ST_2ch_modes),
+ 		.channel_mode = alc883_3ST_2ch_modes,
+ 		.input_mux = &alc883_lenovo_101e_capture_source,
+@@ -9573,6 +9578,7 @@ static struct alc_config_preset alc882_presets[] = {
+ 			alc880_gpio1_init_verbs },
+ 		.adc_nids = alc883_adc_nids,
+ 		.num_adc_nids = ARRAY_SIZE(alc883_adc_nids),
++		.capsrc_nids = alc883_capsrc_nids,
+ 		.dac_nids = alc883_dac_nids,
+ 		.num_dacs = ARRAY_SIZE(alc883_dac_nids),
+ 		.channel_mode = alc889A_mb31_6ch_modes,
+diff --git a/sound/pcmcia/pdaudiocf/pdaudiocf_pcm.c b/sound/pcmcia/pdaudiocf/pdaudiocf_pcm.c
+index d057e64..5cfa608 100644
+--- a/sound/pcmcia/pdaudiocf/pdaudiocf_pcm.c
++++ b/sound/pcmcia/pdaudiocf/pdaudiocf_pcm.c
+@@ -51,7 +51,7 @@ static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs, size_t s
+ 			return 0; /* already enough large */
+ 		vfree(runtime->dma_area);
+ 	}
+-	runtime->dma_area = vmalloc_32(size);
++	runtime->dma_area = vmalloc_32_user(size);
+ 	if (! runtime->dma_area)
+ 		return -ENOMEM;
+ 	runtime->dma_bytes = size;
+diff --git a/sound/soc/codecs/wm8974.c b/sound/soc/codecs/wm8974.c
+index 98d663a..b0bd1c0 100644
+--- a/sound/soc/codecs/wm8974.c
++++ b/sound/soc/codecs/wm8974.c
+@@ -47,7 +47,7 @@ static const u16 wm8974_reg[WM8974_CACHEREGNUM] = {
+ };
+ 
+ #define WM8974_POWER1_BIASEN  0x08
+-#define WM8974_POWER1_BUFIOEN 0x10
++#define WM8974_POWER1_BUFIOEN 0x04
+ 
+ struct wm8974_priv {
+ 	struct snd_soc_codec codec;
+diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c
+index 1fd4e88..e9123f5 100644
+--- a/sound/soc/codecs/wm9712.c
++++ b/sound/soc/codecs/wm9712.c
+@@ -464,7 +464,8 @@ static int ac97_write(struct snd_soc_codec *codec, unsigned int reg,
+ {
+ 	u16 *cache = codec->reg_cache;
+ 
+-	soc_ac97_ops.write(codec->ac97, reg, val);
++	if (reg < 0x7c)
++		soc_ac97_ops.write(codec->ac97, reg, val);
+ 	reg = reg >> 1;
+ 	if (reg < (ARRAY_SIZE(wm9712_reg)))
+ 		cache[reg] = val;
+diff --git a/sound/usb/usbaudio.c b/sound/usb/usbaudio.c
+index 8db0374..8803d9d 100644
+--- a/sound/usb/usbaudio.c
++++ b/sound/usb/usbaudio.c
+@@ -752,7 +752,7 @@ static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs, size_t s
+ 			return 0; /* already large enough */
+ 		vfree(runtime->dma_area);
+ 	}
+-	runtime->dma_area = vmalloc(size);
++	runtime->dma_area = vmalloc_user(size);
+ 	if (!runtime->dma_area)
+ 		return -ENOMEM;
+ 	runtime->dma_bytes = size;
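
The three sound hunks above share one pattern: a PCM buffer that will later be
mmap()ed into userspace must come from vmalloc_user() (or vmalloc_32_user()
where the hardware needs 32-bit addresses, as in the pdaudiocf case), which
zero-fills the pages and makes them acceptable to remap_vmalloc_range(). A
kernel-side sketch of the allocation step, not a drop-in driver function:

#include <linux/vmalloc.h>
#include <sound/pcm.h>

static int alloc_pcm_buffer(struct snd_pcm_runtime *runtime, size_t size)
{
	if (runtime->dma_area) {
		if (runtime->dma_bytes >= size)
			return 0;	/* existing buffer is large enough */
		vfree(runtime->dma_area);
	}
	runtime->dma_area = vmalloc_user(size);	/* zeroed, mmap-safe */
	if (!runtime->dma_area)
		return -ENOMEM;
	runtime->dma_bytes = size;
	return 0;
}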

Modified: dists/trunk/linux-2.6/debian/patches/series/4
==============================================================================
--- dists/trunk/linux-2.6/debian/patches/series/4	Tue Jan  5 03:14:04 2010	(r14892)
+++ dists/trunk/linux-2.6/debian/patches/series/4	Thu Jan  7 02:59:12 2010	(r14893)
@@ -2,3 +2,5 @@
 + features/arm/early-printk.patch
 + bugfix/all/via-velocity-give-rx-descriptors-later.patch
 + bugfix/all/dmfe-tulip-Let-dmfe-handle-DM910x-except-SPARC-onboard.patch
+- bugfix/all/radeon-fix-crtc-vblank-update-for-r600.patch
++ bugfix/all/stable/2.6.32.3.patch
