Merge branch 'v4l_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6

* 'v4l_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6: (362 commits)
  V4L-DVB: cx88-dvb: remove extra attribution for core
  V4L/DVB: v4l: soc_camera: fix bound checking of mbus_fmt[] index
  V4L/DVB: Add support for SMT7020 to cx88
  V4L/DVB: radio-si470x: Use UTF-8 encoding on a comment
  V4L/DVB: MAINTAINERS: Telegent tlg2300 section fix
  V4L/DVB: gspca_stv06xx: Add support for camera button
  V4L/DVB: gspca_ov519: add support for the button on ov511 based cams
  V4L/DVB: gspca_ov519: Add support for the button on ov518 based cams
  V4L/DVB: gspca_ov519: add support for the button on ov519 based cams
  V4L/DVB: gspca_main: Fix a compile error when CONFIG_INPUT is not set
  V4L/DVB: gspca_main: some input error handling fixes
  V4L/DVB: gspca_main: Allow use of input device creation code for non int. inputs
  V4L/DVB: gspca_pac7302: much improved exposure control
  V4L/DVB: gspca_sonixb: Make sonixb driver handle pas106 and pas202 cameras
  V4L/DVB: gspca_sonixb: pas106: fixup bright ctrl and add gain and exposure ctrls
  V4L/DVB: Documentation: gspca.txt: update known mr97310a cams
  V4L/DVB: gspca_mr97310a: add support for the Sakar 1638x CyberPix
  V4L/DVB: gscpa_sonixb: limit ov7630 max framerate at 640x480
  V4L/DVB: gspca_sonixb: pas202: fixup brightness ctrl and add gain and exposure ctrls
  V4L/DVB: gscpa_sonixb: Differentiate between sensors with a coarse and fine expo ctrl
  ...
diff --git a/Documentation/cachetlb.txt b/Documentation/cachetlb.txt
index da42ab4..b231414 100644
--- a/Documentation/cachetlb.txt
+++ b/Documentation/cachetlb.txt
@@ -377,3 +377,27 @@
 	All the functionality of flush_icache_page can be implemented in
 	flush_dcache_page and update_mmu_cache. In 2.7 the hope is to
 	remove this interface completely.
+
+The final category of APIs is for I/O to deliberately aliased address
+ranges inside the kernel.  Such aliases are set up by use of the
+vmap/vmalloc API.  Since kernel I/O goes via physical pages, the I/O
+subsystem assumes that the user mapping and kernel offset mapping are
+the only aliases.  This isn't true for vmap aliases, so anything in
+the kernel trying to do I/O to vmap areas must manually manage
+coherency.  It must do this by flushing the vmap range before doing
+I/O and invalidating it after the I/O returns.
+
+  void flush_kernel_vmap_range(void *vaddr, int size)
+       flushes the kernel cache for a given virtual address range in
+       the vmap area.  This is to make sure that any data the kernel
+       modified in the vmap range is made visible to the physical
+       page.  The design is to make this area safe to perform I/O on.
+       Note that this API does *not* also flush the offset map alias
+       of the area.
+
+  void invalidate_kernel_vmap_range(void *vaddr, int size)
+       invalidates the cache for a given virtual address range in the
+       vmap area.  This prevents stale data from remaining in the cache
+       after the processor has speculatively read from the range while
+       the I/O to the physical pages was in progress.  It is only
+       necessary for data reads into the vmap area.
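
A minimal usage sketch (an editorial illustration, not part of this patch;
submit_write() and submit_read() are hypothetical stand-ins for a driver's
real I/O submission path) of how I/O on a vmap'd buffer is bracketed by
these two calls:

	#include <linux/errno.h>
	#include <linux/highmem.h>
	#include <linux/vmalloc.h>

	/* hypothetical helpers standing in for the driver's actual I/O path */
	extern void submit_write(void *buf, int len);
	extern void submit_read(void *buf, int len);

	static int vmap_io_example(struct page **pages, int nr_pages, int len)
	{
		void *buf = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);

		if (!buf)
			return -ENOMEM;

		/* push data written through the vmap alias out to the pages */
		flush_kernel_vmap_range(buf, len);
		submit_write(buf, len);

		submit_read(buf, len);
		/* drop lines speculatively fetched while the read was in flight */
		invalidate_kernel_vmap_range(buf, len);

		vunmap(buf);
		return 0;
	}

As noted above, the kernel offset-map alias of the same pages is not
covered by these calls and must be handled separately if it is also used.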
diff --git a/Documentation/dontdiff b/Documentation/dontdiff
index 3ad6ace..d9bcffd 100644
--- a/Documentation/dontdiff
+++ b/Documentation/dontdiff
@@ -69,7 +69,6 @@
 bbootsect
 bin2c
 binkernel.spec
-binoffset
 bootsect
 bounds.h
 bsetup
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index e7848a0..3e69c1c 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1948,8 +1948,12 @@
 				IRQ routing is enabled.
 		noacpi		[X86] Do not use ACPI for IRQ routing
 				or for PCI scanning.
-		use_crs		[X86] Use _CRS for PCI resource
-				allocation.
+		use_crs		[X86] Use PCI host bridge window information
+				from ACPI.  On BIOSes from 2008 or later, this
+				is enabled by default.  If you need to use this,
+				please report a bug.
+		nocrs		[X86] Ignore PCI host bridge windows from ACPI.
+			        If you need to use this, please report a bug.
 		routeirq	Do IRQ routing for all PCI devices.
 				This is normally done in pci_enable_device(),
 				so this option is a temporary workaround
@@ -1998,6 +2002,14 @@
 		force	Enable ASPM even on devices that claim not to support it.
 			WARNING: Forcing ASPM on may cause system lockups.
 
+	pcie_pme=	[PCIE,PM] Native PCIe PME signaling options:
+		off	Do not use native PCIe PME signaling.
+		force	Use native PCIe PME signaling even if the BIOS refuses
+			to allow the kernel to control the relevant PCIe config
+			registers.
+		nomsi	Do not use MSI for native PCIe PME signaling (this makes
+			all PCIe root ports use INTx for everything).
+
 	pcmv=		[HW,PCMCIA] BadgePAD 4
 
 	pd.		[PARIDE]
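
For illustration only (the example command line below is an editorial
addition, not part of the patch), these knobs are passed on the kernel
command line like any other option, e.g.

	pci=nocrs pcie_pme=off

and, at least for pci=nocrs, the text above asks that any system which
turns out to need it be reported as a bug.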
diff --git a/Documentation/lguest/lguest.c b/Documentation/lguest/lguest.c
index 4220851..3119f5d 100644
--- a/Documentation/lguest/lguest.c
+++ b/Documentation/lguest/lguest.c
@@ -34,7 +34,6 @@
 #include <sys/uio.h>
 #include <termios.h>
 #include <getopt.h>
-#include <zlib.h>
 #include <assert.h>
 #include <sched.h>
 #include <limits.h>
diff --git a/Documentation/scsi/ChangeLog.megaraid_sas b/Documentation/scsi/ChangeLog.megaraid_sas
index 17ffa06..3002356 100644
--- a/Documentation/scsi/ChangeLog.megaraid_sas
+++ b/Documentation/scsi/ChangeLog.megaraid_sas
@@ -1,3 +1,19 @@
+1 Release Date    : Thur.  Oct 29, 2009 09:12:45 PST 2009 -
+			(emaild-id:megaraidlinux@lsi.com)
+			Bo Yang
+
+2 Current Version : 00.00.04.17.1-rc1
+3 Older Version   : 00.00.04.12
+
+1.	Set pad_0 in the MFI frame structure to 0 to fix the issue of
+	context values larger than 32 bits.
+
+2.	Add a logical drive list to the driver.  The driver keeps the
+	logical drive list internally after it is loaded.
+
+3.	Fix the device update issue after receiving an AEN for
+	PD delete/add or LD add/delete from the FW.
+
 1 Release Date    : Tues.  July 28, 2009 10:12:45 PST 2009 -
 			(emaild-id:megaraidlinux@lsi.com)
 			Bo Yang
diff --git a/MAINTAINERS b/MAINTAINERS
index f8bd581..5f8c206 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2141,6 +2141,17 @@
 F:	Documentation/fault-injection/
 F:	lib/fault-inject.c
 
+FCOE SUBSYSTEM (libfc, libfcoe, fcoe)
+M:	Robert Love <robert.w.love@intel.com>
+L:	devel@open-fcoe.org
+W:	www.Open-FCoE.org
+S:	Supported
+F:	drivers/scsi/libfc/
+F:	drivers/scsi/fcoe/
+F:	include/scsi/fc/
+F:	include/scsi/libfc.h
+F:	include/scsi/libfcoe.h
+
 FILE LOCKING (flock() and fcntl()/lockf())
 M:	Matthew Wilcox <matthew@wil.cx>
 L:	linux-fsdevel@vger.kernel.org
@@ -2393,6 +2404,12 @@
 S:	Odd Fixes
 F:	drivers/char/hvc_*
 
+VIRTIO CONSOLE DRIVER
+M:	Amit Shah <amit.shah@redhat.com>
+L:	virtualization@lists.linux-foundation.org
+S:	Maintained
+F:	drivers/char/virtio_console.c
+
 GSPCA FINEPIX SUBDRIVER
 M:	Frank Zago <frank@zago.net>
 L:	linux-media@vger.kernel.org
diff --git a/arch/alpha/kernel/pci.c b/arch/alpha/kernel/pci.c
index a91ba28..c9ab94e 100644
--- a/arch/alpha/kernel/pci.c
+++ b/arch/alpha/kernel/pci.c
@@ -126,8 +126,8 @@
 #define MB			(1024*KB)
 #define GB			(1024*MB)
 
-void
-pcibios_align_resource(void *data, struct resource *res,
+resource_size_t
+pcibios_align_resource(void *data, const struct resource *res,
 		       resource_size_t size, resource_size_t align)
 {
 	struct pci_dev *dev = data;
@@ -184,7 +184,7 @@
 		}
 	}
 
-	res->start = start;
+	return start;
 }
 #undef KB
 #undef MB
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 8113bb5..5fe4a2a 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -447,6 +447,16 @@
 	    : "r" (0));
 #endif
 }
+static inline void flush_kernel_vmap_range(void *addr, int size)
+{
+	if ((cache_is_vivt() || cache_is_vipt_aliasing()))
+	  __cpuc_flush_dcache_area(addr, (size_t)size);
+}
+static inline void invalidate_kernel_vmap_range(void *addr, int size)
+{
+	if ((cache_is_vivt() || cache_is_vipt_aliasing()))
+	  __cpuc_flush_dcache_area(addr, (size_t)size);
+}
 
 #define ARCH_HAS_FLUSH_ANON_PAGE
 static inline void flush_anon_page(struct vm_area_struct *vma,
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index 8096819..bd397e0 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -616,15 +616,17 @@
  * but we want to try to avoid allocating at 0x2900-0x2bff
  * which might be mirrored at 0x0100-0x03ff..
  */
-void pcibios_align_resource(void *data, struct resource *res,
-			    resource_size_t size, resource_size_t align)
+resource_size_t pcibios_align_resource(void *data, const struct resource *res,
+				resource_size_t size, resource_size_t align)
 {
 	resource_size_t start = res->start;
 
 	if (res->flags & IORESOURCE_IO && start & 0x300)
 		start = (start + 0x3ff) & ~0x3ff;
 
-	res->start = (start + align - 1) & ~(align - 1);
+	start = (start + align - 1) & ~(align - 1);
+
+	return start;
 }
 
 /**
diff --git a/arch/arm/mach-nomadik/cpu-8815.c b/arch/arm/mach-nomadik/cpu-8815.c
index f93c596..9bf33b3 100644
--- a/arch/arm/mach-nomadik/cpu-8815.c
+++ b/arch/arm/mach-nomadik/cpu-8815.c
@@ -86,11 +86,19 @@
 	},
 };
 
+static struct amba_device cpu8815_amba_rng = {
+	.dev = {
+		.init_name = "rng",
+	},
+	__MEM_4K_RESOURCE(NOMADIK_RNG_BASE),
+};
+
 static struct amba_device *amba_devs[] __initdata = {
 	cpu8815_amba_gpio + 0,
 	cpu8815_amba_gpio + 1,
 	cpu8815_amba_gpio + 2,
 	cpu8815_amba_gpio + 3,
+	&cpu8815_amba_rng
 };
 
 static int __init cpu8815_init(void)
diff --git a/arch/cris/arch-v32/drivers/pci/bios.c b/arch/cris/arch-v32/drivers/pci/bios.c
index 77ee319..d4b9c36 100644
--- a/arch/cris/arch-v32/drivers/pci/bios.c
+++ b/arch/cris/arch-v32/drivers/pci/bios.c
@@ -41,18 +41,16 @@
 	return 0;
 }
 
-void
-pcibios_align_resource(void *data, struct resource *res,
+resource_size_t
+pcibios_align_resource(void *data, const struct resource *res,
 		       resource_size_t size, resource_size_t align)
 {
-	if (res->flags & IORESOURCE_IO) {
-		resource_size_t start = res->start;
+	resource_size_t start = res->start;
 
-		if (start & 0x300) {
-			start = (start + 0x3ff) & ~0x3ff;
-			res->start = start;
-		}
-	}
+	if ((res->flags & IORESOURCE_IO) && (start & 0x300))
+		start = (start + 0x3ff) & ~0x3ff;
+
+	return start;
 }
 
 int pcibios_enable_resources(struct pci_dev *dev, int mask)
diff --git a/arch/frv/mb93090-mb00/pci-frv.c b/arch/frv/mb93090-mb00/pci-frv.c
index 566bdeb..1ed15d7 100644
--- a/arch/frv/mb93090-mb00/pci-frv.c
+++ b/arch/frv/mb93090-mb00/pci-frv.c
@@ -32,18 +32,16 @@
  * but we want to try to avoid allocating at 0x2900-0x2bff
  * which might have been mirrored at 0x0100-0x03ff..
  */
-void
-pcibios_align_resource(void *data, struct resource *res,
+resource_size_t
+pcibios_align_resource(void *data, const struct resource *res,
 		       resource_size_t size, resource_size_t align)
 {
-	if (res->flags & IORESOURCE_IO) {
-		resource_size_t start = res->start;
+	resource_size_t start = res->start;
 
-		if (start & 0x300) {
-			start = (start + 0x3ff) & ~0x3ff;
-			res->start = start;
-		}
-	}
+	if ((res->flags & IORESOURCE_IO) && (start & 0x300))
+		start = (start + 0x3ff) & ~0x3ff;
+
+	return start;
 }
 
 
diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h
index e97b255..93997bd 100644
--- a/arch/ia64/include/asm/acpi.h
+++ b/arch/ia64/include/asm/acpi.h
@@ -98,6 +98,7 @@
 #endif
 #define acpi_processor_cstate_check(x) (x) /* no idle limits on IA64 :) */
 static inline void disable_acpi(void) { }
+static inline void pci_acpi_crs_quirks(void) { }
 
 const char *acpi_get_sysname (void);
 int acpi_request_vector (u32 int_type);
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index df639db7..64aff52 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -320,9 +320,9 @@
 static void __devinit
 pcibios_setup_root_windows(struct pci_bus *bus, struct pci_controller *ctrl)
 {
-	int i, j;
+	int i;
 
-	j = 0;
+	pci_bus_remove_resources(bus);
 	for (i = 0; i < ctrl->windows; i++) {
 		struct resource *res = &ctrl->window[i].resource;
 		/* HP's firmware has a hack to work around a Windows bug.
@@ -330,13 +330,7 @@
 		if ((res->flags & IORESOURCE_MEM) &&
 		    (res->end - res->start < 16))
 			continue;
-		if (j >= PCI_BUS_NUM_RESOURCES) {
-			dev_warn(&bus->dev,
-				 "ignoring host bridge window %pR (no space)\n",
-				 res);
-			continue;
-		}
-		bus->resource[j++] = res;
+		pci_bus_add_resource(bus, res, 0);
 	}
 }
 
@@ -452,13 +446,12 @@
 static int __devinit is_valid_resource(struct pci_dev *dev, int idx)
 {
 	unsigned int i, type_mask = IORESOURCE_IO | IORESOURCE_MEM;
-	struct resource *devr = &dev->resource[idx];
+	struct resource *devr = &dev->resource[idx], *busr;
 
 	if (!dev->bus)
 		return 0;
-	for (i=0; i<PCI_BUS_NUM_RESOURCES; i++) {
-		struct resource *busr = dev->bus->resource[i];
 
+	pci_bus_for_each_resource(dev->bus, busr, i) {
 		if (!busr || ((busr->flags ^ devr->flags) & type_mask))
 			continue;
 		if ((devr->start) && (devr->start >= busr->start) &&
@@ -547,10 +540,11 @@
 		acpi_pci_irq_disable(dev);
 }
 
-void
-pcibios_align_resource (void *data, struct resource *res,
+resource_size_t
+pcibios_align_resource (void *data, const struct resource *res,
 		        resource_size_t size, resource_size_t align)
 {
+	return res->start;
 }
 
 /*
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index cd5837e..b008168 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -130,6 +130,7 @@
 
 config OF
 	def_bool y
+	select OF_FLATTREE
 
 config PROC_DEVICETREE
 	bool "Support for device tree in /proc"
diff --git a/arch/microblaze/include/asm/prom.h b/arch/microblaze/include/asm/prom.h
index ef3ec1d..03f45a9 100644
--- a/arch/microblaze/include/asm/prom.h
+++ b/arch/microblaze/include/asm/prom.h
@@ -26,31 +26,11 @@
 #include <asm/irq.h>
 #include <asm/atomic.h>
 
-#define OF_ROOT_NODE_ADDR_CELLS_DEFAULT	1
-#define OF_ROOT_NODE_SIZE_CELLS_DEFAULT	1
-
-#define of_compat_cmp(s1, s2, l)	strncasecmp((s1), (s2), (l))
-#define of_prop_cmp(s1, s2)		strcmp((s1), (s2))
-#define of_node_cmp(s1, s2)		strcasecmp((s1), (s2))
-
-extern struct device_node *of_chosen;
-
 #define HAVE_ARCH_DEVTREE_FIXUPS
 
-extern struct device_node *allnodes;	/* temporary while merging */
-extern rwlock_t devtree_lock;	/* temporary while merging */
-
-/* For updating the device tree at runtime */
-extern void of_attach_node(struct device_node *);
-extern void of_detach_node(struct device_node *);
-
 /* Other Prototypes */
 extern int early_uartlite_console(void);
 
-extern struct resource *request_OF_resource(struct device_node *node,
-				int index, const char *name_postfix);
-extern int release_OF_resource(struct device_node *node, int index);
-
 /*
  * OF address retrieval & translation
  */
diff --git a/arch/microblaze/kernel/of_platform.c b/arch/microblaze/kernel/of_platform.c
index acf4574..1c6d684 100644
--- a/arch/microblaze/kernel/of_platform.c
+++ b/arch/microblaze/kernel/of_platform.c
@@ -185,7 +185,7 @@
 static int of_dev_phandle_match(struct device *dev, void *data)
 {
 	phandle *ph = data;
-	return to_of_device(dev)->node->linux_phandle == *ph;
+	return to_of_device(dev)->node->phandle == *ph;
 }
 
 struct of_device *of_find_device_by_phandle(phandle ph)
diff --git a/arch/microblaze/kernel/prom.c b/arch/microblaze/kernel/prom.c
index b817df1..a15ef6d 100644
--- a/arch/microblaze/kernel/prom.c
+++ b/arch/microblaze/kernel/prom.c
@@ -42,698 +42,21 @@
 #include <asm/sections.h>
 #include <asm/pci-bridge.h>
 
-static int __initdata dt_root_addr_cells;
-static int __initdata dt_root_size_cells;
-
-typedef u32 cell_t;
-
-static struct boot_param_header *initial_boot_params;
-
-/* export that to outside world */
-struct device_node *of_chosen;
-
-static inline char *find_flat_dt_string(u32 offset)
+void __init early_init_dt_scan_chosen_arch(unsigned long node)
 {
-	return ((char *)initial_boot_params) +
-		initial_boot_params->off_dt_strings + offset;
+	/* No Microblaze specific code here */
 }
 
-/**
- * This function is used to scan the flattened device-tree, it is
- * used to extract the memory informations at boot before we can
- * unflatten the tree
- */
-int __init of_scan_flat_dt(int (*it)(unsigned long node,
-				     const char *uname, int depth,
-				     void *data),
-			   void *data)
+void __init early_init_dt_add_memory_arch(u64 base, u64 size)
 {
-	unsigned long p = ((unsigned long)initial_boot_params) +
-		initial_boot_params->off_dt_struct;
-	int rc = 0;
-	int depth = -1;
-
-	do {
-		u32 tag = *((u32 *)p);
-		char *pathp;
-
-		p += 4;
-		if (tag == OF_DT_END_NODE) {
-			depth--;
-			continue;
-		}
-		if (tag == OF_DT_NOP)
-			continue;
-		if (tag == OF_DT_END)
-			break;
-		if (tag == OF_DT_PROP) {
-			u32 sz = *((u32 *)p);
-			p += 8;
-			if (initial_boot_params->version < 0x10)
-				p = _ALIGN(p, sz >= 8 ? 8 : 4);
-			p += sz;
-			p = _ALIGN(p, 4);
-			continue;
-		}
-		if (tag != OF_DT_BEGIN_NODE) {
-			printk(KERN_WARNING "Invalid tag %x scanning flattened"
-				" device tree !\n", tag);
-			return -EINVAL;
-		}
-		depth++;
-		pathp = (char *)p;
-		p = _ALIGN(p + strlen(pathp) + 1, 4);
-		if ((*pathp) == '/') {
-			char *lp, *np;
-			for (lp = NULL, np = pathp; *np; np++)
-				if ((*np) == '/')
-					lp = np+1;
-			if (lp != NULL)
-				pathp = lp;
-		}
-		rc = it(p, pathp, depth, data);
-		if (rc != 0)
-			break;
-	} while (1);
-
-	return rc;
+	lmb_add(base, size);
 }
 
-unsigned long __init of_get_flat_dt_root(void)
+u64 __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
 {
-	unsigned long p = ((unsigned long)initial_boot_params) +
-		initial_boot_params->off_dt_struct;
-
-	while (*((u32 *)p) == OF_DT_NOP)
-		p += 4;
-	BUG_ON(*((u32 *)p) != OF_DT_BEGIN_NODE);
-	p += 4;
-	return _ALIGN(p + strlen((char *)p) + 1, 4);
+	return lmb_alloc(size, align);
 }
 
-/**
- * This function can be used within scan_flattened_dt callback to get
- * access to properties
- */
-void *__init of_get_flat_dt_prop(unsigned long node, const char *name,
-				unsigned long *size)
-{
-	unsigned long p = node;
-
-	do {
-		u32 tag = *((u32 *)p);
-		u32 sz, noff;
-		const char *nstr;
-
-		p += 4;
-		if (tag == OF_DT_NOP)
-			continue;
-		if (tag != OF_DT_PROP)
-			return NULL;
-
-		sz = *((u32 *)p);
-		noff = *((u32 *)(p + 4));
-		p += 8;
-		if (initial_boot_params->version < 0x10)
-			p = _ALIGN(p, sz >= 8 ? 8 : 4);
-
-		nstr = find_flat_dt_string(noff);
-		if (nstr == NULL) {
-			printk(KERN_WARNING "Can't find property index"
-				" name !\n");
-			return NULL;
-		}
-		if (strcmp(name, nstr) == 0) {
-			if (size)
-				*size = sz;
-			return (void *)p;
-		}
-		p += sz;
-		p = _ALIGN(p, 4);
-	} while (1);
-}
-
-int __init of_flat_dt_is_compatible(unsigned long node, const char *compat)
-{
-	const char *cp;
-	unsigned long cplen, l;
-
-	cp = of_get_flat_dt_prop(node, "compatible", &cplen);
-	if (cp == NULL)
-		return 0;
-	while (cplen > 0) {
-		if (strncasecmp(cp, compat, strlen(compat)) == 0)
-			return 1;
-		l = strlen(cp) + 1;
-		cp += l;
-		cplen -= l;
-	}
-
-	return 0;
-}
-
-static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size,
-					unsigned long align)
-{
-	void *res;
-
-	*mem = _ALIGN(*mem, align);
-	res = (void *)*mem;
-	*mem += size;
-
-	return res;
-}
-
-static unsigned long __init unflatten_dt_node(unsigned long mem,
-					unsigned long *p,
-					struct device_node *dad,
-					struct device_node ***allnextpp,
-					unsigned long fpsize)
-{
-	struct device_node *np;
-	struct property *pp, **prev_pp = NULL;
-	char *pathp;
-	u32 tag;
-	unsigned int l, allocl;
-	int has_name = 0;
-	int new_format = 0;
-
-	tag = *((u32 *)(*p));
-	if (tag != OF_DT_BEGIN_NODE) {
-		printk("Weird tag at start of node: %x\n", tag);
-		return mem;
-	}
-	*p += 4;
-	pathp = (char *)*p;
-	l = allocl = strlen(pathp) + 1;
-	*p = _ALIGN(*p + l, 4);
-
-	/* version 0x10 has a more compact unit name here instead of the full
-	 * path. we accumulate the full path size using "fpsize", we'll rebuild
-	 * it later. We detect this because the first character of the name is
-	 * not '/'.
-	 */
-	if ((*pathp) != '/') {
-		new_format = 1;
-		if (fpsize == 0) {
-			/* root node: special case. fpsize accounts for path
-			 * plus terminating zero. root node only has '/', so
-			 * fpsize should be 2, but we want to avoid the first
-			 * level nodes to have two '/' so we use fpsize 1 here
-			 */
-			fpsize = 1;
-			allocl = 2;
-		} else {
-			/* account for '/' and path size minus terminal 0
-			 * already in 'l'
-			 */
-			fpsize += l;
-			allocl = fpsize;
-		}
-	}
-
-	np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + allocl,
-				__alignof__(struct device_node));
-	if (allnextpp) {
-		memset(np, 0, sizeof(*np));
-		np->full_name = ((char *)np) + sizeof(struct device_node);
-		if (new_format) {
-			char *p2 = np->full_name;
-			/* rebuild full path for new format */
-			if (dad && dad->parent) {
-				strcpy(p2, dad->full_name);
-#ifdef DEBUG
-				if ((strlen(p2) + l + 1) != allocl) {
-					pr_debug("%s: p: %d, l: %d, a: %d\n",
-						pathp, (int)strlen(p2),
-						l, allocl);
-				}
-#endif
-				p2 += strlen(p2);
-			}
-			*(p2++) = '/';
-			memcpy(p2, pathp, l);
-		} else
-			memcpy(np->full_name, pathp, l);
-		prev_pp = &np->properties;
-		**allnextpp = np;
-		*allnextpp = &np->allnext;
-		if (dad != NULL) {
-			np->parent = dad;
-			/* we temporarily use the next field as `last_child'*/
-			if (dad->next == NULL)
-				dad->child = np;
-			else
-				dad->next->sibling = np;
-			dad->next = np;
-		}
-		kref_init(&np->kref);
-	}
-	while (1) {
-		u32 sz, noff;
-		char *pname;
-
-		tag = *((u32 *)(*p));
-		if (tag == OF_DT_NOP) {
-			*p += 4;
-			continue;
-		}
-		if (tag != OF_DT_PROP)
-			break;
-		*p += 4;
-		sz = *((u32 *)(*p));
-		noff = *((u32 *)((*p) + 4));
-		*p += 8;
-		if (initial_boot_params->version < 0x10)
-			*p = _ALIGN(*p, sz >= 8 ? 8 : 4);
-
-		pname = find_flat_dt_string(noff);
-		if (pname == NULL) {
-			printk(KERN_INFO
-				"Can't find property name in list !\n");
-			break;
-		}
-		if (strcmp(pname, "name") == 0)
-			has_name = 1;
-		l = strlen(pname) + 1;
-		pp = unflatten_dt_alloc(&mem, sizeof(struct property),
-					__alignof__(struct property));
-		if (allnextpp) {
-			if (strcmp(pname, "linux,phandle") == 0) {
-				np->node = *((u32 *)*p);
-				if (np->linux_phandle == 0)
-					np->linux_phandle = np->node;
-			}
-			if (strcmp(pname, "ibm,phandle") == 0)
-				np->linux_phandle = *((u32 *)*p);
-			pp->name = pname;
-			pp->length = sz;
-			pp->value = (void *)*p;
-			*prev_pp = pp;
-			prev_pp = &pp->next;
-		}
-		*p = _ALIGN((*p) + sz, 4);
-	}
-	/* with version 0x10 we may not have the name property, recreate
-	 * it here from the unit name if absent
-	 */
-	if (!has_name) {
-		char *p1 = pathp, *ps = pathp, *pa = NULL;
-		int sz;
-
-		while (*p1) {
-			if ((*p1) == '@')
-				pa = p1;
-			if ((*p1) == '/')
-				ps = p1 + 1;
-			p1++;
-		}
-		if (pa < ps)
-			pa = p1;
-		sz = (pa - ps) + 1;
-		pp = unflatten_dt_alloc(&mem, sizeof(struct property) + sz,
-					__alignof__(struct property));
-		if (allnextpp) {
-			pp->name = "name";
-			pp->length = sz;
-			pp->value = pp + 1;
-			*prev_pp = pp;
-			prev_pp = &pp->next;
-			memcpy(pp->value, ps, sz - 1);
-			((char *)pp->value)[sz - 1] = 0;
-			pr_debug("fixed up name for %s -> %s\n", pathp,
-				(char *)pp->value);
-		}
-	}
-	if (allnextpp) {
-		*prev_pp = NULL;
-		np->name = of_get_property(np, "name", NULL);
-		np->type = of_get_property(np, "device_type", NULL);
-
-		if (!np->name)
-			np->name = "<NULL>";
-		if (!np->type)
-			np->type = "<NULL>";
-	}
-	while (tag == OF_DT_BEGIN_NODE) {
-		mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize);
-		tag = *((u32 *)(*p));
-	}
-	if (tag != OF_DT_END_NODE) {
-		printk(KERN_INFO "Weird tag at end of node: %x\n", tag);
-		return mem;
-	}
-	*p += 4;
-	return mem;
-}
-
-/**
- * unflattens the device-tree passed by the firmware, creating the
- * tree of struct device_node. It also fills the "name" and "type"
- * pointers of the nodes so the normal device-tree walking functions
- * can be used (this used to be done by finish_device_tree)
- */
-void __init unflatten_device_tree(void)
-{
-	unsigned long start, mem, size;
-	struct device_node **allnextp = &allnodes;
-
-	pr_debug(" -> unflatten_device_tree()\n");
-
-	/* First pass, scan for size */
-	start = ((unsigned long)initial_boot_params) +
-		initial_boot_params->off_dt_struct;
-	size = unflatten_dt_node(0, &start, NULL, NULL, 0);
-	size = (size | 3) + 1;
-
-	pr_debug("  size is %lx, allocating...\n", size);
-
-	/* Allocate memory for the expanded device tree */
-	mem = lmb_alloc(size + 4, __alignof__(struct device_node));
-	mem = (unsigned long) __va(mem);
-
-	((u32 *)mem)[size / 4] = 0xdeadbeef;
-
-	pr_debug("  unflattening %lx...\n", mem);
-
-	/* Second pass, do actual unflattening */
-	start = ((unsigned long)initial_boot_params) +
-		initial_boot_params->off_dt_struct;
-	unflatten_dt_node(mem, &start, NULL, &allnextp, 0);
-	if (*((u32 *)start) != OF_DT_END)
-		printk(KERN_WARNING "Weird tag at end of tree: %08x\n",
-			*((u32 *)start));
-	if (((u32 *)mem)[size / 4] != 0xdeadbeef)
-		printk(KERN_WARNING "End of tree marker overwritten: %08x\n",
-			((u32 *)mem)[size / 4]);
-	*allnextp = NULL;
-
-	/* Get pointer to OF "/chosen" node for use everywhere */
-	of_chosen = of_find_node_by_path("/chosen");
-	if (of_chosen == NULL)
-		of_chosen = of_find_node_by_path("/chosen@0");
-
-	pr_debug(" <- unflatten_device_tree()\n");
-}
-
-#define early_init_dt_scan_drconf_memory(node) 0
-
-static int __init early_init_dt_scan_cpus(unsigned long node,
-					  const char *uname, int depth,
-					  void *data)
-{
-	static int logical_cpuid;
-	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
-	const u32 *intserv;
-	int i, nthreads;
-	int found = 0;
-
-	/* We are scanning "cpu" nodes only */
-	if (type == NULL || strcmp(type, "cpu") != 0)
-		return 0;
-
-	/* Get physical cpuid */
-	intserv = of_get_flat_dt_prop(node, "reg", NULL);
-	nthreads = 1;
-
-	/*
-	 * Now see if any of these threads match our boot cpu.
-	 * NOTE: This must match the parsing done in smp_setup_cpu_maps.
-	 */
-	for (i = 0; i < nthreads; i++) {
-		/*
-		 * version 2 of the kexec param format adds the phys cpuid of
-		 * booted proc.
-		 */
-		if (initial_boot_params && initial_boot_params->version >= 2) {
-			if (intserv[i] ==
-					initial_boot_params->boot_cpuid_phys) {
-				found = 1;
-				break;
-			}
-		} else {
-			/*
-			 * Check if it's the boot-cpu, set it's hw index now,
-			 * unfortunately this format did not support booting
-			 * off secondary threads.
-			 */
-			if (of_get_flat_dt_prop(node,
-					"linux,boot-cpu", NULL) != NULL) {
-				found = 1;
-				break;
-			}
-		}
-
-#ifdef CONFIG_SMP
-		/* logical cpu id is always 0 on UP kernels */
-		logical_cpuid++;
-#endif
-	}
-
-	if (found) {
-		pr_debug("boot cpu: logical %d physical %d\n", logical_cpuid,
-			intserv[i]);
-		boot_cpuid = logical_cpuid;
-	}
-
-	return 0;
-}
-
-#ifdef CONFIG_BLK_DEV_INITRD
-static void __init early_init_dt_check_for_initrd(unsigned long node)
-{
-	unsigned long l;
-	u32 *prop;
-
-	pr_debug("Looking for initrd properties... ");
-
-	prop = of_get_flat_dt_prop(node, "linux,initrd-start", &l);
-	if (prop) {
-		initrd_start = (unsigned long)
-					__va((u32)of_read_ulong(prop, l/4));
-
-		prop = of_get_flat_dt_prop(node, "linux,initrd-end", &l);
-		if (prop) {
-			initrd_end = (unsigned long)
-					__va((u32)of_read_ulong(prop, 1/4));
-			initrd_below_start_ok = 1;
-		} else {
-			initrd_start = 0;
-		}
-	}
-
-	pr_debug("initrd_start=0x%lx  initrd_end=0x%lx\n",
-					initrd_start, initrd_end);
-}
-#else
-static inline void early_init_dt_check_for_initrd(unsigned long node)
-{
-}
-#endif /* CONFIG_BLK_DEV_INITRD */
-
-static int __init early_init_dt_scan_chosen(unsigned long node,
-				const char *uname, int depth, void *data)
-{
-	unsigned long l;
-	char *p;
-
-	pr_debug("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
-
-	if (depth != 1 ||
-		(strcmp(uname, "chosen") != 0 &&
-				strcmp(uname, "chosen@0") != 0))
-		return 0;
-
-#ifdef CONFIG_KEXEC
-	lprop = (u64 *)of_get_flat_dt_prop(node,
-				"linux,crashkernel-base", NULL);
-	if (lprop)
-		crashk_res.start = *lprop;
-
-	lprop = (u64 *)of_get_flat_dt_prop(node,
-				"linux,crashkernel-size", NULL);
-	if (lprop)
-		crashk_res.end = crashk_res.start + *lprop - 1;
-#endif
-
-	early_init_dt_check_for_initrd(node);
-
-	/* Retreive command line */
-	p = of_get_flat_dt_prop(node, "bootargs", &l);
-	if (p != NULL && l > 0)
-		strlcpy(cmd_line, p, min((int)l, COMMAND_LINE_SIZE));
-
-#ifdef CONFIG_CMDLINE
-#ifndef CONFIG_CMDLINE_FORCE
-	if (p == NULL || l == 0 || (l == 1 && (*p) == 0))
-#endif
-		strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
-#endif /* CONFIG_CMDLINE */
-
-	pr_debug("Command line is: %s\n", cmd_line);
-
-	/* break now */
-	return 1;
-}
-
-static int __init early_init_dt_scan_root(unsigned long node,
-				const char *uname, int depth, void *data)
-{
-	u32 *prop;
-
-	if (depth != 0)
-		return 0;
-
-	prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
-	dt_root_size_cells = (prop == NULL) ? 1 : *prop;
-	pr_debug("dt_root_size_cells = %x\n", dt_root_size_cells);
-
-	prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
-	dt_root_addr_cells = (prop == NULL) ? 2 : *prop;
-	pr_debug("dt_root_addr_cells = %x\n", dt_root_addr_cells);
-
-	/* break now */
-	return 1;
-}
-
-static u64 __init dt_mem_next_cell(int s, cell_t **cellp)
-{
-	cell_t *p = *cellp;
-
-	*cellp = p + s;
-	return of_read_number(p, s);
-}
-
-static int __init early_init_dt_scan_memory(unsigned long node,
-				const char *uname, int depth, void *data)
-{
-	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
-	cell_t *reg, *endp;
-	unsigned long l;
-
-	/* Look for the ibm,dynamic-reconfiguration-memory node */
-/*	if (depth == 1 &&
-		strcmp(uname, "ibm,dynamic-reconfiguration-memory") == 0)
-		return early_init_dt_scan_drconf_memory(node);
-*/
-	/* We are scanning "memory" nodes only */
-	if (type == NULL) {
-		/*
-		 * The longtrail doesn't have a device_type on the
-		 * /memory node, so look for the node called /memory@0.
-		 */
-		if (depth != 1 || strcmp(uname, "memory@0") != 0)
-			return 0;
-	} else if (strcmp(type, "memory") != 0)
-		return 0;
-
-	reg = (cell_t *)of_get_flat_dt_prop(node, "linux,usable-memory", &l);
-	if (reg == NULL)
-		reg = (cell_t *)of_get_flat_dt_prop(node, "reg", &l);
-	if (reg == NULL)
-		return 0;
-
-	endp = reg + (l / sizeof(cell_t));
-
-	pr_debug("memory scan node %s, reg size %ld, data: %x %x %x %x,\n",
-		uname, l, reg[0], reg[1], reg[2], reg[3]);
-
-	while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
-		u64 base, size;
-
-		base = dt_mem_next_cell(dt_root_addr_cells, &reg);
-		size = dt_mem_next_cell(dt_root_size_cells, &reg);
-
-		if (size == 0)
-			continue;
-		pr_debug(" - %llx ,  %llx\n", (unsigned long long)base,
-			(unsigned long long)size);
-
-		lmb_add(base, size);
-	}
-	return 0;
-}
-
-#ifdef CONFIG_PHYP_DUMP
-/**
- * phyp_dump_calculate_reserve_size() - reserve variable boot area 5% or arg
- *
- * Function to find the largest size we need to reserve
- * during early boot process.
- *
- * It either looks for boot param and returns that OR
- * returns larger of 256 or 5% rounded down to multiples of 256MB.
- *
- */
-static inline unsigned long phyp_dump_calculate_reserve_size(void)
-{
-	unsigned long tmp;
-
-	if (phyp_dump_info->reserve_bootvar)
-		return phyp_dump_info->reserve_bootvar;
-
-	/* divide by 20 to get 5% of value */
-	tmp = lmb_end_of_DRAM();
-	do_div(tmp, 20);
-
-	/* round it down in multiples of 256 */
-	tmp = tmp & ~0x0FFFFFFFUL;
-
-	return (tmp > PHYP_DUMP_RMR_END ? tmp : PHYP_DUMP_RMR_END);
-}
-
-/**
- * phyp_dump_reserve_mem() - reserve all not-yet-dumped mmemory
- *
- * This routine may reserve memory regions in the kernel only
- * if the system is supported and a dump was taken in last
- * boot instance or if the hardware is supported and the
- * scratch area needs to be setup. In other instances it returns
- * without reserving anything. The memory in case of dump being
- * active is freed when the dump is collected (by userland tools).
- */
-static void __init phyp_dump_reserve_mem(void)
-{
-	unsigned long base, size;
-	unsigned long variable_reserve_size;
-
-	if (!phyp_dump_info->phyp_dump_configured) {
-		printk(KERN_ERR "Phyp-dump not supported on this hardware\n");
-		return;
-	}
-
-	if (!phyp_dump_info->phyp_dump_at_boot) {
-		printk(KERN_INFO "Phyp-dump disabled at boot time\n");
-		return;
-	}
-
-	variable_reserve_size = phyp_dump_calculate_reserve_size();
-
-	if (phyp_dump_info->phyp_dump_is_active) {
-		/* Reserve *everything* above RMR.Area freed by userland tools*/
-		base = variable_reserve_size;
-		size = lmb_end_of_DRAM() - base;
-
-		/* XXX crashed_ram_end is wrong, since it may be beyond
-		 * the memory_limit, it will need to be adjusted. */
-		lmb_reserve(base, size);
-
-		phyp_dump_info->init_reserve_start = base;
-		phyp_dump_info->init_reserve_size = size;
-	} else {
-		size = phyp_dump_info->cpu_state_size +
-			phyp_dump_info->hpte_region_size +
-			variable_reserve_size;
-		base = lmb_end_of_DRAM() - size;
-		lmb_reserve(base, size);
-		phyp_dump_info->init_reserve_start = base;
-		phyp_dump_info->init_reserve_size = size;
-	}
-}
-#else
-static inline void __init phyp_dump_reserve_mem(void) {}
-#endif /* CONFIG_PHYP_DUMP  && CONFIG_PPC_RTAS */
-
 #ifdef CONFIG_EARLY_PRINTK
 /* MS this is a Microblaze specific function */
 static int __init early_init_dt_scan_serial(unsigned long node,
@@ -775,11 +98,6 @@
 	/* Setup flat device-tree pointer */
 	initial_boot_params = params;
 
-#ifdef CONFIG_PHYP_DUMP
-	/* scan tree to see if dump occured during last boot */
-	of_scan_flat_dt(early_init_dt_scan_phyp_dump, NULL);
-#endif
-
 	/* Retrieve various information from the /chosen node of the
 	 * device-tree, including the platform type, initrd location and
 	 * size, TCE reserve, and more ...
@@ -799,33 +117,18 @@
 
 	pr_debug("Phys. mem: %lx\n", (unsigned long) lmb_phys_mem_size());
 
-	pr_debug("Scanning CPUs ...\n");
-
-	/* Retreive CPU related informations from the flat tree
-	 * (altivec support, boot CPU ID, ...)
-	 */
-	of_scan_flat_dt(early_init_dt_scan_cpus, NULL);
-
 	pr_debug(" <- early_init_devtree()\n");
 }
 
-/**
- * Indicates whether the root node has a given value in its
- * compatible property.
- */
-int machine_is_compatible(const char *compat)
+#ifdef CONFIG_BLK_DEV_INITRD
+void __init early_init_dt_setup_initrd_arch(unsigned long start,
+		unsigned long end)
 {
-	struct device_node *root;
-	int rc = 0;
-
-	root = of_find_node_by_path("/");
-	if (root) {
-		rc = of_device_is_compatible(root, compat);
-		of_node_put(root);
-	}
-	return rc;
+	initrd_start = (unsigned long)__va(start);
+	initrd_end = (unsigned long)__va(end);
+	initrd_below_start_ok = 1;
 }
-EXPORT_SYMBOL(machine_is_compatible);
+#endif
 
 /*******
  *
@@ -838,273 +141,6 @@
  *
  *******/
 
-/**
- *	of_find_node_by_phandle - Find a node given a phandle
- *	@handle:	phandle of the node to find
- *
- *	Returns a node pointer with refcount incremented, use
- *	of_node_put() on it when done.
- */
-struct device_node *of_find_node_by_phandle(phandle handle)
-{
-	struct device_node *np;
-
-	read_lock(&devtree_lock);
-	for (np = allnodes; np != NULL; np = np->allnext)
-		if (np->linux_phandle == handle)
-			break;
-	of_node_get(np);
-	read_unlock(&devtree_lock);
-	return np;
-}
-EXPORT_SYMBOL(of_find_node_by_phandle);
-
-/**
- *	of_node_get - Increment refcount of a node
- *	@node:	Node to inc refcount, NULL is supported to
- *		simplify writing of callers
- *
- *	Returns node.
- */
-struct device_node *of_node_get(struct device_node *node)
-{
-	if (node)
-		kref_get(&node->kref);
-	return node;
-}
-EXPORT_SYMBOL(of_node_get);
-
-static inline struct device_node *kref_to_device_node(struct kref *kref)
-{
-	return container_of(kref, struct device_node, kref);
-}
-
-/**
- *	of_node_release - release a dynamically allocated node
- *	@kref:  kref element of the node to be released
- *
- *	In of_node_put() this function is passed to kref_put()
- *	as the destructor.
- */
-static void of_node_release(struct kref *kref)
-{
-	struct device_node *node = kref_to_device_node(kref);
-	struct property *prop = node->properties;
-
-	/* We should never be releasing nodes that haven't been detached. */
-	if (!of_node_check_flag(node, OF_DETACHED)) {
-		printk(KERN_INFO "WARNING: Bad of_node_put() on %s\n",
-			node->full_name);
-		dump_stack();
-		kref_init(&node->kref);
-		return;
-	}
-
-	if (!of_node_check_flag(node, OF_DYNAMIC))
-		return;
-
-	while (prop) {
-		struct property *next = prop->next;
-		kfree(prop->name);
-		kfree(prop->value);
-		kfree(prop);
-		prop = next;
-
-		if (!prop) {
-			prop = node->deadprops;
-			node->deadprops = NULL;
-		}
-	}
-	kfree(node->full_name);
-	kfree(node->data);
-	kfree(node);
-}
-
-/**
- *	of_node_put - Decrement refcount of a node
- *	@node:	Node to dec refcount, NULL is supported to
- *		simplify writing of callers
- *
- */
-void of_node_put(struct device_node *node)
-{
-	if (node)
-		kref_put(&node->kref, of_node_release);
-}
-EXPORT_SYMBOL(of_node_put);
-
-/*
- * Plug a device node into the tree and global list.
- */
-void of_attach_node(struct device_node *np)
-{
-	unsigned long flags;
-
-	write_lock_irqsave(&devtree_lock, flags);
-	np->sibling = np->parent->child;
-	np->allnext = allnodes;
-	np->parent->child = np;
-	allnodes = np;
-	write_unlock_irqrestore(&devtree_lock, flags);
-}
-
-/*
- * "Unplug" a node from the device tree.  The caller must hold
- * a reference to the node.  The memory associated with the node
- * is not freed until its refcount goes to zero.
- */
-void of_detach_node(struct device_node *np)
-{
-	struct device_node *parent;
-	unsigned long flags;
-
-	write_lock_irqsave(&devtree_lock, flags);
-
-	parent = np->parent;
-	if (!parent)
-		goto out_unlock;
-
-	if (allnodes == np)
-		allnodes = np->allnext;
-	else {
-		struct device_node *prev;
-		for (prev = allnodes;
-		     prev->allnext != np;
-		     prev = prev->allnext)
-			;
-		prev->allnext = np->allnext;
-	}
-
-	if (parent->child == np)
-		parent->child = np->sibling;
-	else {
-		struct device_node *prevsib;
-		for (prevsib = np->parent->child;
-		     prevsib->sibling != np;
-		     prevsib = prevsib->sibling)
-			;
-		prevsib->sibling = np->sibling;
-	}
-
-	of_node_set_flag(np, OF_DETACHED);
-
-out_unlock:
-	write_unlock_irqrestore(&devtree_lock, flags);
-}
-
-/*
- * Add a property to a node
- */
-int prom_add_property(struct device_node *np, struct property *prop)
-{
-	struct property **next;
-	unsigned long flags;
-
-	prop->next = NULL;
-	write_lock_irqsave(&devtree_lock, flags);
-	next = &np->properties;
-	while (*next) {
-		if (strcmp(prop->name, (*next)->name) == 0) {
-			/* duplicate ! don't insert it */
-			write_unlock_irqrestore(&devtree_lock, flags);
-			return -1;
-		}
-		next = &(*next)->next;
-	}
-	*next = prop;
-	write_unlock_irqrestore(&devtree_lock, flags);
-
-#ifdef CONFIG_PROC_DEVICETREE
-	/* try to add to proc as well if it was initialized */
-	if (np->pde)
-		proc_device_tree_add_prop(np->pde, prop);
-#endif /* CONFIG_PROC_DEVICETREE */
-
-	return 0;
-}
-
-/*
- * Remove a property from a node.  Note that we don't actually
- * remove it, since we have given out who-knows-how-many pointers
- * to the data using get-property.  Instead we just move the property
- * to the "dead properties" list, so it won't be found any more.
- */
-int prom_remove_property(struct device_node *np, struct property *prop)
-{
-	struct property **next;
-	unsigned long flags;
-	int found = 0;
-
-	write_lock_irqsave(&devtree_lock, flags);
-	next = &np->properties;
-	while (*next) {
-		if (*next == prop) {
-			/* found the node */
-			*next = prop->next;
-			prop->next = np->deadprops;
-			np->deadprops = prop;
-			found = 1;
-			break;
-		}
-		next = &(*next)->next;
-	}
-	write_unlock_irqrestore(&devtree_lock, flags);
-
-	if (!found)
-		return -ENODEV;
-
-#ifdef CONFIG_PROC_DEVICETREE
-	/* try to remove the proc node as well */
-	if (np->pde)
-		proc_device_tree_remove_prop(np->pde, prop);
-#endif /* CONFIG_PROC_DEVICETREE */
-
-	return 0;
-}
-
-/*
- * Update a property in a node.  Note that we don't actually
- * remove it, since we have given out who-knows-how-many pointers
- * to the data using get-property.  Instead we just move the property
- * to the "dead properties" list, and add the new property to the
- * property list
- */
-int prom_update_property(struct device_node *np,
-			 struct property *newprop,
-			 struct property *oldprop)
-{
-	struct property **next;
-	unsigned long flags;
-	int found = 0;
-
-	write_lock_irqsave(&devtree_lock, flags);
-	next = &np->properties;
-	while (*next) {
-		if (*next == oldprop) {
-			/* found the node */
-			newprop->next = oldprop->next;
-			*next = newprop;
-			oldprop->next = np->deadprops;
-			np->deadprops = oldprop;
-			found = 1;
-			break;
-		}
-		next = &(*next)->next;
-	}
-	write_unlock_irqrestore(&devtree_lock, flags);
-
-	if (!found)
-		return -ENODEV;
-
-#ifdef CONFIG_PROC_DEVICETREE
-	/* try to add to proc as well if it was initialized */
-	if (np->pde)
-		proc_device_tree_update_prop(np->pde, newprop, oldprop);
-#endif /* CONFIG_PROC_DEVICETREE */
-
-	return 0;
-}
-
 #if defined(CONFIG_DEBUG_FS) && defined(DEBUG)
 static struct debugfs_blob_wrapper flat_dt_blob;
 
diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c
index 9a11c22..f87f5e1 100644
--- a/arch/mips/pci/pci.c
+++ b/arch/mips/pci/pci.c
@@ -49,8 +49,8 @@
  * but we want to try to avoid allocating at 0x2900-0x2bff
  * which might have been mirrored at 0x0100-0x03ff..
  */
-void
-pcibios_align_resource(void *data, struct resource *res,
+resource_size_t
+pcibios_align_resource(void *data, const struct resource *res,
 		       resource_size_t size, resource_size_t align)
 {
 	struct pci_dev *dev = data;
@@ -73,7 +73,7 @@
 			start = PCIBIOS_MIN_MEM + hose->mem_resource->start;
 	}
 
-	res->start = start;
+	return start;
 }
 
 static void __devinit pcibios_scanbus(struct pci_controller *hose)
diff --git a/arch/mips/pmc-sierra/yosemite/ht.c b/arch/mips/pmc-sierra/yosemite/ht.c
index 678388f..fd22597 100644
--- a/arch/mips/pmc-sierra/yosemite/ht.c
+++ b/arch/mips/pmc-sierra/yosemite/ht.c
@@ -345,14 +345,13 @@
         return pcibios_enable_resources(dev);
 }
 
-void pcibios_align_resource(void *data, struct resource *res,
-                            resource_size_t size, resource_size_t align)
+resource_size_t pcibios_align_resource(void *data, const struct resource *res,
+				resource_size_t size, resource_size_t align)
 {
         struct pci_dev *dev = data;
+	resource_size_t start = res->start;
 
         if (res->flags & IORESOURCE_IO) {
-                resource_size_t start = res->start;
-
                 /* We need to avoid collisions with `mirrored' VGA ports
                    and other strange ISA hardware, so we always want the
                    addresses kilobyte aligned.  */
@@ -363,8 +362,9 @@
                 }
 
                 start = (start + 1024 - 1) & ~(1024 - 1);
-                res->start = start;
         }
+
+	return start;
 }
 
 struct pci_ops titan_pci_ops = {
diff --git a/arch/mn10300/unit-asb2305/pci-asb2305.c b/arch/mn10300/unit-asb2305/pci-asb2305.c
index 78cd134..d6119b8 100644
--- a/arch/mn10300/unit-asb2305/pci-asb2305.c
+++ b/arch/mn10300/unit-asb2305/pci-asb2305.c
@@ -31,9 +31,11 @@
  * but we want to try to avoid allocating at 0x2900-0x2bff
  * which might have been mirrored at 0x0100-0x03ff..
  */
-void pcibios_align_resource(void *data, struct resource *res,
-			    resource_size_t size, resource_size_t align)
+resource_size_t pcibios_align_resource(void *data, const struct resource *res,
+				resource_size_t size, resource_size_t align)
 {
+	resource_size_t start = res->start;
+
 #if 0
 	struct pci_dev *dev = data;
 
@@ -47,14 +49,10 @@
 	       );
 #endif
 
-	if (res->flags & IORESOURCE_IO) {
-		unsigned long start = res->start;
+	if ((res->flags & IORESOURCE_IO) && (start & 0x300))
+		start = (start + 0x3ff) & ~0x3ff;
 
-		if (start & 0x300) {
-			start = (start + 0x3ff) & ~0x3ff;
-			res->start = start;
-		}
-	}
+	return start;
 }
 
 
diff --git a/arch/mn10300/unit-asb2305/pci.c b/arch/mn10300/unit-asb2305/pci.c
index 2cb7e75..6d8720a 100644
--- a/arch/mn10300/unit-asb2305/pci.c
+++ b/arch/mn10300/unit-asb2305/pci.c
@@ -331,12 +331,10 @@
 static int __devinit is_valid_resource(struct pci_dev *dev, int idx)
 {
 	unsigned int i, type_mask = IORESOURCE_IO | IORESOURCE_MEM;
-	struct resource *devr = &dev->resource[idx];
+	struct resource *devr = &dev->resource[idx], *busr;
 
 	if (dev->bus) {
-		for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
-			struct resource *busr = dev->bus->resource[i];
-
+		pci_bus_for_each_resource(dev->bus, busr, i) {
 			if (!busr || (busr->flags ^ devr->flags) & type_mask)
 				continue;
 
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index 7a73b61..4772777 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -38,6 +38,18 @@
 
 #define flush_kernel_dcache_range(start,size) \
 	flush_kernel_dcache_range_asm((start), (start)+(size));
+/* vmap range flushes and invalidates.  Architecturally, we don't need
+ * the invalidate, because the CPU should refuse to speculate once an
+ * area has been flushed, so invalidate is left empty */
+static inline void flush_kernel_vmap_range(void *vaddr, int size)
+{
+	unsigned long start = (unsigned long)vaddr;
+
+	flush_kernel_dcache_range_asm(start, start + size);
+}
+static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
+{
+}
 
 #define flush_cache_vmap(start, end)		flush_cache_all()
 #define flush_cache_vunmap(start, end)		flush_cache_all()
diff --git a/arch/parisc/kernel/pci.c b/arch/parisc/kernel/pci.c
index 9e74bfe..38372e7 100644
--- a/arch/parisc/kernel/pci.c
+++ b/arch/parisc/kernel/pci.c
@@ -257,10 +257,10 @@
  * Since we are just checking candidates, don't use any fields other
  * than res->start.
  */
-void pcibios_align_resource(void *data, struct resource *res,
+resource_size_t pcibios_align_resource(void *data, const struct resource *res,
 				resource_size_t size, resource_size_t alignment)
 {
-	resource_size_t mask, align;
+	resource_size_t mask, align, start = res->start;
 
 	DBG_RES("pcibios_align_resource(%s, (%p) [%lx,%lx]/%x, 0x%lx, 0x%lx)\n",
 		pci_name(((struct pci_dev *) data)),
@@ -272,10 +272,10 @@
 
 	/* Align to largest of MIN or input size */
 	mask = max(alignment, align) - 1;
-	res->start += mask;
-	res->start &= ~mask;
+	start += mask;
+	start &= ~mask;
 
-	/* The caller updates the end field, we don't.  */
+	return start;
 }
 
 
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index ba3948c..50c9af4 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -173,6 +173,7 @@
 
 config OF
 	def_bool y
+	select OF_FLATTREE
 
 config PPC_UDBG_16550
 	bool
diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h
index 2ab9cbd..ddd408a 100644
--- a/arch/powerpc/include/asm/prom.h
+++ b/arch/powerpc/include/asm/prom.h
@@ -23,21 +23,8 @@
 #include <asm/irq.h>
 #include <asm/atomic.h>
 
-#define OF_ROOT_NODE_ADDR_CELLS_DEFAULT	1
-#define OF_ROOT_NODE_SIZE_CELLS_DEFAULT	1
-
-#define of_compat_cmp(s1, s2, l)	strcasecmp((s1), (s2))
-#define of_prop_cmp(s1, s2)		strcmp((s1), (s2))
-#define of_node_cmp(s1, s2)		strcasecmp((s1), (s2))
-
-extern struct device_node *of_chosen;
-
 #define HAVE_ARCH_DEVTREE_FIXUPS
 
-/* For updating the device tree at runtime */
-extern void of_attach_node(struct device_node *);
-extern void of_detach_node(struct device_node *);
-
 #ifdef CONFIG_PPC32
 /*
  * PCI <-> OF matching functions
@@ -52,11 +39,6 @@
 extern void pci_create_OF_bus_map(void);
 #endif
 
-extern struct resource *request_OF_resource(struct device_node* node,
-				int index, const char* name_postfix);
-extern int release_OF_resource(struct device_node* node, int index);
-
-
 /*
  * OF address retrieval & translation
  */
diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c
index 1a4fc0d..666d08d 100644
--- a/arch/powerpc/kernel/of_platform.c
+++ b/arch/powerpc/kernel/of_platform.c
@@ -214,7 +214,7 @@
 static int of_dev_phandle_match(struct device *dev, void *data)
 {
 	phandle *ph = data;
-	return to_of_device(dev)->node->linux_phandle == *ph;
+	return to_of_device(dev)->node->phandle == *ph;
 }
 
 struct of_device *of_find_device_by_phandle(phandle ph)
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index cadbed6..2597f95 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -1047,10 +1047,8 @@
 
 	struct pci_dev *dev = bus->self;
 
-	for (i = 0; i < PCI_BUS_NUM_RESOURCES; ++i) {
-		if ((res = bus->resource[i]) == NULL)
-			continue;
-		if (!res->flags)
+	pci_bus_for_each_resource(bus, res, i) {
+		if (!res || !res->flags)
 			continue;
 		if (i >= 3 && bus->self->transparent)
 			continue;
@@ -1181,21 +1179,20 @@
  * but we want to try to avoid allocating at 0x2900-0x2bff
  * which might have been mirrored at 0x0100-0x03ff..
  */
-void pcibios_align_resource(void *data, struct resource *res,
+resource_size_t pcibios_align_resource(void *data, const struct resource *res,
 				resource_size_t size, resource_size_t align)
 {
 	struct pci_dev *dev = data;
+	resource_size_t start = res->start;
 
 	if (res->flags & IORESOURCE_IO) {
-		resource_size_t start = res->start;
-
 		if (skip_isa_ioresource_align(dev))
-			return;
-		if (start & 0x300) {
+			return start;
+		if (start & 0x300)
 			start = (start + 0x3ff) & ~0x3ff;
-			res->start = start;
-		}
 	}
+
+	return start;
 }
 EXPORT_SYMBOL(pcibios_align_resource);
 
@@ -1278,9 +1275,8 @@
 	pr_debug("PCI: Allocating bus resources for %04x:%02x...\n",
 		 pci_domain_nr(bus), bus->number);
 
-	for (i = 0; i < PCI_BUS_NUM_RESOURCES; ++i) {
-		if ((res = bus->resource[i]) == NULL || !res->flags
-		    || res->start > res->end || res->parent)
+	pci_bus_for_each_resource(bus, res, i) {
+		if (!res || !res->flags || res->start > res->end || res->parent)
 			continue;
 		if (bus->parent == NULL)
 			pr = (res->flags & IORESOURCE_IO) ?
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index ccf56ac..d43fc65 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -224,7 +224,7 @@
 	 * G5 machines... So when something asks for bus 0 io base
 	 * (bus 0 is HT root), we return the AGP one instead.
 	 */
-	if (in_bus == 0 && machine_is_compatible("MacRISC4")) {
+	if (in_bus == 0 && of_machine_is_compatible("MacRISC4")) {
 		struct device_node *agp;
 
 		agp = of_find_compatible_node(NULL, NULL, "u3-agp");
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 4ec3008..43238b2 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -61,365 +61,12 @@
 #define DBG(fmt...)
 #endif
 
-
-static int __initdata dt_root_addr_cells;
-static int __initdata dt_root_size_cells;
-
 #ifdef CONFIG_PPC64
 int __initdata iommu_is_off;
 int __initdata iommu_force_on;
 unsigned long tce_alloc_start, tce_alloc_end;
 #endif
 
-typedef u32 cell_t;
-
-#if 0
-static struct boot_param_header *initial_boot_params __initdata;
-#else
-struct boot_param_header *initial_boot_params;
-#endif
-
-extern struct device_node *allnodes;	/* temporary while merging */
-
-extern rwlock_t devtree_lock;	/* temporary while merging */
-
-/* export that to outside world */
-struct device_node *of_chosen;
-
-static inline char *find_flat_dt_string(u32 offset)
-{
-	return ((char *)initial_boot_params) +
-		initial_boot_params->off_dt_strings + offset;
-}
-
-/**
- * This function is used to scan the flattened device-tree, it is
- * used to extract the memory informations at boot before we can
- * unflatten the tree
- */
-int __init of_scan_flat_dt(int (*it)(unsigned long node,
-				     const char *uname, int depth,
-				     void *data),
-			   void *data)
-{
-	unsigned long p = ((unsigned long)initial_boot_params) +
-		initial_boot_params->off_dt_struct;
-	int rc = 0;
-	int depth = -1;
-
-	do {
-		u32 tag = *((u32 *)p);
-		char *pathp;
-		
-		p += 4;
-		if (tag == OF_DT_END_NODE) {
-			depth --;
-			continue;
-		}
-		if (tag == OF_DT_NOP)
-			continue;
-		if (tag == OF_DT_END)
-			break;
-		if (tag == OF_DT_PROP) {
-			u32 sz = *((u32 *)p);
-			p += 8;
-			if (initial_boot_params->version < 0x10)
-				p = _ALIGN(p, sz >= 8 ? 8 : 4);
-			p += sz;
-			p = _ALIGN(p, 4);
-			continue;
-		}
-		if (tag != OF_DT_BEGIN_NODE) {
-			printk(KERN_WARNING "Invalid tag %x scanning flattened"
-			       " device tree !\n", tag);
-			return -EINVAL;
-		}
-		depth++;
-		pathp = (char *)p;
-		p = _ALIGN(p + strlen(pathp) + 1, 4);
-		if ((*pathp) == '/') {
-			char *lp, *np;
-			for (lp = NULL, np = pathp; *np; np++)
-				if ((*np) == '/')
-					lp = np+1;
-			if (lp != NULL)
-				pathp = lp;
-		}
-		rc = it(p, pathp, depth, data);
-		if (rc != 0)
-			break;		
-	} while(1);
-
-	return rc;
-}
-
-unsigned long __init of_get_flat_dt_root(void)
-{
-	unsigned long p = ((unsigned long)initial_boot_params) +
-		initial_boot_params->off_dt_struct;
-
-	while(*((u32 *)p) == OF_DT_NOP)
-		p += 4;
-	BUG_ON (*((u32 *)p) != OF_DT_BEGIN_NODE);
-	p += 4;
-	return _ALIGN(p + strlen((char *)p) + 1, 4);
-}
-
-/**
- * This  function can be used within scan_flattened_dt callback to get
- * access to properties
- */
-void* __init of_get_flat_dt_prop(unsigned long node, const char *name,
-				 unsigned long *size)
-{
-	unsigned long p = node;
-
-	do {
-		u32 tag = *((u32 *)p);
-		u32 sz, noff;
-		const char *nstr;
-
-		p += 4;
-		if (tag == OF_DT_NOP)
-			continue;
-		if (tag != OF_DT_PROP)
-			return NULL;
-
-		sz = *((u32 *)p);
-		noff = *((u32 *)(p + 4));
-		p += 8;
-		if (initial_boot_params->version < 0x10)
-			p = _ALIGN(p, sz >= 8 ? 8 : 4);
-
-		nstr = find_flat_dt_string(noff);
-		if (nstr == NULL) {
-			printk(KERN_WARNING "Can't find property index"
-			       " name !\n");
-			return NULL;
-		}
-		if (strcmp(name, nstr) == 0) {
-			if (size)
-				*size = sz;
-			return (void *)p;
-		}
-		p += sz;
-		p = _ALIGN(p, 4);
-	} while(1);
-}
-
-int __init of_flat_dt_is_compatible(unsigned long node, const char *compat)
-{
-	const char* cp;
-	unsigned long cplen, l;
-
-	cp = of_get_flat_dt_prop(node, "compatible", &cplen);
-	if (cp == NULL)
-		return 0;
-	while (cplen > 0) {
-		if (strncasecmp(cp, compat, strlen(compat)) == 0)
-			return 1;
-		l = strlen(cp) + 1;
-		cp += l;
-		cplen -= l;
-	}
-
-	return 0;
-}
-
-static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size,
-				       unsigned long align)
-{
-	void *res;
-
-	*mem = _ALIGN(*mem, align);
-	res = (void *)*mem;
-	*mem += size;
-
-	return res;
-}
-
-static unsigned long __init unflatten_dt_node(unsigned long mem,
-					      unsigned long *p,
-					      struct device_node *dad,
-					      struct device_node ***allnextpp,
-					      unsigned long fpsize)
-{
-	struct device_node *np;
-	struct property *pp, **prev_pp = NULL;
-	char *pathp;
-	u32 tag;
-	unsigned int l, allocl;
-	int has_name = 0;
-	int new_format = 0;
-
-	tag = *((u32 *)(*p));
-	if (tag != OF_DT_BEGIN_NODE) {
-		printk("Weird tag at start of node: %x\n", tag);
-		return mem;
-	}
-	*p += 4;
-	pathp = (char *)*p;
-	l = allocl = strlen(pathp) + 1;
-	*p = _ALIGN(*p + l, 4);
-
-	/* version 0x10 has a more compact unit name here instead of the full
-	 * path. we accumulate the full path size using "fpsize", we'll rebuild
-	 * it later. We detect this because the first character of the name is
-	 * not '/'.
-	 */
-	if ((*pathp) != '/') {
-		new_format = 1;
-		if (fpsize == 0) {
-			/* root node: special case. fpsize accounts for path
-			 * plus terminating zero. root node only has '/', so
-			 * fpsize should be 2, but we want to avoid the first
-			 * level nodes to have two '/' so we use fpsize 1 here
-			 */
-			fpsize = 1;
-			allocl = 2;
-		} else {
-			/* account for '/' and path size minus terminal 0
-			 * already in 'l'
-			 */
-			fpsize += l;
-			allocl = fpsize;
-		}
-	}
-
-
-	np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + allocl,
-				__alignof__(struct device_node));
-	if (allnextpp) {
-		memset(np, 0, sizeof(*np));
-		np->full_name = ((char*)np) + sizeof(struct device_node);
-		if (new_format) {
-			char *p = np->full_name;
-			/* rebuild full path for new format */
-			if (dad && dad->parent) {
-				strcpy(p, dad->full_name);
-#ifdef DEBUG
-				if ((strlen(p) + l + 1) != allocl) {
-					DBG("%s: p: %d, l: %d, a: %d\n",
-					    pathp, (int)strlen(p), l, allocl);
-				}
-#endif
-				p += strlen(p);
-			}
-			*(p++) = '/';
-			memcpy(p, pathp, l);
-		} else
-			memcpy(np->full_name, pathp, l);
-		prev_pp = &np->properties;
-		**allnextpp = np;
-		*allnextpp = &np->allnext;
-		if (dad != NULL) {
-			np->parent = dad;
-			/* we temporarily use the next field as `last_child'*/
-			if (dad->next == 0)
-				dad->child = np;
-			else
-				dad->next->sibling = np;
-			dad->next = np;
-		}
-		kref_init(&np->kref);
-	}
-	while(1) {
-		u32 sz, noff;
-		char *pname;
-
-		tag = *((u32 *)(*p));
-		if (tag == OF_DT_NOP) {
-			*p += 4;
-			continue;
-		}
-		if (tag != OF_DT_PROP)
-			break;
-		*p += 4;
-		sz = *((u32 *)(*p));
-		noff = *((u32 *)((*p) + 4));
-		*p += 8;
-		if (initial_boot_params->version < 0x10)
-			*p = _ALIGN(*p, sz >= 8 ? 8 : 4);
-
-		pname = find_flat_dt_string(noff);
-		if (pname == NULL) {
-			printk("Can't find property name in list !\n");
-			break;
-		}
-		if (strcmp(pname, "name") == 0)
-			has_name = 1;
-		l = strlen(pname) + 1;
-		pp = unflatten_dt_alloc(&mem, sizeof(struct property),
-					__alignof__(struct property));
-		if (allnextpp) {
-			if (strcmp(pname, "linux,phandle") == 0) {
-				np->node = *((u32 *)*p);
-				if (np->linux_phandle == 0)
-					np->linux_phandle = np->node;
-			}
-			if (strcmp(pname, "ibm,phandle") == 0)
-				np->linux_phandle = *((u32 *)*p);
-			pp->name = pname;
-			pp->length = sz;
-			pp->value = (void *)*p;
-			*prev_pp = pp;
-			prev_pp = &pp->next;
-		}
-		*p = _ALIGN((*p) + sz, 4);
-	}
-	/* with version 0x10 we may not have the name property, recreate
-	 * it here from the unit name if absent
-	 */
-	if (!has_name) {
-		char *p = pathp, *ps = pathp, *pa = NULL;
-		int sz;
-
-		while (*p) {
-			if ((*p) == '@')
-				pa = p;
-			if ((*p) == '/')
-				ps = p + 1;
-			p++;
-		}
-		if (pa < ps)
-			pa = p;
-		sz = (pa - ps) + 1;
-		pp = unflatten_dt_alloc(&mem, sizeof(struct property) + sz,
-					__alignof__(struct property));
-		if (allnextpp) {
-			pp->name = "name";
-			pp->length = sz;
-			pp->value = pp + 1;
-			*prev_pp = pp;
-			prev_pp = &pp->next;
-			memcpy(pp->value, ps, sz - 1);
-			((char *)pp->value)[sz - 1] = 0;
-			DBG("fixed up name for %s -> %s\n", pathp,
-				(char *)pp->value);
-		}
-	}
-	if (allnextpp) {
-		*prev_pp = NULL;
-		np->name = of_get_property(np, "name", NULL);
-		np->type = of_get_property(np, "device_type", NULL);
-
-		if (!np->name)
-			np->name = "<NULL>";
-		if (!np->type)
-			np->type = "<NULL>";
-	}
-	while (tag == OF_DT_BEGIN_NODE) {
-		mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize);
-		tag = *((u32 *)(*p));
-	}
-	if (tag != OF_DT_END_NODE) {
-		printk("Weird tag at end of node: %x\n", tag);
-		return mem;
-	}
-	*p += 4;
-	return mem;
-}
-
 static int __init early_parse_mem(char *p)
 {
 	if (!p)
@@ -446,7 +93,7 @@
 	DBG("-> move_device_tree\n");
 
 	start = __pa(initial_boot_params);
-	size = initial_boot_params->totalsize;
+	size = be32_to_cpu(initial_boot_params->totalsize);
 
 	if ((memory_limit && (start + size) > memory_limit) ||
 			overlaps_crashkernel(start, size)) {
@@ -459,54 +106,6 @@
 	DBG("<- move_device_tree\n");
 }
 
-/**
- * unflattens the device-tree passed by the firmware, creating the
- * tree of struct device_node. It also fills the "name" and "type"
- * pointers of the nodes so the normal device-tree walking functions
- * can be used (this used to be done by finish_device_tree)
- */
-void __init unflatten_device_tree(void)
-{
-	unsigned long start, mem, size;
-	struct device_node **allnextp = &allnodes;
-
-	DBG(" -> unflatten_device_tree()\n");
-
-	/* First pass, scan for size */
-	start = ((unsigned long)initial_boot_params) +
-		initial_boot_params->off_dt_struct;
-	size = unflatten_dt_node(0, &start, NULL, NULL, 0);
-	size = (size | 3) + 1;
-
-	DBG("  size is %lx, allocating...\n", size);
-
-	/* Allocate memory for the expanded device tree */
-	mem = lmb_alloc(size + 4, __alignof__(struct device_node));
-	mem = (unsigned long) __va(mem);
-
-	((u32 *)mem)[size / 4] = 0xdeadbeef;
-
-	DBG("  unflattening %lx...\n", mem);
-
-	/* Second pass, do actual unflattening */
-	start = ((unsigned long)initial_boot_params) +
-		initial_boot_params->off_dt_struct;
-	unflatten_dt_node(mem, &start, NULL, &allnextp, 0);
-	if (*((u32 *)start) != OF_DT_END)
-		printk(KERN_WARNING "Weird tag at end of tree: %08x\n", *((u32 *)start));
-	if (((u32 *)mem)[size / 4] != 0xdeadbeef)
-		printk(KERN_WARNING "End of tree marker overwritten: %08x\n",
-		       ((u32 *)mem)[size / 4] );
-	*allnextp = NULL;
-
-	/* Get pointer to OF "/chosen" node for use everywhere */
-	of_chosen = of_find_node_by_path("/chosen");
-	if (of_chosen == NULL)
-		of_chosen = of_find_node_by_path("/chosen@0");
-
-	DBG(" <- unflatten_device_tree()\n");
-}
-
 /*
  * ibm,pa-features is a per-cpu property that contains a string of
  * attribute descriptors, each of which has a 2 byte header plus up
@@ -763,48 +362,9 @@
 	return 0;
 }
 
-#ifdef CONFIG_BLK_DEV_INITRD
-static void __init early_init_dt_check_for_initrd(unsigned long node)
-{
-	unsigned long l;
-	u32 *prop;
-
-	DBG("Looking for initrd properties... ");
-
-	prop = of_get_flat_dt_prop(node, "linux,initrd-start", &l);
-	if (prop) {
-		initrd_start = (unsigned long)__va(of_read_ulong(prop, l/4));
-
-		prop = of_get_flat_dt_prop(node, "linux,initrd-end", &l);
-		if (prop) {
-			initrd_end = (unsigned long)
-					__va(of_read_ulong(prop, l/4));
-			initrd_below_start_ok = 1;
-		} else {
-			initrd_start = 0;
-		}
-	}
-
-	DBG("initrd_start=0x%lx  initrd_end=0x%lx\n", initrd_start, initrd_end);
-}
-#else
-static inline void early_init_dt_check_for_initrd(unsigned long node)
-{
-}
-#endif /* CONFIG_BLK_DEV_INITRD */
-
-static int __init early_init_dt_scan_chosen(unsigned long node,
-					    const char *uname, int depth, void *data)
+void __init early_init_dt_scan_chosen_arch(unsigned long node)
 {
 	unsigned long *lprop;
-	unsigned long l;
-	char *p;
-
-	DBG("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
-
-	if (depth != 1 ||
-	    (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
-		return 0;
 
 #ifdef CONFIG_PPC64
 	/* check if iommu is forced on or off */
@@ -815,17 +375,17 @@
 #endif
 
 	/* mem=x on the command line is the preferred mechanism */
- 	lprop = of_get_flat_dt_prop(node, "linux,memory-limit", NULL);
- 	if (lprop)
- 		memory_limit = *lprop;
+	lprop = of_get_flat_dt_prop(node, "linux,memory-limit", NULL);
+	if (lprop)
+		memory_limit = *lprop;
 
 #ifdef CONFIG_PPC64
- 	lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-start", NULL);
- 	if (lprop)
- 		tce_alloc_start = *lprop;
- 	lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-end", NULL);
- 	if (lprop)
- 		tce_alloc_end = *lprop;
+	lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-start", NULL);
+	if (lprop)
+		tce_alloc_start = *lprop;
+	lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-end", NULL);
+	if (lprop)
+		tce_alloc_end = *lprop;
 #endif
 
 #ifdef CONFIG_KEXEC
@@ -837,51 +397,6 @@
 	if (lprop)
 		crashk_res.end = crashk_res.start + *lprop - 1;
 #endif
-
-	early_init_dt_check_for_initrd(node);
-
-	/* Retreive command line */
- 	p = of_get_flat_dt_prop(node, "bootargs", &l);
-	if (p != NULL && l > 0)
-		strlcpy(cmd_line, p, min((int)l, COMMAND_LINE_SIZE));
-
-#ifdef CONFIG_CMDLINE
-	if (p == NULL || l == 0 || (l == 1 && (*p) == 0))
-		strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
-#endif /* CONFIG_CMDLINE */
-
-	DBG("Command line is: %s\n", cmd_line);
-
-	/* break now */
-	return 1;
-}
-
-static int __init early_init_dt_scan_root(unsigned long node,
-					  const char *uname, int depth, void *data)
-{
-	u32 *prop;
-
-	if (depth != 0)
-		return 0;
-
-	prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
-	dt_root_size_cells = (prop == NULL) ? 1 : *prop;
-	DBG("dt_root_size_cells = %x\n", dt_root_size_cells);
-
-	prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
-	dt_root_addr_cells = (prop == NULL) ? 2 : *prop;
-	DBG("dt_root_addr_cells = %x\n", dt_root_addr_cells);
-	
-	/* break now */
-	return 1;
-}
-
-static u64 __init dt_mem_next_cell(int s, cell_t **cellp)
-{
-	cell_t *p = *cellp;
-
-	*cellp = p + s;
-	return of_read_number(p, s);
 }
 
 #ifdef CONFIG_PPC_PSERIES
@@ -893,22 +408,22 @@
  */
 static int __init early_init_dt_scan_drconf_memory(unsigned long node)
 {
-	cell_t *dm, *ls, *usm;
+	__be32 *dm, *ls, *usm;
 	unsigned long l, n, flags;
 	u64 base, size, lmb_size;
 	unsigned int is_kexec_kdump = 0, rngs;
 
 	ls = of_get_flat_dt_prop(node, "ibm,lmb-size", &l);
-	if (ls == NULL || l < dt_root_size_cells * sizeof(cell_t))
+	if (ls == NULL || l < dt_root_size_cells * sizeof(__be32))
 		return 0;
 	lmb_size = dt_mem_next_cell(dt_root_size_cells, &ls);
 
 	dm = of_get_flat_dt_prop(node, "ibm,dynamic-memory", &l);
-	if (dm == NULL || l < sizeof(cell_t))
+	if (dm == NULL || l < sizeof(__be32))
 		return 0;
 
 	n = *dm++;	/* number of entries */
-	if (l < (n * (dt_root_addr_cells + 4) + 1) * sizeof(cell_t))
+	if (l < (n * (dt_root_addr_cells + 4) + 1) * sizeof(__be32))
 		return 0;
 
 	/* check if this is a kexec/kdump kernel. */
@@ -963,66 +478,48 @@
 #define early_init_dt_scan_drconf_memory(node)	0
 #endif /* CONFIG_PPC_PSERIES */
 
-static int __init early_init_dt_scan_memory(unsigned long node,
-					    const char *uname, int depth, void *data)
+static int __init early_init_dt_scan_memory_ppc(unsigned long node,
+						const char *uname,
+						int depth, void *data)
 {
-	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
-	cell_t *reg, *endp;
-	unsigned long l;
-
-	/* Look for the ibm,dynamic-reconfiguration-memory node */
 	if (depth == 1 &&
 	    strcmp(uname, "ibm,dynamic-reconfiguration-memory") == 0)
 		return early_init_dt_scan_drconf_memory(node);
-
-	/* We are scanning "memory" nodes only */
-	if (type == NULL) {
-		/*
-		 * The longtrail doesn't have a device_type on the
-		 * /memory node, so look for the node called /memory@0.
-		 */
-		if (depth != 1 || strcmp(uname, "memory@0") != 0)
-			return 0;
-	} else if (strcmp(type, "memory") != 0)
-		return 0;
-
-	reg = of_get_flat_dt_prop(node, "linux,usable-memory", &l);
-	if (reg == NULL)
-		reg = of_get_flat_dt_prop(node, "reg", &l);
-	if (reg == NULL)
-		return 0;
-
-	endp = reg + (l / sizeof(cell_t));
-
-	DBG("memory scan node %s, reg size %ld, data: %x %x %x %x,\n",
-	    uname, l, reg[0], reg[1], reg[2], reg[3]);
-
-	while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
-		u64 base, size;
-
-		base = dt_mem_next_cell(dt_root_addr_cells, &reg);
-		size = dt_mem_next_cell(dt_root_size_cells, &reg);
-
-		if (size == 0)
-			continue;
-		DBG(" - %llx ,  %llx\n", (unsigned long long)base,
-		    (unsigned long long)size);
-#ifdef CONFIG_PPC64
-		if (iommu_is_off) {
-			if (base >= 0x80000000ul)
-				continue;
-			if ((base + size) > 0x80000000ul)
-				size = 0x80000000ul - base;
-		}
-#endif
-		lmb_add(base, size);
-
-		memstart_addr = min((u64)memstart_addr, base);
-	}
-
-	return 0;
+
+	return early_init_dt_scan_memory(node, uname, depth, data);
 }
 
+void __init early_init_dt_add_memory_arch(u64 base, u64 size)
+{
+#if defined(CONFIG_PPC64)
+	if (iommu_is_off) {
+		if (base >= 0x80000000ul)
+			return;
+		if ((base + size) > 0x80000000ul)
+			size = 0x80000000ul - base;
+	}
+#endif
+
+	lmb_add(base, size);
+
+	memstart_addr = min((u64)memstart_addr, base);
+}
+
+u64 __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
+{
+	return lmb_alloc(size, align);
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void __init early_init_dt_setup_initrd_arch(unsigned long start,
+		unsigned long end)
+{
+	initrd_start = (unsigned long)__va(start);
+	initrd_end = (unsigned long)__va(end);
+	initrd_below_start_ok = 1;
+}
+#endif
+
 static void __init early_reserve_mem(void)
 {
 	u64 base, size;
@@ -1186,7 +683,7 @@
 	/* Scan memory nodes and rebuild LMBs */
 	lmb_init();
 	of_scan_flat_dt(early_init_dt_scan_root, NULL);
-	of_scan_flat_dt(early_init_dt_scan_memory, NULL);
+	of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL);
 
 	/* Save command line for /proc/cmdline and then parse parameters */
 	strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE);
@@ -1234,25 +731,6 @@
 	DBG(" <- early_init_devtree()\n");
 }
 
-
-/**
- * Indicates whether the root node has a given value in its
- * compatible property.
- */
-int machine_is_compatible(const char *compat)
-{
-	struct device_node *root;
-	int rc = 0;
-
-	root = of_find_node_by_path("/");
-	if (root) {
-		rc = of_device_is_compatible(root, compat);
-		of_node_put(root);
-	}
-	return rc;
-}
-EXPORT_SYMBOL(machine_is_compatible);
-
 /*******
  *
  * New implementation of the OF "find" APIs, return a refcounted
@@ -1265,27 +743,6 @@
  *******/
 
 /**
- *	of_find_node_by_phandle - Find a node given a phandle
- *	@handle:	phandle of the node to find
- *
- *	Returns a node pointer with refcount incremented, use
- *	of_node_put() on it when done.
- */
-struct device_node *of_find_node_by_phandle(phandle handle)
-{
-	struct device_node *np;
-
-	read_lock(&devtree_lock);
-	for (np = allnodes; np != 0; np = np->allnext)
-		if (np->linux_phandle == handle)
-			break;
-	of_node_get(np);
-	read_unlock(&devtree_lock);
-	return np;
-}
-EXPORT_SYMBOL(of_find_node_by_phandle);
-
-/**
  *	of_find_next_cache_node - Find a node's subsidiary cache
  *	@np:	node of type "cpu" or "cache"
  *
@@ -1316,138 +773,6 @@
 	return NULL;
 }
 
-/**
- *	of_node_get - Increment refcount of a node
- *	@node:	Node to inc refcount, NULL is supported to
- *		simplify writing of callers
- *
- *	Returns node.
- */
-struct device_node *of_node_get(struct device_node *node)
-{
-	if (node)
-		kref_get(&node->kref);
-	return node;
-}
-EXPORT_SYMBOL(of_node_get);
-
-static inline struct device_node * kref_to_device_node(struct kref *kref)
-{
-	return container_of(kref, struct device_node, kref);
-}
-
-/**
- *	of_node_release - release a dynamically allocated node
- *	@kref:  kref element of the node to be released
- *
- *	In of_node_put() this function is passed to kref_put()
- *	as the destructor.
- */
-static void of_node_release(struct kref *kref)
-{
-	struct device_node *node = kref_to_device_node(kref);
-	struct property *prop = node->properties;
-
-	/* We should never be releasing nodes that haven't been detached. */
-	if (!of_node_check_flag(node, OF_DETACHED)) {
-		printk("WARNING: Bad of_node_put() on %s\n", node->full_name);
-		dump_stack();
-		kref_init(&node->kref);
-		return;
-	}
-
-	if (!of_node_check_flag(node, OF_DYNAMIC))
-		return;
-
-	while (prop) {
-		struct property *next = prop->next;
-		kfree(prop->name);
-		kfree(prop->value);
-		kfree(prop);
-		prop = next;
-
-		if (!prop) {
-			prop = node->deadprops;
-			node->deadprops = NULL;
-		}
-	}
-	kfree(node->full_name);
-	kfree(node->data);
-	kfree(node);
-}
-
-/**
- *	of_node_put - Decrement refcount of a node
- *	@node:	Node to dec refcount, NULL is supported to
- *		simplify writing of callers
- *
- */
-void of_node_put(struct device_node *node)
-{
-	if (node)
-		kref_put(&node->kref, of_node_release);
-}
-EXPORT_SYMBOL(of_node_put);
-
-/*
- * Plug a device node into the tree and global list.
- */
-void of_attach_node(struct device_node *np)
-{
-	unsigned long flags;
-
-	write_lock_irqsave(&devtree_lock, flags);
-	np->sibling = np->parent->child;
-	np->allnext = allnodes;
-	np->parent->child = np;
-	allnodes = np;
-	write_unlock_irqrestore(&devtree_lock, flags);
-}
-
-/*
- * "Unplug" a node from the device tree.  The caller must hold
- * a reference to the node.  The memory associated with the node
- * is not freed until its refcount goes to zero.
- */
-void of_detach_node(struct device_node *np)
-{
-	struct device_node *parent;
-	unsigned long flags;
-
-	write_lock_irqsave(&devtree_lock, flags);
-
-	parent = np->parent;
-	if (!parent)
-		goto out_unlock;
-
-	if (allnodes == np)
-		allnodes = np->allnext;
-	else {
-		struct device_node *prev;
-		for (prev = allnodes;
-		     prev->allnext != np;
-		     prev = prev->allnext)
-			;
-		prev->allnext = np->allnext;
-	}
-
-	if (parent->child == np)
-		parent->child = np->sibling;
-	else {
-		struct device_node *prevsib;
-		for (prevsib = np->parent->child;
-		     prevsib->sibling != np;
-		     prevsib = prevsib->sibling)
-			;
-		prevsib->sibling = np->sibling;
-	}
-
-	of_node_set_flag(np, OF_DETACHED);
-
-out_unlock:
-	write_unlock_irqrestore(&devtree_lock, flags);
-}
-
 #ifdef CONFIG_PPC_PSERIES
 /*
  * Fix up the uninitialized fields in a new device node:
@@ -1479,9 +804,9 @@
 	if (machine_is(powermac))
 		return -ENODEV;
 
-	/* fix up new node's linux_phandle field */
+	/* fix up new node's phandle field */
 	if ((ibm_phandle = of_get_property(node, "ibm,phandle", NULL)))
-		node->linux_phandle = *ibm_phandle;
+		node->phandle = *ibm_phandle;
 
 out:
 	of_node_put(parent);
@@ -1520,120 +845,6 @@
 __initcall(prom_reconfig_setup);
 #endif
 
-/*
- * Add a property to a node
- */
-int prom_add_property(struct device_node* np, struct property* prop)
-{
-	struct property **next;
-	unsigned long flags;
-
-	prop->next = NULL;	
-	write_lock_irqsave(&devtree_lock, flags);
-	next = &np->properties;
-	while (*next) {
-		if (strcmp(prop->name, (*next)->name) == 0) {
-			/* duplicate ! don't insert it */
-			write_unlock_irqrestore(&devtree_lock, flags);
-			return -1;
-		}
-		next = &(*next)->next;
-	}
-	*next = prop;
-	write_unlock_irqrestore(&devtree_lock, flags);
-
-#ifdef CONFIG_PROC_DEVICETREE
-	/* try to add to proc as well if it was initialized */
-	if (np->pde)
-		proc_device_tree_add_prop(np->pde, prop);
-#endif /* CONFIG_PROC_DEVICETREE */
-
-	return 0;
-}
-
-/*
- * Remove a property from a node.  Note that we don't actually
- * remove it, since we have given out who-knows-how-many pointers
- * to the data using get-property.  Instead we just move the property
- * to the "dead properties" list, so it won't be found any more.
- */
-int prom_remove_property(struct device_node *np, struct property *prop)
-{
-	struct property **next;
-	unsigned long flags;
-	int found = 0;
-
-	write_lock_irqsave(&devtree_lock, flags);
-	next = &np->properties;
-	while (*next) {
-		if (*next == prop) {
-			/* found the node */
-			*next = prop->next;
-			prop->next = np->deadprops;
-			np->deadprops = prop;
-			found = 1;
-			break;
-		}
-		next = &(*next)->next;
-	}
-	write_unlock_irqrestore(&devtree_lock, flags);
-
-	if (!found)
-		return -ENODEV;
-
-#ifdef CONFIG_PROC_DEVICETREE
-	/* try to remove the proc node as well */
-	if (np->pde)
-		proc_device_tree_remove_prop(np->pde, prop);
-#endif /* CONFIG_PROC_DEVICETREE */
-
-	return 0;
-}
-
-/*
- * Update a property in a node.  Note that we don't actually
- * remove it, since we have given out who-knows-how-many pointers
- * to the data using get-property.  Instead we just move the property
- * to the "dead properties" list, and add the new property to the
- * property list
- */
-int prom_update_property(struct device_node *np,
-			 struct property *newprop,
-			 struct property *oldprop)
-{
-	struct property **next;
-	unsigned long flags;
-	int found = 0;
-
-	write_lock_irqsave(&devtree_lock, flags);
-	next = &np->properties;
-	while (*next) {
-		if (*next == oldprop) {
-			/* found the node */
-			newprop->next = oldprop->next;
-			*next = newprop;
-			oldprop->next = np->deadprops;
-			np->deadprops = oldprop;
-			found = 1;
-			break;
-		}
-		next = &(*next)->next;
-	}
-	write_unlock_irqrestore(&devtree_lock, flags);
-
-	if (!found)
-		return -ENODEV;
-
-#ifdef CONFIG_PROC_DEVICETREE
-	/* try to add to proc as well if it was initialized */
-	if (np->pde)
-		proc_device_tree_update_prop(np->pde, newprop, oldprop);
-#endif /* CONFIG_PROC_DEVICETREE */
-
-	return 0;
-}
-
-
 /* Find the device node for a given logical cpu number, also returns the cpu
  * local thread number (index in ibm,interrupt-server#s) if relevant and
  * asked for (non NULL)
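
For readers following the flattened-tree helpers referenced above (of_scan_flat_dt(), of_get_flat_dt_prop(), of_flat_dt_is_compatible()): they are the only way to inspect the device tree this early in boot, before struct device_node objects exist. The sketch below is illustrative only and not part of this patch; it assumes a normal kernel context, the property name is just an example, and flattened-tree data is stored big-endian, hence the be32_to_cpup() conversion (a no-op on big-endian hosts such as powerpc).

/*
 * Illustrative sketch: an early of_scan_flat_dt() callback that reads
 * a 32-bit cell from the root node.  Returning non-zero stops the scan.
 */
static int __init early_scan_example(unsigned long node, const char *uname,
				     int depth, void *data)
{
	__be32 *prop;

	if (depth != 0)
		return 0;	/* only interested in the root node */

	prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
	if (prop)
		*(u32 *)data = be32_to_cpup(prop);

	return 1;
}

/* caller, early in boot:
 *	u32 addr_cells = 2;
 *	of_scan_flat_dt(early_scan_example, &addr_cells);
 */
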
diff --git a/arch/powerpc/platforms/85xx/xes_mpc85xx.c b/arch/powerpc/platforms/85xx/xes_mpc85xx.c
index 1b42605..0125604 100644
--- a/arch/powerpc/platforms/85xx/xes_mpc85xx.c
+++ b/arch/powerpc/platforms/85xx/xes_mpc85xx.c
@@ -80,8 +80,8 @@
 	printk(KERN_INFO "xes_mpc85xx: Enabling L2 as cache\n");
 
 	ctl = MPC85xx_L2CTL_L2E | MPC85xx_L2CTL_L2I;
-	if (machine_is_compatible("MPC8540") ||
-	    machine_is_compatible("MPC8560"))
+	if (of_machine_is_compatible("MPC8540") ||
+	    of_machine_is_compatible("MPC8560"))
 		/*
 		 * Assume L2 SRAM is used fully for cache, so set
 		 * L2BLKSZ (bits 4:5) to match L2SIZ (bits 2:3).
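
A note on of_machine_is_compatible(), used in this and the following conversions: as the helper removed from prom.c above shows, it is simply a refcount-safe compatible-string check against the root node. A minimal open-coded equivalent, for reference only, looks like this:

/* Reference sketch mirroring the machine_is_compatible() body removed
 * from prom.c; of_machine_is_compatible() provides this generically. */
static int root_is_compatible(const char *compat)
{
	struct device_node *root = of_find_node_by_path("/");
	int rc = 0;

	if (root) {
		rc = of_device_is_compatible(root, compat);
		of_node_put(root);	/* drop the reference from the lookup */
	}
	return rc;
}
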
diff --git a/arch/powerpc/platforms/cell/cbe_powerbutton.c b/arch/powerpc/platforms/cell/cbe_powerbutton.c
index dcddaa5..f75a4da 100644
--- a/arch/powerpc/platforms/cell/cbe_powerbutton.c
+++ b/arch/powerpc/platforms/cell/cbe_powerbutton.c
@@ -48,7 +48,7 @@
 	int ret = 0;
 	struct input_dev *dev;
 
-	if (!machine_is_compatible("IBM,CBPLUS-1.0")) {
+	if (!of_machine_is_compatible("IBM,CBPLUS-1.0")) {
 		printk(KERN_ERR "%s: Not a cell blade.\n", __func__);
 		ret = -ENODEV;
 		goto out;
diff --git a/arch/powerpc/platforms/cell/ras.c b/arch/powerpc/platforms/cell/ras.c
index 5e0a191..608fd2b 100644
--- a/arch/powerpc/platforms/cell/ras.c
+++ b/arch/powerpc/platforms/cell/ras.c
@@ -255,7 +255,7 @@
 {
 	struct cbe_pmd_regs __iomem *regs;
 
-	sysreset_hack = machine_is_compatible("IBM,CBPLUS-1.0");
+	sysreset_hack = of_machine_is_compatible("IBM,CBPLUS-1.0");
 	if (!sysreset_hack)
 		return 0;
 
diff --git a/arch/powerpc/platforms/cell/spu_manage.c b/arch/powerpc/platforms/cell/spu_manage.c
index 4c506c1..891f18e 100644
--- a/arch/powerpc/platforms/cell/spu_manage.c
+++ b/arch/powerpc/platforms/cell/spu_manage.c
@@ -457,7 +457,7 @@
 			continue;
 		vic_handles = of_get_property(spu_dn, "vicinity", &lenp);
 		for (i=0; i < (lenp / sizeof(phandle)); i++) {
-			if (vic_handles[i] == target->linux_phandle)
+			if (vic_handles[i] == target->phandle)
 				return spu;
 		}
 	}
@@ -499,7 +499,7 @@
 
 			if (strcmp(name, "spe") == 0) {
 				spu = devnode_spu(cbe, vic_dn);
-				avoid_ph = last_spu_dn->linux_phandle;
+				avoid_ph = last_spu_dn->phandle;
 			} else {
 				/*
 				 * "mic-tm" and "bif0" nodes do not have
@@ -514,7 +514,7 @@
 					last_spu->has_mem_affinity = 1;
 					spu->has_mem_affinity = 1;
 				}
-				avoid_ph = vic_dn->linux_phandle;
+				avoid_ph = vic_dn->phandle;
 			}
 
 			list_add_tail(&spu->aff_list, &last_spu->aff_list);
diff --git a/arch/powerpc/platforms/fsl_uli1575.c b/arch/powerpc/platforms/fsl_uli1575.c
index fd23a1d..8b0c208 100644
--- a/arch/powerpc/platforms/fsl_uli1575.c
+++ b/arch/powerpc/platforms/fsl_uli1575.c
@@ -222,6 +222,7 @@
 	int i;
 	u8 *dummy;
 	struct pci_bus *bus = dev->bus;
+	struct resource *res;
 	resource_size_t end = 0;
 
 	for (i = PCI_BRIDGE_RESOURCES; i < PCI_BRIDGE_RESOURCES+3; i++) {
@@ -230,13 +231,12 @@
 			end = pci_resource_end(dev, i);
 	}
 
-	for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
-		if ((bus->resource[i]) &&
-			(bus->resource[i]->flags & IORESOURCE_MEM)) {
-			if (bus->resource[i]->end == end)
-				dummy = ioremap(bus->resource[i]->start, 0x4);
+	pci_bus_for_each_resource(bus, res, i) {
+		if (res && res->flags & IORESOURCE_MEM) {
+			if (res->end == end)
+				dummy = ioremap(res->start, 0x4);
 			else
-				dummy = ioremap(bus->resource[i]->end - 3, 0x4);
+				dummy = ioremap(res->end - 3, 0x4);
 			if (dummy) {
 				in_8(dummy);
 				iounmap(dummy);
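
pci_bus_for_each_resource(), used in the hunk above, walks every resource slot attached to a bus and hands back a possibly-NULL struct resource pointer per slot, which is why the NULL check is kept. A minimal usage sketch, independent of this driver and with a hypothetical helper name:

/* Sketch only: report the memory windows of a PCI bus.  'bus' is
 * assumed to be a valid, already-scanned struct pci_bus. */
static void dump_bus_mem_windows(struct pci_bus *bus)
{
	struct resource *res;
	int i;

	pci_bus_for_each_resource(bus, res, i) {
		if (!res || !(res->flags & IORESOURCE_MEM))
			continue;	/* empty slot or not a memory window */
		pr_info("bus %02x window %d: %pR\n", bus->number, i, res);
	}
}
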
diff --git a/arch/powerpc/platforms/pasemi/cpufreq.c b/arch/powerpc/platforms/pasemi/cpufreq.c
index be2527a..d35e052 100644
--- a/arch/powerpc/platforms/pasemi/cpufreq.c
+++ b/arch/powerpc/platforms/pasemi/cpufreq.c
@@ -304,8 +304,8 @@
 
 static int __init pas_cpufreq_init(void)
 {
-	if (!machine_is_compatible("PA6T-1682M") &&
-	    !machine_is_compatible("pasemi,pwrficient"))
+	if (!of_machine_is_compatible("PA6T-1682M") &&
+	    !of_machine_is_compatible("pasemi,pwrficient"))
 		return -ENODEV;
 
 	return cpufreq_register_driver(&pas_cpufreq_driver);
diff --git a/arch/powerpc/platforms/powermac/cpufreq_32.c b/arch/powerpc/platforms/powermac/cpufreq_32.c
index 08d94e4..d4f127d 100644
--- a/arch/powerpc/platforms/powermac/cpufreq_32.c
+++ b/arch/powerpc/platforms/powermac/cpufreq_32.c
@@ -657,31 +657,31 @@
 	cur_freq = (*value) / 1000;
 
 	/*  Check for 7447A based MacRISC3 */
-	if (machine_is_compatible("MacRISC3") &&
+	if (of_machine_is_compatible("MacRISC3") &&
 	    of_get_property(cpunode, "dynamic-power-step", NULL) &&
 	    PVR_VER(mfspr(SPRN_PVR)) == 0x8003) {
 		pmac_cpufreq_init_7447A(cpunode);
 	/* Check for other MacRISC3 machines */
-	} else if (machine_is_compatible("PowerBook3,4") ||
-		   machine_is_compatible("PowerBook3,5") ||
-		   machine_is_compatible("MacRISC3")) {
+	} else if (of_machine_is_compatible("PowerBook3,4") ||
+		   of_machine_is_compatible("PowerBook3,5") ||
+		   of_machine_is_compatible("MacRISC3")) {
 		pmac_cpufreq_init_MacRISC3(cpunode);
 	/* Else check for iBook2 500/600 */
-	} else if (machine_is_compatible("PowerBook4,1")) {
+	} else if (of_machine_is_compatible("PowerBook4,1")) {
 		hi_freq = cur_freq;
 		low_freq = 400000;
 		set_speed_proc = pmu_set_cpu_speed;
 		is_pmu_based = 1;
 	}
 	/* Else check for TiPb 550 */
-	else if (machine_is_compatible("PowerBook3,3") && cur_freq == 550000) {
+	else if (of_machine_is_compatible("PowerBook3,3") && cur_freq == 550000) {
 		hi_freq = cur_freq;
 		low_freq = 500000;
 		set_speed_proc = pmu_set_cpu_speed;
 		is_pmu_based = 1;
 	}
 	/* Else check for TiPb 400 & 500 */
-	else if (machine_is_compatible("PowerBook3,2")) {
+	else if (of_machine_is_compatible("PowerBook3,2")) {
 		/* We only know about the 400 MHz and the 500Mhz model
 		 * they both have 300 MHz as low frequency
 		 */
diff --git a/arch/powerpc/platforms/powermac/cpufreq_64.c b/arch/powerpc/platforms/powermac/cpufreq_64.c
index 708c751..3ed288e 100644
--- a/arch/powerpc/platforms/powermac/cpufreq_64.c
+++ b/arch/powerpc/platforms/powermac/cpufreq_64.c
@@ -398,11 +398,11 @@
 	int rc = -ENODEV;
 
 	/* Check supported platforms */
-	if (machine_is_compatible("PowerMac8,1") ||
-	    machine_is_compatible("PowerMac8,2") ||
-	    machine_is_compatible("PowerMac9,1"))
+	if (of_machine_is_compatible("PowerMac8,1") ||
+	    of_machine_is_compatible("PowerMac8,2") ||
+	    of_machine_is_compatible("PowerMac9,1"))
 		use_volts_smu = 1;
-	else if (machine_is_compatible("PowerMac11,2"))
+	else if (of_machine_is_compatible("PowerMac11,2"))
 		use_volts_vdnap = 1;
 	else
 		return -ENODEV;
@@ -729,9 +729,9 @@
 		return -ENODEV;
 	}
 
-	if (machine_is_compatible("PowerMac7,2") ||
-	    machine_is_compatible("PowerMac7,3") ||
-	    machine_is_compatible("RackMac3,1"))
+	if (of_machine_is_compatible("PowerMac7,2") ||
+	    of_machine_is_compatible("PowerMac7,3") ||
+	    of_machine_is_compatible("RackMac3,1"))
 		rc = g5_pm72_cpufreq_init(cpus);
 #ifdef CONFIG_PMAC_SMU
 	else
diff --git a/arch/powerpc/platforms/powermac/feature.c b/arch/powerpc/platforms/powermac/feature.c
index fbc9bbd..33e815f 100644
--- a/arch/powerpc/platforms/powermac/feature.c
+++ b/arch/powerpc/platforms/powermac/feature.c
@@ -2426,7 +2426,7 @@
 	    }
 	}
 	for(i=0; i<ARRAY_SIZE(pmac_mb_defs); i++) {
-	    if (machine_is_compatible(pmac_mb_defs[i].model_string)) {
+	    if (of_machine_is_compatible(pmac_mb_defs[i].model_string)) {
 		pmac_mb = pmac_mb_defs[i];
 		goto found;
 	    }
diff --git a/arch/powerpc/platforms/powermac/pfunc_core.c b/arch/powerpc/platforms/powermac/pfunc_core.c
index 96d5ce5..ede49e7 100644
--- a/arch/powerpc/platforms/powermac/pfunc_core.c
+++ b/arch/powerpc/platforms/powermac/pfunc_core.c
@@ -842,7 +842,7 @@
 	list_for_each_entry(func, &dev->functions, link) {
 		if (name && strcmp(name, func->name))
 			continue;
-		if (func->phandle && target->node != func->phandle)
+		if (func->phandle && target->phandle != func->phandle)
 			continue;
 		if ((func->flags & flags) == 0)
 			continue;
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index b40c22d..6898e82 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -693,9 +693,9 @@
 #ifdef CONFIG_PPC64
 
 	/* i2c based HW sync on some G5s */
-	if (machine_is_compatible("PowerMac7,2") ||
-	    machine_is_compatible("PowerMac7,3") ||
-	    machine_is_compatible("RackMac3,1"))
+	if (of_machine_is_compatible("PowerMac7,2") ||
+	    of_machine_is_compatible("PowerMac7,3") ||
+	    of_machine_is_compatible("RackMac3,1"))
 		smp_core99_setup_i2c_hwsync(ncpus);
 
 	/* pfunc based HW sync on recent G5s */
@@ -713,7 +713,7 @@
 #else /* CONFIG_PPC64 */
 
 	/* GPIO based HW sync on ppc32 Core99 */
-	if (pmac_tb_freeze == NULL && !machine_is_compatible("MacRISC4")) {
+	if (pmac_tb_freeze == NULL && !of_machine_is_compatible("MacRISC4")) {
 		struct device_node *cpu;
 		const u32 *tbprop = NULL;
 
@@ -750,7 +750,7 @@
 #endif
 
 	/* 32 bits SMP can't NAP */
-	if (!machine_is_compatible("MacRISC4"))
+	if (!of_machine_is_compatible("MacRISC4"))
 		powersave_nap = 0;
 }
 
@@ -852,7 +852,7 @@
 		/* If we didn't start the second CPU, we must take
 		 * it off the bus
 		 */
-		if (machine_is_compatible("MacRISC4") &&
+		if (of_machine_is_compatible("MacRISC4") &&
 		    num_online_cpus() < 2)		
 			g5_phy_disable_cpu1();
 #endif /* CONFIG_PPC64 */
diff --git a/arch/powerpc/platforms/powermac/time.c b/arch/powerpc/platforms/powermac/time.c
index 1810e42..48211ca 100644
--- a/arch/powerpc/platforms/powermac/time.c
+++ b/arch/powerpc/platforms/powermac/time.c
@@ -317,9 +317,9 @@
 	 * calibration. That's better since the VIA itself seems
 	 * to be slightly off. --BenH
 	 */
-	if (!machine_is_compatible("MacRISC2") &&
-	    !machine_is_compatible("MacRISC3") &&
-	    !machine_is_compatible("MacRISC4"))
+	if (!of_machine_is_compatible("MacRISC2") &&
+	    !of_machine_is_compatible("MacRISC3") &&
+	    !of_machine_is_compatible("MacRISC4"))
 		if (via_calibrate_decr())
 			return;
 
@@ -328,7 +328,7 @@
 	 * probably implement calibration based on the KL timer on these
 	 * machines anyway... -BenH
 	 */
-	if (machine_is_compatible("PowerMac3,5"))
+	if (of_machine_is_compatible("PowerMac3,5"))
 		if (via_calibrate_decr())
 			return;
 #endif
diff --git a/arch/powerpc/platforms/powermac/udbg_scc.c b/arch/powerpc/platforms/powermac/udbg_scc.c
index 9490157..d83135a9 100644
--- a/arch/powerpc/platforms/powermac/udbg_scc.c
+++ b/arch/powerpc/platforms/powermac/udbg_scc.c
@@ -132,9 +132,9 @@
 		scc_inittab[1] = in_8(sccc);
 		out_8(sccc, 12);
 		scc_inittab[3] = in_8(sccc);
-	} else if (machine_is_compatible("RackMac1,1")
-		   || machine_is_compatible("RackMac1,2")
-		   || machine_is_compatible("MacRISC4")) {
+	} else if (of_machine_is_compatible("RackMac1,1")
+		   || of_machine_is_compatible("RackMac1,2")
+		   || of_machine_is_compatible("MacRISC4")) {
 		/* Xserves and G5s default to 57600 */
 		scc_inittab[1] = 0;
 		scc_inittab[3] = 0;
diff --git a/arch/powerpc/sysdev/grackle.c b/arch/powerpc/sysdev/grackle.c
index 5da37c2..cf27df6 100644
--- a/arch/powerpc/sysdev/grackle.c
+++ b/arch/powerpc/sysdev/grackle.c
@@ -56,9 +56,9 @@
 void __init setup_grackle(struct pci_controller *hose)
 {
 	setup_indirect_pci(hose, 0xfec00000, 0xfee00000, 0);
-	if (machine_is_compatible("PowerMac1,1"))
+	if (of_machine_is_compatible("PowerMac1,1"))
 		ppc_pci_add_flags(PPC_PCI_REASSIGN_ALL_BUS);
-	if (machine_is_compatible("AAPL,PowerBook1998"))
+	if (of_machine_is_compatible("AAPL,PowerBook1998"))
 		grackle_set_loop_snoop(hose, 1);
 #if 0	/* Disabled for now, HW problems ??? */
 	grackle_set_stg(hose, 1);
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 6be4503..58f4673 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -78,14 +78,14 @@
 	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
 	int ret;
 
-	sctx->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
-	sctx->fallback.blk->base.crt_flags |= (tfm->crt_flags &
+	sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+	sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags &
 			CRYPTO_TFM_REQ_MASK);
 
 	ret = crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
 	if (ret) {
 		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
-		tfm->crt_flags |= (sctx->fallback.blk->base.crt_flags &
+		tfm->crt_flags |= (sctx->fallback.cip->base.crt_flags &
 				CRYPTO_TFM_RES_MASK);
 	}
 	return ret;
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 2121fbb..05cef50 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -13,7 +13,6 @@
 	select HAVE_LMB
 	select HAVE_OPROFILE
 	select HAVE_GENERIC_DMA_COHERENT
-	select HAVE_IOREMAP_PROT if MMU
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_DMA_API_DEBUG
 	select HAVE_DMA_ATTRS
@@ -22,6 +21,7 @@
 	select HAVE_KERNEL_GZIP
 	select HAVE_KERNEL_BZIP2
 	select HAVE_KERNEL_LZMA
+	select HAVE_KERNEL_LZO
 	select HAVE_SYSCALL_TRACEPOINTS
 	select RTC_LIB
 	select GENERIC_ATOMIC64
@@ -35,6 +35,7 @@
 	def_bool ARCH = "sh"
 	select HAVE_KPROBES
 	select HAVE_KRETPROBES
+	select HAVE_IOREMAP_PROT if MMU && !X2TLB
 	select HAVE_FUNCTION_TRACER
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_DYNAMIC_FTRACE
@@ -42,6 +43,8 @@
 	select HAVE_FTRACE_NMI_ENTER if DYNAMIC_FTRACE
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_ARCH_KGDB
+	select HAVE_HW_BREAKPOINT
+	select PERF_EVENTS if HAVE_HW_BREAKPOINT
 	select ARCH_HIBERNATION_POSSIBLE if MMU
 
 config SUPERH64
@@ -78,12 +81,13 @@
 config GENERIC_HARDIRQS_NO__DO_IRQ
 	def_bool y
 
-config GENERIC_IRQ_PROBE
-	def_bool y
-
 config IRQ_PER_CPU
 	def_bool y
 
+config SPARSE_IRQ
+	def_bool y
+	depends on SUPERH32
+
 config GENERIC_GPIO
 	def_bool n
 
@@ -548,8 +552,7 @@
 			      CPU_SUBTYPE_SH7203 || \
 			      CPU_SUBTYPE_SH7206 || \
 			      CPU_SUBTYPE_SH7263 || \
-			      CPU_SUBTYPE_MXG    || \
-			      CPU_SUBTYPE_SH7786
+			      CPU_SUBTYPE_MXG
 	default "60000000" if CPU_SUBTYPE_SH7751 || CPU_SUBTYPE_SH7751R
 	default "66000000" if CPU_SUBTYPE_SH4_202
 	default "50000000"
@@ -563,7 +566,8 @@
 
 config SH_CLK_CPG_LEGACY
 	depends on SH_CLK_CPG
-	def_bool y if !CPU_SUBTYPE_SH7785 && !ARCH_SHMOBILE
+	def_bool y if !CPU_SUBTYPE_SH7785 && !ARCH_SHMOBILE && \
+		      !CPU_SUBTYPE_SH7786
 
 config SH_CLK_MD
 	int "CPU Mode Pin Setting"
@@ -725,18 +729,6 @@
 	  LLSC, this should be more efficient than the other alternative of
 	  disabling interrupts around the atomic sequence.
 
-config SPARSE_IRQ
-	bool "Support sparse irq numbering"
-	depends on EXPERIMENTAL
-	help
-	  This enables support for sparse irqs. This is useful in general
-	  as most CPUs have a fairly sparse array of IRQ vectors, which
-	  the irq_desc then maps directly on to. Systems with a high
-	  number of off-chip IRQs will want to treat this as
-	  experimental until they have been independently verified.
-
-	  If you don't know what to do here, say N.
-
 endmenu
 
 menu "Boot options"
@@ -822,11 +814,15 @@
 config PCI
 	bool "PCI support"
 	depends on SYS_SUPPORTS_PCI
+	select PCI_DOMAINS
 	help
 	  Find out whether you have a PCI motherboard. PCI is the name of a
 	  bus system, i.e. the way the CPU talks to the other stuff inside
 	  your box. If you have PCI, say Y, otherwise N.
 
+config PCI_DOMAINS
+	bool
+
 source "drivers/pci/pcie/Kconfig"
 
 source "drivers/pci/Kconfig"
diff --git a/arch/sh/Kconfig.cpu b/arch/sh/Kconfig.cpu
index cd6e3ea..ddf096c 100644
--- a/arch/sh/Kconfig.cpu
+++ b/arch/sh/Kconfig.cpu
@@ -68,7 +68,8 @@
 
 config SPECULATIVE_EXECUTION
 	bool "Speculative subroutine return"
-	depends on CPU_SUBTYPE_SH7780 && EXPERIMENTAL
+	depends on EXPERIMENTAL
+	depends on CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785 || CPU_SUBTYPE_SH7786
 	help
 	  This enables support for a speculative instruction fetch for
 	  subroutine return. There are various pitfalls associated with
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index db91925..588579a 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -83,6 +83,7 @@
 defaultimage-$(CONFIG_SH_7724_SOLUTION_ENGINE)	:= uImage
 defaultimage-$(CONFIG_SH_7206_SOLUTION_ENGINE)	:= vmlinux
 defaultimage-$(CONFIG_SH_7619_SOLUTION_ENGINE)	:= vmlinux
+defaultimage-$(CONFIG_SH_SDK7786)		:= vmlinux.bin
 
 # Set some sensible Kbuild defaults
 KBUILD_IMAGE		:= $(defaultimage-y)
@@ -143,11 +144,11 @@
 machdir-$(CONFIG_SH_KFR2R09)			+= mach-kfr2r09
 machdir-$(CONFIG_SH_ECOVEC)			+= mach-ecovec24
 machdir-$(CONFIG_SH_SDK7780)			+= mach-sdk7780
+machdir-$(CONFIG_SH_SDK7786)			+= mach-sdk7786
 machdir-$(CONFIG_SH_X3PROTO)			+= mach-x3proto
 machdir-$(CONFIG_SH_SH7763RDP)			+= mach-sh7763rdp
 machdir-$(CONFIG_SH_SH4202_MICRODEV)		+= mach-microdev
 machdir-$(CONFIG_SH_LANDISK)			+= mach-landisk
-machdir-$(CONFIG_SH_TITAN)			+= mach-titan
 machdir-$(CONFIG_SH_LBOX_RE2)			+= mach-lboxre2
 machdir-$(CONFIG_SH_CAYMAN)			+= mach-cayman
 machdir-$(CONFIG_SH_RSK)			+= mach-rsk
@@ -203,8 +204,9 @@
 libs-$(CONFIG_SUPERH32)		:= arch/sh/lib/	$(libs-y)
 libs-$(CONFIG_SUPERH64)		:= arch/sh/lib64/ $(libs-y)
 
-BOOT_TARGETS = uImage uImage.bz2 uImage.gz uImage.lzma uImage.srec uImage.bin \
-	       zImage vmlinux.srec romImage
+BOOT_TARGETS = uImage uImage.bz2 uImage.gz uImage.lzma uImage.lzo \
+	       uImage.srec uImage.bin zImage vmlinux.bin vmlinux.srec \
+	       romImage
 PHONY += $(BOOT_TARGETS)
 
 all: $(KBUILD_IMAGE)
@@ -225,10 +227,12 @@
 	@echo '  zImage 	           - Compressed kernel image'
 	@echo '  romImage	           - Compressed ROM image, if supported'
 	@echo '  vmlinux.srec	           - Create an ELF S-record'
+	@echo '  vmlinux.bin	           - Create an uncompressed binary image'
 	@echo '* uImage  	           - Alias to bootable U-Boot image'
 	@echo '  uImage.srec	           - Create an S-record for U-Boot'
 	@echo '  uImage.bin	           - Kernel-only image for U-Boot (bin)'
 	@echo '* uImage.gz	           - Kernel-only image for U-Boot (gzip)'
 	@echo '  uImage.bz2	           - Kernel-only image for U-Boot (bzip2)'
 	@echo '  uImage.lzma	           - Kernel-only image for U-Boot (lzma)'
+	@echo '  uImage.lzo	           - Kernel-only image for U-Boot (lzo)'
 endef
diff --git a/arch/sh/boards/Kconfig b/arch/sh/boards/Kconfig
index aedd9de..938e87d 100644
--- a/arch/sh/boards/Kconfig
+++ b/arch/sh/boards/Kconfig
@@ -150,6 +150,14 @@
 	  Select SDK7780 if configuring for a Renesas SH7780 SDK7780R3
 	  evaluation board.
 
+config SH_SDK7786
+	bool "SDK7786"
+	depends on CPU_SUBTYPE_SH7786
+	select SYS_SUPPORTS_PCI
+	help
+	  Select SDK7786 if configuring for a Renesas Technology Europe
+	  SH7786-65nm board.
+
 config SH_HIGHLANDER
 	bool "Highlander"
 	depends on CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785
diff --git a/arch/sh/boards/Makefile b/arch/sh/boards/Makefile
index ce0f263..4f90f9b 100644
--- a/arch/sh/boards/Makefile
+++ b/arch/sh/boards/Makefile
@@ -8,3 +8,4 @@
 obj-$(CONFIG_SH_EDOSK7760)	+= board-edosk7760.o
 obj-$(CONFIG_SH_ESPT)		+= board-espt.o
 obj-$(CONFIG_SH_POLARIS)	+= board-polaris.o
+obj-$(CONFIG_SH_TITAN)		+= board-titan.o
diff --git a/arch/sh/boards/board-magicpanelr2.c b/arch/sh/boards/board-magicpanelr2.c
index 99ffc5f..efba450 100644
--- a/arch/sh/boards/board-magicpanelr2.c
+++ b/arch/sh/boards/board-magicpanelr2.c
@@ -23,7 +23,7 @@
 #include <asm/heartbeat.h>
 #include <cpu/sh7720.h>
 
-#define LAN9115_READY	(ctrl_inl(0xA8000084UL) & 0x00000001UL)
+#define LAN9115_READY	(__raw_readl(0xA8000084UL) & 0x00000001UL)
 
 /* Prefer cmdline over RedBoot */
 static const char *probes[] = { "cmdlinepart", "RedBoot", NULL };
@@ -60,33 +60,33 @@
 {
 	/* CS2: LAN (0x08000000 - 0x0bffffff) */
 	/* no idle cycles, normal space, 8 bit data bus */
-	ctrl_outl(0x36db0400, CS2BCR);
+	__raw_writel(0x36db0400, CS2BCR);
 	/* (SW:1.5 WR:3 HW:1.5), ext. wait */
-	ctrl_outl(0x000003c0, CS2WCR);
+	__raw_writel(0x000003c0, CS2WCR);
 
 	/* CS4: CAN1 (0xb0000000 - 0xb3ffffff) */
 	/* no idle cycles, normal space, 8 bit data bus */
-	ctrl_outl(0x00000200, CS4BCR);
+	__raw_writel(0x00000200, CS4BCR);
 	/* (SW:1.5 WR:3 HW:1.5), ext. wait */
-	ctrl_outl(0x00100981, CS4WCR);
+	__raw_writel(0x00100981, CS4WCR);
 
 	/* CS5a: CAN2 (0xb4000000 - 0xb5ffffff) */
 	/* no idle cycles, normal space, 8 bit data bus */
-	ctrl_outl(0x00000200, CS5ABCR);
+	__raw_writel(0x00000200, CS5ABCR);
 	/* (SW:1.5 WR:3 HW:1.5), ext. wait */
-	ctrl_outl(0x00100981, CS5AWCR);
+	__raw_writel(0x00100981, CS5AWCR);
 
 	/* CS5b: CAN3 (0xb6000000 - 0xb7ffffff) */
 	/* no idle cycles, normal space, 8 bit data bus */
-	ctrl_outl(0x00000200, CS5BBCR);
+	__raw_writel(0x00000200, CS5BBCR);
 	/* (SW:1.5 WR:3 HW:1.5), ext. wait */
-	ctrl_outl(0x00100981, CS5BWCR);
+	__raw_writel(0x00100981, CS5BWCR);
 
 	/* CS6a: Rotary (0xb8000000 - 0xb9ffffff) */
 	/* no idle cycles, normal space, 8 bit data bus */
-	ctrl_outl(0x00000200, CS6ABCR);
+	__raw_writel(0x00000200, CS6ABCR);
 	/* (SW:1.5 WR:3 HW:1.5), no ext. wait */
-	ctrl_outl(0x001009C1, CS6AWCR);
+	__raw_writel(0x001009C1, CS6AWCR);
 }
 
 static void __init setup_port_multiplexing(void)
@@ -94,71 +94,71 @@
 	/* A7 GPO(LED8);     A6 GPO(LED7);     A5 GPO(LED6);	  A4 GPO(LED5);
 	 * A3 GPO(LED4);     A2 GPO(LED3);     A1 GPO(LED2);	  A0 GPO(LED1);
 	 */
-	ctrl_outw(0x5555, PORT_PACR);	/* 01 01 01 01 01 01 01 01 */
+	__raw_writew(0x5555, PORT_PACR);	/* 01 01 01 01 01 01 01 01 */
 
 	/* B7 GPO(RST4);   B6 GPO(RST3);  B5 GPO(RST2);    B4 GPO(RST1);
 	 * B3 GPO(PB3);	   B2 GPO(PB2);	  B1 GPO(PB1);	   B0 GPO(PB0);
 	 */
-	ctrl_outw(0x5555, PORT_PBCR);	/* 01 01 01 01 01 01 01 01 */
+	__raw_writew(0x5555, PORT_PBCR);	/* 01 01 01 01 01 01 01 01 */
 
 	/* C7 GPO(PC7);	  C6 GPO(PC6);	  C5 GPO(PC5);	   C4 GPO(PC4);
 	 * C3 LCD_DATA3;  C2 LCD_DATA2;   C1 LCD_DATA1;	   C0 LCD_DATA0;
 	 */
-	ctrl_outw(0x5500, PORT_PCCR);	/* 01 01 01 01 00 00 00 00 */
+	__raw_writew(0x5500, PORT_PCCR);	/* 01 01 01 01 00 00 00 00 */
 
 	/* D7 GPO(PD7);	D6 GPO(PD6);	D5 GPO(PD5);	   D4 GPO(PD4);
 	 * D3 GPO(PD3);	D2 GPO(PD2);	D1 GPO(PD1);	   D0 GPO(PD0);
 	 */
-	ctrl_outw(0x5555, PORT_PDCR);	/* 01 01 01 01 01 01 01 01 */
+	__raw_writew(0x5555, PORT_PDCR);	/* 01 01 01 01 01 01 01 01 */
 
 	/* E7 (x);	  E6 GPI(nu);	 E5 GPI(nu);	  E4 LCD_M_DISP;
 	 * E3 LCD_CL1;	  E2 LCD_CL2;	 E1 LCD_DON;	  E0 LCD_FLM;
 	 */
-	ctrl_outw(0x3C00, PORT_PECR);	/* 00 11 11 00 00 00 00 00 */
+	__raw_writew(0x3C00, PORT_PECR);	/* 00 11 11 00 00 00 00 00 */
 
 	/* F7 (x);	     F6 DA1(VLCD);     F5 DA0(nc);	  F4 AN3;
 	 * F3 AN2(MID_AD);   F2 AN1(EARTH_AD); F1 AN0(TEMP);	  F0 GPI+(nc);
 	 */
-	ctrl_outw(0x0002, PORT_PFCR);	/* 00 00 00 00 00 00 00 10 */
+	__raw_writew(0x0002, PORT_PFCR);	/* 00 00 00 00 00 00 00 10 */
 
 	/* G7 (x);	  G6 IRQ5(TOUCH_BUSY); G5 IRQ4(TOUCH_IRQ); G4 GPI(KEY2);
 	 * G3 GPI(KEY1);  G2 GPO(LED11);	G1 GPO(LED10);     G0 GPO(LED9);
 	 */
-	ctrl_outw(0x03D5, PORT_PGCR);	/* 00 00 00 11 11 01 01 01 */
+	__raw_writew(0x03D5, PORT_PGCR);	/* 00 00 00 11 11 01 01 01 */
 
 	/* H7 (x);	      H6 /RAS(BRAS);	  H5 /CAS(BCAS); H4 CKE(BCKE);
 	 * H3 GPO(EARTH_OFF); H2 GPO(EARTH_TEST); H1 USB2_PWR;	 H0 USB1_PWR;
 	 */
-	ctrl_outw(0x0050, PORT_PHCR);	/* 00 00 00 00 01 01 00 00 */
+	__raw_writew(0x0050, PORT_PHCR);	/* 00 00 00 00 01 01 00 00 */
 
 	/* J7 (x);	  J6 AUDCK;	   J5 ASEBRKAK;	    J4 AUDATA3;
 	 * J3 AUDATA2;	  J2 AUDATA1;	   J1 AUDATA0;	    J0 AUDSYNC;
 	 */
-	ctrl_outw(0x0000, PORT_PJCR);	/* 00 00 00 00 00 00 00 00 */
+	__raw_writew(0x0000, PORT_PJCR);	/* 00 00 00 00 00 00 00 00 */
 
 	/* K7 (x);	    K6 (x);	     K5 (x);	   K4 (x);
 	 * K3 PINT7(/PWR2); K2 PINT6(/PWR1); K1 PINT5(nu); K0 PINT4(FLASH_READY)
 	 */
-	ctrl_outw(0x00FF, PORT_PKCR);	/* 00 00 00 00 11 11 11 11 */
+	__raw_writew(0x00FF, PORT_PKCR);	/* 00 00 00 00 11 11 11 11 */
 
 	/* L7 TRST;	   L6 TMS;	     L5 TDO;		  L4 TDI;
 	 * L3 TCK;	   L2 (x);	     L1 (x);		  L0 (x);
 	 */
-	ctrl_outw(0x0000, PORT_PLCR);	/* 00 00 00 00 00 00 00 00 */
+	__raw_writew(0x0000, PORT_PLCR);	/* 00 00 00 00 00 00 00 00 */
 
 	/* M7 GPO(CURRENT_SINK);    M6 GPO(PWR_SWITCH);     M5 GPO(LAN_SPEED);
 	 * M4 GPO(LAN_RESET);       M3 GPO(BUZZER);	    M2 GPO(LCD_BL);
 	 * M1 CS5B(CAN3_CS);	    M0 GPI+(nc);
 	 */
-	ctrl_outw(0x5552, PORT_PMCR);	   /* 01 01 01 01 01 01 00 10 */
+	__raw_writew(0x5552, PORT_PMCR);	   /* 01 01 01 01 01 01 00 10 */
 
 	/* CURRENT_SINK=off,	PWR_SWITCH=off, LAN_SPEED=100MBit,
 	 * LAN_RESET=off,	BUZZER=off,	LCD_BL=off
 	 */
 #if CONFIG_SH_MAGIC_PANEL_R2_VERSION == 2
-	ctrl_outb(0x30, PORT_PMDR);
+	__raw_writeb(0x30, PORT_PMDR);
 #elif CONFIG_SH_MAGIC_PANEL_R2_VERSION == 3
-	ctrl_outb(0xF0, PORT_PMDR);
+	__raw_writeb(0xF0, PORT_PMDR);
 #else
 #error Unknown revision of PLATFORM_MP_R2
 #endif
@@ -167,8 +167,8 @@
 	 * P4 GPO(nu);	       P3 IRQ3(LAN_IRQ);  P2 IRQ2(CAN3_IRQ);
 	 * P1 IRQ1(CAN2_IRQ);  P0 IRQ0(CAN1_IRQ)
 	 */
-	ctrl_outw(0x0100, PORT_PPCR);	/* 00 00 00 01 00 00 00 00 */
-	ctrl_outb(0x10, PORT_PPDR);
+	__raw_writew(0x0100, PORT_PPCR);	/* 00 00 00 01 00 00 00 00 */
+	__raw_writeb(0x10, PORT_PPDR);
 
 	/* R7 A25;	     R6 A24;	     R5 A23;		  R4 A22;
 	 * R3 A21;	     R2 A20;	     R1 A19;		  R0 A0;
@@ -185,22 +185,22 @@
 	/* S7 (x);		S6 (x);        S5 (x);	     S4 GPO(EEPROM_CS2);
 	 * S3 GPO(EEPROM_CS1);  S2 SIOF0_TXD;  S1 SIOF0_RXD; S0 SIOF0_SCK;
 	 */
-	ctrl_outw(0x0140, PORT_PSCR);	/* 00 00 00 01 01 00 00 00 */
+	__raw_writew(0x0140, PORT_PSCR);	/* 00 00 00 01 01 00 00 00 */
 
 	/* T7 (x);	   T6 (x);	  T5 (x);	  T4 COM1_CTS;
 	 * T3 COM1_RTS;	   T2 COM1_TXD;	  T1 COM1_RXD;	  T0 GPO(WDOG)
 	 */
-	ctrl_outw(0x0001, PORT_PTCR);	/* 00 00 00 00 00 00 00 01 */
+	__raw_writew(0x0001, PORT_PTCR);	/* 00 00 00 00 00 00 00 01 */
 
 	/* U7 (x);	     U6 (x);	   U5 (x);	  U4 GPI+(/AC_FAULT);
 	 * U3 GPO(TOUCH_CS); U2 TOUCH_TXD; U1 TOUCH_RXD;  U0 TOUCH_SCK;
 	 */
-	ctrl_outw(0x0240, PORT_PUCR);	/* 00 00 00 10 01 00 00 00 */
+	__raw_writew(0x0240, PORT_PUCR);	/* 00 00 00 10 01 00 00 00 */
 
 	/* V7 (x);	  V6 (x);	V5 (x);		  V4 GPO(MID2);
 	 * V3 GPO(MID1);  V2 CARD_TxD;	V1 CARD_RxD;	  V0 GPI+(/BAT_FAULT);
 	 */
-	ctrl_outw(0x0142, PORT_PVCR);	/* 00 00 00 01 01 00 00 10 */
+	__raw_writew(0x0142, PORT_PVCR);	/* 00 00 00 01 01 00 00 10 */
 }
 
 static void __init mpr2_setup(char **cmdline_p)
@@ -209,24 +209,24 @@
 	 * /PCC_CD1, /PCC_CD2,  PCC_BVD1, PCC_BVD2,
 	 * /IOIS16,  IRQ4,	IRQ5,	  USB1d_SUSPEND
 	 */
-	ctrl_outw(0xAABC, PORT_PSELA);
+	__raw_writew(0xAABC, PORT_PSELA);
 	/* set Pin Select Register B:
 	 * /SCIF0_RTS, /SCIF0_CTS, LCD_VCPWC,
 	 * LCD_VEPWC,  IIC_SDA,    IIC_SCL, Reserved
 	 */
-	ctrl_outw(0x3C00, PORT_PSELB);
+	__raw_writew(0x3C00, PORT_PSELB);
 	/* set Pin Select Register C:
 	 * SIOF1_SCK, SIOF1_RxD, SCIF1_RxD, SCIF1_TxD, Reserved
 	 */
-	ctrl_outw(0x0000, PORT_PSELC);
+	__raw_writew(0x0000, PORT_PSELC);
 	/* set Pin Select Register D: Reserved, SIOF1_TxD, Reserved, SIOF1_MCLK,
 	 * Reserved, SIOF1_SYNC, Reserved, SCIF1_SCK, Reserved
 	 */
-	ctrl_outw(0x0000, PORT_PSELD);
+	__raw_writew(0x0000, PORT_PSELD);
 	/* set USB TxRx Control: Reserved, DRV, Reserved, USB_TRANS, USB_SEL */
-	ctrl_outw(0x0101, PORT_UTRCTL);
+	__raw_writew(0x0101, PORT_UTRCTL);
 	/* set USB Clock Control: USSCS, USSTB, Reserved (HighByte always A5) */
-	ctrl_outw(0xA5C0, PORT_UCLKCR_W);
+	__raw_writew(0xA5C0, PORT_UCLKCR_W);
 
 	setup_chip_select();
 
diff --git a/arch/sh/boards/board-polaris.c b/arch/sh/boards/board-polaris.c
index 62607eb..5948663 100644
--- a/arch/sh/boards/board-polaris.c
+++ b/arch/sh/boards/board-polaris.c
@@ -59,15 +59,12 @@
 static struct heartbeat_data heartbeat_data = {
 	.bit_pos	= heartbeat_bit_pos,
 	.nr_bits	= ARRAY_SIZE(heartbeat_bit_pos),
-	.regsize	= 8,
 };
 
-static struct resource heartbeat_resources[] = {
-	[0] = {
-		.start	= PORT_PCDR,
-		.end	= PORT_PCDR,
-		.flags	= IORESOURCE_MEM,
-	},
+static struct resource heartbeat_resource = {
+	.start	= PORT_PCDR,
+	.end	= PORT_PCDR,
+	.flags	= IORESOURCE_MEM | IORESOURCE_MEM_8BIT,
 };
 
 static struct platform_device heartbeat_device = {
@@ -76,8 +73,8 @@
 	.dev	= {
 		.platform_data	= &heartbeat_data,
 	},
-	.num_resources	= ARRAY_SIZE(heartbeat_resources),
-	.resource	= heartbeat_resources,
+	.num_resources	= 1,
+	.resource	= &heartbeat_resource,
 };
 
 static struct platform_device *polaris_devices[] __initdata = {
@@ -92,15 +89,15 @@
 	printk(KERN_INFO "Configuring Polaris external bus\n");
 
 	/* Configure area 5 with 2 wait states */
-	wcr = ctrl_inw(WCR2);
+	wcr = __raw_readw(WCR2);
 	wcr &= (~AREA5_WAIT_CTRL);
 	wcr |= (WAIT_STATES_10 << 10);
-	ctrl_outw(wcr, WCR2);
+	__raw_writew(wcr, WCR2);
 
 	/* Configure area 5 for 32-bit access */
-	bcr_mask = ctrl_inw(BCR2);
+	bcr_mask = __raw_readw(BCR2);
 	bcr_mask |= 1 << 10;
-	ctrl_outw(bcr_mask, BCR2);
+	__raw_writew(bcr_mask, BCR2);
 
 	return platform_add_devices(polaris_devices,
 				    ARRAY_SIZE(polaris_devices));
@@ -131,13 +128,13 @@
 static void __init init_polaris_irq(void)
 {
 	/* Disable all interrupts */
-	ctrl_outw(0, BCR_ILCRA);
-	ctrl_outw(0, BCR_ILCRB);
-	ctrl_outw(0, BCR_ILCRC);
-	ctrl_outw(0, BCR_ILCRD);
-	ctrl_outw(0, BCR_ILCRE);
-	ctrl_outw(0, BCR_ILCRF);
-	ctrl_outw(0, BCR_ILCRG);
+	__raw_writew(0, BCR_ILCRA);
+	__raw_writew(0, BCR_ILCRB);
+	__raw_writew(0, BCR_ILCRC);
+	__raw_writew(0, BCR_ILCRD);
+	__raw_writew(0, BCR_ILCRE);
+	__raw_writew(0, BCR_ILCRF);
+	__raw_writew(0, BCR_ILCRG);
 
 	register_ipr_controller(&ipr_irq_desc);
 }
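
The ctrl_in*/ctrl_out* calls replaced throughout these board files correspond to the __raw_read*/__raw_write* accessors: unordered, native-endian MMIO accesses where __raw_write*() takes the value first and the register address second, and __raw_read*() takes only the address. A generic read-modify-write sketch, with a hypothetical helper name and register argument, assuming the address comes from ioremap() or a fixed platform mapping as on these boards:

/* Sketch: set bits in a 16-bit memory-mapped register using the raw
 * accessors. */
static inline void set_reg16_bits(void __iomem *reg, u16 bits)
{
	u16 v = __raw_readw(reg);

	__raw_writew(v | bits, reg);
}
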
diff --git a/arch/sh/boards/board-sh7785lcr.c b/arch/sh/boards/board-sh7785lcr.c
index e5a8a2f..fe7e686 100644
--- a/arch/sh/boards/board-sh7785lcr.c
+++ b/arch/sh/boards/board-sh7785lcr.c
@@ -21,6 +21,7 @@
 #include <linux/i2c-algo-pca.h>
 #include <linux/usb/r8a66597.h>
 #include <linux/irq.h>
+#include <linux/io.h>
 #include <linux/clk.h>
 #include <linux/errno.h>
 #include <mach/sh7785lcr.h>
@@ -32,26 +33,17 @@
  * NOTE: This board has 2 physical memory maps.
  *	 Please look at include/asm-sh/sh7785lcr.h or hardware manual.
  */
-static struct resource heartbeat_resources[] = {
-	[0] = {
-		.start	= PLD_LEDCR,
-		.end	= PLD_LEDCR,
-		.flags	= IORESOURCE_MEM,
-	},
-};
-
-static struct heartbeat_data heartbeat_data = {
-	.regsize = 8,
+static struct resource heartbeat_resource = {
+	.start	= PLD_LEDCR,
+	.end	= PLD_LEDCR,
+	.flags	= IORESOURCE_MEM | IORESOURCE_MEM_8BIT,
 };
 
 static struct platform_device heartbeat_device = {
 	.name		= "heartbeat",
 	.id		= -1,
-	.dev	= {
-		.platform_data	= &heartbeat_data,
-	},
-	.num_resources	= ARRAY_SIZE(heartbeat_resources),
-	.resource	= heartbeat_resources,
+	.num_resources	= 1,
+	.resource	= &heartbeat_resource,
 };
 
 static struct mtd_partition nor_flash_partitions[] = {
@@ -341,8 +333,14 @@
 	pm_power_off = sh7785lcr_power_off;
 
 	/* sm501 DRAM configuration */
-	sm501_reg = (void __iomem *)0xb3e00000 + SM501_DRAM_CONTROL;
-	writel(0x000307c2, sm501_reg);
+	sm501_reg = ioremap_nocache(SM107_REG_ADDR, SM501_DRAM_CONTROL);
+	if (!sm501_reg) {
+		printk(KERN_ERR "%s: ioremap error.\n", __func__);
+		return;
+	}
+
+	writel(0x000307c2, sm501_reg + SM501_DRAM_CONTROL);
+	iounmap(sm501_reg);
 }
 
 /* Return the board specific boot mode pin configuration */
diff --git a/arch/sh/boards/board-shmin.c b/arch/sh/boards/board-shmin.c
index b1dcbbc..325bed5 100644
--- a/arch/sh/boards/board-shmin.c
+++ b/arch/sh/boards/board-shmin.c
@@ -17,8 +17,8 @@
 
 static void __init init_shmin_irq(void)
 {
-	ctrl_outw(0x2a00, PFC_PHCR);	// IRQ0-3=IRQ
-	ctrl_outw(0x0aaa, INTC_ICR1);	// IRQ0-3=IRQ-mode,Low-active.
+	__raw_writew(0x2a00, PFC_PHCR);	// IRQ0-3=IRQ
+	__raw_writew(0x0aaa, INTC_ICR1);	// IRQ0-3=IRQ-mode,Low-active.
 	plat_irq_setup_pins(IRQ_MODE_IRQ);
 }
 
diff --git a/arch/sh/boards/board-titan.c b/arch/sh/boards/board-titan.c
new file mode 100644
index 0000000..94c36c7b
--- /dev/null
+++ b/arch/sh/boards/board-titan.c
@@ -0,0 +1,24 @@
+/*
+ * arch/sh/boards/titan/setup.c - Setup for Titan
+ *
+ *  Copyright (C) 2006  Jamie Lenehan
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <mach/titan.h>
+#include <asm/io.h>
+
+static void __init init_titan_irq(void)
+{
+	/* enable individual interrupt mode for externals */
+	plat_irq_setup_pins(IRQ_MODE_IRQ);
+}
+
+static struct sh_machine_vector mv_titan __initmv = {
+	.mv_name	= "Titan",
+	.mv_init_irq	= init_titan_irq,
+};
diff --git a/arch/sh/boards/board-urquell.c b/arch/sh/boards/board-urquell.c
index 36b8bac..a9bd6e3 100644
--- a/arch/sh/boards/board-urquell.c
+++ b/arch/sh/boards/board-urquell.c
@@ -2,7 +2,7 @@
  * Renesas Technology Corp. SH7786 Urquell Support.
  *
  * Copyright (C) 2008  Kuninori Morimoto <morimoto.kuninori@renesas.com>
- * Copyright (C) 2009  Paul Mundt
+ * Copyright (C) 2009, 2010  Paul Mundt
  *
  * Based on board-sh7785lcr.c
  * Copyright (C) 2008  Yoshihiro Shimoda
@@ -19,6 +19,7 @@
 #include <linux/delay.h>
 #include <linux/gpio.h>
 #include <linux/irq.h>
+#include <linux/clk.h>
 #include <mach/urquell.h>
 #include <cpu/sh7786.h>
 #include <asm/heartbeat.h>
@@ -50,26 +51,17 @@
  */
 
 /* HeartBeat */
-static struct resource heartbeat_resources[] = {
-	[0] = {
-		.start	= BOARDREG(SLEDR),
-		.end	= BOARDREG(SLEDR),
-		.flags	= IORESOURCE_MEM,
-	},
-};
-
-static struct heartbeat_data heartbeat_data = {
-	.regsize = 16,
+static struct resource heartbeat_resource = {
+	.start	= BOARDREG(SLEDR),
+	.end	= BOARDREG(SLEDR),
+	.flags	= IORESOURCE_MEM | IORESOURCE_MEM_16BIT,
 };
 
 static struct platform_device heartbeat_device = {
 	.name		= "heartbeat",
 	.id		= -1,
-	.dev	= {
-		.platform_data	= &heartbeat_data,
-	},
-	.num_resources	= ARRAY_SIZE(heartbeat_resources),
-	.resource	= heartbeat_resources,
+	.num_resources	= 1,
+	.resource	= &heartbeat_resource,
 };
 
 /* LAN91C111 */
@@ -184,6 +176,27 @@
 	return __raw_readw(UBOARDREG(MDSWMR));
 }
 
+static int urquell_clk_init(void)
+{
+	struct clk *clk;
+	int ret;
+
+	/*
+	 * Only handle the EXTAL case, anyone interfacing a crystal
+	 * resonator will need to provide their own input clock.
+	 */
+	if (test_mode_pin(MODE_PIN9))
+		return -EINVAL;
+
+	clk = clk_get(NULL, "extal");
+	if (!clk || IS_ERR(clk))
+		return PTR_ERR(clk);
+	ret = clk_set_rate(clk, 33333333);
+	clk_put(clk);
+
+	return ret;
+}
+
 /* Initialize the board */
 static void __init urquell_setup(char **cmdline_p)
 {
@@ -200,4 +213,5 @@
 	.mv_setup	= urquell_setup,
 	.mv_init_irq	= urquell_init_irq,
 	.mv_mode_pins	= urquell_mode_pins,
+	.mv_clk_init	= urquell_clk_init,
 };
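
In the heartbeat conversions above, the LED register width is no longer passed via heartbeat_data.regsize; it is encoded in the resource flags instead (IORESOURCE_MEM_8BIT / IORESOURCE_MEM_16BIT), which the heartbeat driver is expected to honour. A minimal converted board stub would look like the sketch below; BOARD_LED_REG is a hypothetical board-specific register address, not taken from this patch.

/* Sketch of the converted heartbeat platform device. */
static struct resource heartbeat_resource = {
	.start	= BOARD_LED_REG,
	.end	= BOARD_LED_REG,
	.flags	= IORESOURCE_MEM | IORESOURCE_MEM_16BIT,
};

static struct platform_device heartbeat_device = {
	.name		= "heartbeat",
	.id		= -1,
	.num_resources	= 1,
	.resource	= &heartbeat_resource,
};
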
diff --git a/arch/sh/boards/mach-ap325rxa/setup.c b/arch/sh/boards/mach-ap325rxa/setup.c
index 71f556f..57e37e2 100644
--- a/arch/sh/boards/mach-ap325rxa/setup.c
+++ b/arch/sh/boards/mach-ap325rxa/setup.c
@@ -159,21 +159,21 @@
 	msleep(100);
 
 	/* ASD AP-320/325 LCD ON */
-	ctrl_outw(FPGA_LCDREG_VAL, FPGA_LCDREG);
+	__raw_writew(FPGA_LCDREG_VAL, FPGA_LCDREG);
 
 	/* backlight */
 	gpio_set_value(GPIO_PTS3, 0);
-	ctrl_outw(0x100, FPGA_BKLREG);
+	__raw_writew(0x100, FPGA_BKLREG);
 }
 
 static void ap320_wvga_power_off(void *board_data)
 {
 	/* backlight */
-	ctrl_outw(0, FPGA_BKLREG);
+	__raw_writew(0, FPGA_BKLREG);
 	gpio_set_value(GPIO_PTS3, 1);
 
 	/* ASD AP-320/325 LCD OFF */
-	ctrl_outw(0, FPGA_LCDREG);
+	__raw_writew(0, FPGA_LCDREG);
 }
 
 static struct sh_mobile_lcdc_info lcdc_info = {
@@ -420,7 +420,7 @@
 		.flags	= IORESOURCE_MEM,
 	},
 	[1] = {
-		.start	= 101,
+		.start	= 100,
 		.flags  = IORESOURCE_IRQ,
 	},
 };
@@ -443,7 +443,7 @@
 		.flags	= IORESOURCE_MEM,
 	},
 	[1] = {
-		.start	= 24,
+		.start	= 23,
 		.flags  = IORESOURCE_IRQ,
 	},
 };
@@ -595,7 +595,7 @@
 	gpio_request(GPIO_PTZ4, NULL);
 	gpio_direction_output(GPIO_PTZ4, 0); /* SADDR */
 
-	ctrl_outw(ctrl_inw(PORT_MSELCRB) & ~0x0001, PORT_MSELCRB);
+	__raw_writew(__raw_readw(PORT_MSELCRB) & ~0x0001, PORT_MSELCRB);
 
 	/* FLCTL */
 	gpio_request(GPIO_FN_FCE, NULL);
@@ -613,9 +613,9 @@
 	gpio_request(GPIO_FN_FWE, NULL);
 	gpio_request(GPIO_FN_FRB, NULL);
 
-	ctrl_outw(0, PORT_HIZCRC);
-	ctrl_outw(0xFFFF, PORT_DRVCRA);
-	ctrl_outw(0xFFFF, PORT_DRVCRB);
+	__raw_writew(0, PORT_HIZCRC);
+	__raw_writew(0xFFFF, PORT_DRVCRA);
+	__raw_writew(0xFFFF, PORT_DRVCRB);
 
 	platform_resource_setup_memory(&ceu_device, "ceu", 4 << 20);
 
diff --git a/arch/sh/boards/mach-cayman/irq.c b/arch/sh/boards/mach-cayman/irq.c
index 33f7708..1394b07 100644
--- a/arch/sh/boards/mach-cayman/irq.c
+++ b/arch/sh/boards/mach-cayman/irq.c
@@ -66,9 +66,9 @@
 	reg = EPLD_MASK_BASE + ((irq / 8) << 2);
 	bit = 1<<(irq % 8);
 	local_irq_save(flags);
-	mask = ctrl_inl(reg);
+	mask = __raw_readl(reg);
 	mask |= bit;
-	ctrl_outl(mask, reg);
+	__raw_writel(mask, reg);
 	local_irq_restore(flags);
 }
 
@@ -83,9 +83,9 @@
 	reg = EPLD_MASK_BASE + ((irq / 8) << 2);
 	bit = 1<<(irq % 8);
 	local_irq_save(flags);
-	mask = ctrl_inl(reg);
+	mask = __raw_readl(reg);
 	mask &= ~bit;
-	ctrl_outl(mask, reg);
+	__raw_writel(mask, reg);
 	local_irq_restore(flags);
 }
 
@@ -109,8 +109,8 @@
 		unsigned long status;
 		int i;
 
-		status = ctrl_inl(EPLD_STATUS_BASE) &
-			 ctrl_inl(EPLD_MASK_BASE) & 0xff;
+		status = __raw_readl(EPLD_STATUS_BASE) &
+			 __raw_readl(EPLD_MASK_BASE) & 0xff;
 		if (status == 0) {
 			irq = -1;
 		} else {
@@ -126,8 +126,8 @@
 		unsigned long status;
 		int i;
 
-		status = ctrl_inl(EPLD_STATUS_BASE + 3 * sizeof(u32)) &
-			 ctrl_inl(EPLD_MASK_BASE + 3 * sizeof(u32)) & 0xff;
+		status = __raw_readl(EPLD_STATUS_BASE + 3 * sizeof(u32)) &
+			 __raw_readl(EPLD_MASK_BASE + 3 * sizeof(u32)) & 0xff;
 		if (status == 0) {
 			irq = -1;
 		} else {
diff --git a/arch/sh/boards/mach-dreamcast/irq.c b/arch/sh/boards/mach-dreamcast/irq.c
index f55fc8e..d932667 100644
--- a/arch/sh/boards/mach-dreamcast/irq.c
+++ b/arch/sh/boards/mach-dreamcast/irq.c
@@ -135,3 +135,30 @@
 	/* Not reached */
 	return irq;
 }
+
+void systemasic_irq_init(void)
+{
+	int i, nid = cpu_to_node(boot_cpu_data);
+
+	/* Assign all virtual IRQs to the System ASIC int. handler */
+	for (i = HW_EVENT_IRQ_BASE; i < HW_EVENT_IRQ_MAX; i++) {
+		unsigned int irq;
+
+		irq = create_irq_nr(i, nid);
+		if (unlikely(irq == 0)) {
+			pr_err("%s: failed hooking irq %d for systemasic\n",
+			       __func__, i);
+			return;
+		}
+
+		if (unlikely(irq != i)) {
+			pr_err("%s: got irq %d but wanted %d, bailing.\n",
+			       __func__, irq, i);
+			destroy_irq(irq);
+			return;
+		}
+
+		set_irq_chip_and_handler(i, &systemasic_int,
+					 handle_level_irq);
+	}
+}
diff --git a/arch/sh/boards/mach-dreamcast/rtc.c b/arch/sh/boards/mach-dreamcast/rtc.c
index a743368..061d657 100644
--- a/arch/sh/boards/mach-dreamcast/rtc.c
+++ b/arch/sh/boards/mach-dreamcast/rtc.c
@@ -35,11 +35,11 @@
 	unsigned long val1, val2;
 
 	do {
-		val1 = ((ctrl_inl(AICA_RTC_SECS_H) & 0xffff) << 16) |
-			(ctrl_inl(AICA_RTC_SECS_L) & 0xffff);
+		val1 = ((__raw_readl(AICA_RTC_SECS_H) & 0xffff) << 16) |
+			(__raw_readl(AICA_RTC_SECS_L) & 0xffff);
 
-		val2 = ((ctrl_inl(AICA_RTC_SECS_H) & 0xffff) << 16) |
-			(ctrl_inl(AICA_RTC_SECS_L) & 0xffff);
+		val2 = ((__raw_readl(AICA_RTC_SECS_H) & 0xffff) << 16) |
+			(__raw_readl(AICA_RTC_SECS_L) & 0xffff);
 	} while (val1 != val2);
 
 	ts->tv_sec = val1 - TWENTY_YEARS;
@@ -60,14 +60,14 @@
 	unsigned long adj = secs + TWENTY_YEARS;
 
 	do {
-		ctrl_outl((adj & 0xffff0000) >> 16, AICA_RTC_SECS_H);
-		ctrl_outl((adj & 0xffff), AICA_RTC_SECS_L);
+		__raw_writel((adj & 0xffff0000) >> 16, AICA_RTC_SECS_H);
+		__raw_writel((adj & 0xffff), AICA_RTC_SECS_L);
 
-		val1 = ((ctrl_inl(AICA_RTC_SECS_H) & 0xffff) << 16) |
-			(ctrl_inl(AICA_RTC_SECS_L) & 0xffff);
+		val1 = ((__raw_readl(AICA_RTC_SECS_H) & 0xffff) << 16) |
+			(__raw_readl(AICA_RTC_SECS_L) & 0xffff);
 
-		val2 = ((ctrl_inl(AICA_RTC_SECS_H) & 0xffff) << 16) |
-			(ctrl_inl(AICA_RTC_SECS_L) & 0xffff);
+		val2 = ((__raw_readl(AICA_RTC_SECS_H) & 0xffff) << 16) |
+			(__raw_readl(AICA_RTC_SECS_L) & 0xffff);
 	} while (val1 != val2);
 
 	return 0;
diff --git a/arch/sh/boards/mach-dreamcast/setup.c b/arch/sh/boards/mach-dreamcast/setup.c
index a4b7402..ad1a4db 100644
--- a/arch/sh/boards/mach-dreamcast/setup.c
+++ b/arch/sh/boards/mach-dreamcast/setup.c
@@ -28,25 +28,8 @@
 #include <asm/machvec.h>
 #include <mach/sysasic.h>
 
-extern struct irq_chip systemasic_int;
-extern void aica_time_init(void);
-extern int systemasic_irq_demux(int);
-
 static void __init dreamcast_setup(char **cmdline_p)
 {
-	int i;
-
-	/* Mask all hardware events */
-	/* XXX */
-
-	/* Acknowledge any previous events */
-	/* XXX */
-
-	/* Assign all virtual IRQs to the System ASIC int. handler */
-	for (i = HW_EVENT_IRQ_BASE; i < HW_EVENT_IRQ_MAX; i++)
-		set_irq_chip_and_handler(i, &systemasic_int,
-					 handle_level_irq);
-
 	board_time_init = aica_time_init;
 }
 
@@ -54,4 +37,5 @@
 	.mv_name		= "Sega Dreamcast",
 	.mv_setup		= dreamcast_setup,
 	.mv_irq_demux		= systemasic_irq_demux,
+	.mv_init_irq		= systemasic_irq_init,
 };
diff --git a/arch/sh/boards/mach-ecovec24/sdram.S b/arch/sh/boards/mach-ecovec24/sdram.S
index 8334400..3963c6f 100644
--- a/arch/sh/boards/mach-ecovec24/sdram.S
+++ b/arch/sh/boards/mach-ecovec24/sdram.S
@@ -37,6 +37,10 @@
 	.balign 4
 ENTRY(ecovec24_sdram_leave_start)
 
+	mov.l	@(SH_SLEEP_MODE, r5), r0
+	tst	#SUSP_SH_RSTANDBY, r0
+	bf	resume_rstandby
+
 	/* DBSC: put memory in auto-refresh mode */
 
 	ED 0xFD000040, 0x00000000 /* DBRFPDN0 */
@@ -49,4 +53,59 @@
 	rts
 	 nop
 
+resume_rstandby:
+
+	/* DBSC: re-initialize and put in auto-refresh */
+
+	ED 0xFD000108, 0x00000181 /* DBPDCNT0 */
+	ED 0xFD000020, 0x015B0002 /* DBCONF */
+	ED 0xFD000030, 0x03071502 /* DBTR0 */
+	ED 0xFD000034, 0x02020102 /* DBTR1 */
+	ED 0xFD000038, 0x01090405 /* DBTR2 */
+	ED 0xFD00003C, 0x00000002 /* DBTR3 */
+	ED 0xFD000008, 0x00000005 /* DBKIND */
+	ED 0xFD000040, 0x00000001 /* DBRFPDN0 */
+	ED 0xFD000040, 0x00000000 /* DBRFPDN0 */
+	ED 0xFD000018, 0x00000001 /* DBCKECNT */
+
+	mov	#100,r0
+WAIT_400NS:
+	dt	r0
+	bf	WAIT_400NS
+
+	ED 0xFD000014, 0x00000002 /* DBCMDCNT (PALL) */
+	ED 0xFD000060, 0x00020000 /* DBMRCNT (EMR2) */
+	ED 0xFD000060, 0x00030000 /* DBMRCNT (EMR3) */
+	ED 0xFD000060, 0x00010004 /* DBMRCNT (EMR) */
+	ED 0xFD000060, 0x00000532 /* DBMRCNT (MRS) */
+	ED 0xFD000014, 0x00000002 /* DBCMDCNT (PALL) */
+	ED 0xFD000014, 0x00000004 /* DBCMDCNT (REF) */
+	ED 0xFD000014, 0x00000004 /* DBCMDCNT (REF) */
+	ED 0xFD000060, 0x00000432 /* DBMRCNT (MRS) */
+	ED 0xFD000060, 0x000103c0 /* DBMRCNT (EMR) */
+	ED 0xFD000060, 0x00010040 /* DBMRCNT (EMR) */
+
+	mov	#100,r0
+WAIT_400NS_2:
+	dt	r0
+	bf	WAIT_400NS_2
+
+	ED 0xFD000010, 0x00000001 /* DBEN */
+	ED 0xFD000044, 0x0000050f /* DBRFPDN1 */
+	ED 0xFD000048, 0x236800e6 /* DBRFPDN2 */
+
+	mov.l	DUMMY,r0
+	mov.l	@r0, r1 /* force single dummy read */
+
+	ED 0xFD000014, 0x00000002 /* DBCMDCNT (PALL) */
+	ED 0xFD000014, 0x00000004 /* DBCMDCNT (REF) */
+	ED 0xFD000108, 0x00000080 /* DBPDCNT0 */
+	ED 0xFD000040, 0x00010000 /* DBRFPDN0 */
+
+	rts
+	 nop
+
+	.balign 4
+DUMMY:	.long	0xac400000
+
 ENTRY(ecovec24_sdram_leave_end)
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index 5c24628..39ed872 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -64,18 +64,16 @@
 
 /* Heartbeat */
 static unsigned char led_pos[] = { 0, 1, 2, 3 };
+
 static struct heartbeat_data heartbeat_data = {
-	.regsize = 8,
 	.nr_bits = 4,
 	.bit_pos = led_pos,
 };
 
-static struct resource heartbeat_resources[] = {
-	[0] = {
-		.start  = 0xA405012C, /* PTG */
-		.end    = 0xA405012E - 1,
-		.flags  = IORESOURCE_MEM,
-	},
+static struct resource heartbeat_resource = {
+	.start  = 0xA405012C, /* PTG */
+	.end    = 0xA405012E - 1,
+	.flags  = IORESOURCE_MEM | IORESOURCE_MEM_8BIT,
 };
 
 static struct platform_device heartbeat_device = {
@@ -84,8 +82,8 @@
 	.dev = {
 		.platform_data = &heartbeat_data,
 	},
-	.num_resources  = ARRAY_SIZE(heartbeat_resources),
-	.resource       = heartbeat_resources,
+	.num_resources  = 1,
+	.resource       = &heartbeat_resource,
 };
 
 /* MTD */
@@ -455,7 +453,7 @@
 		.flags  = IORESOURCE_MEM,
 	},
 	[1] = {
-		.start  = 101,
+		.start  = 100,
 		.flags  = IORESOURCE_IRQ,
 	},
 };
@@ -491,7 +489,7 @@
 		.flags  = IORESOURCE_MEM,
 	},
 	[1] = {
-		.start  = 24,
+		.start  = 23,
 		.flags  = IORESOURCE_IRQ,
 	},
 };
@@ -698,13 +696,13 @@
 #define FCLKBCR		0xa415000c
 static void fsimck_init(struct clk *clk)
 {
-	u32 status = ctrl_inl(clk->enable_reg);
+	u32 status = __raw_readl(clk->enable_reg);
 
 	/* use external clock */
 	status &= ~0x000000ff;
 	status |= 0x00000080;
 
-	ctrl_outl(status, clk->enable_reg);
+	__raw_writel(status, clk->enable_reg);
 }
 
 static struct clk_ops fsimck_clk_ops = {
@@ -753,6 +751,26 @@
 	},
 };
 
+/* IrDA */
+static struct resource irda_resources[] = {
+	[0] = {
+		.name	= "IrDA",
+		.start  = 0xA45D0000,
+		.end    = 0xA45D0049,
+		.flags  = IORESOURCE_MEM,
+	},
+	[1] = {
+		.start  = 20,
+		.flags  = IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device irda_device = {
+	.name           = "sh_sir",
+	.num_resources  = ARRAY_SIZE(irda_resources),
+	.resource       = irda_resources,
+};
+
 static struct platform_device *ecovec_devices[] __initdata = {
 	&heartbeat_device,
 	&nor_flash_device,
@@ -773,8 +791,10 @@
 	&camera_devices[1],
 	&camera_devices[2],
 	&fsi_device,
+	&irda_device,
 };
 
+#ifdef CONFIG_I2C
 #define EEPROM_ADDR 0x50
 static u8 mac_read(struct i2c_adapter *a, u8 command)
 {
@@ -817,6 +837,12 @@
 		msleep(10);
 	}
 }
+#else
+static void __init sh_eth_init(struct sh_eth_plat_data *pd)
+{
+	pr_err("unable to read sh_eth MAC address\n");
+}
+#endif
 
 #define PORT_HIZA 0xA4050158
 #define IODRIVEA  0xA405018A
@@ -831,7 +857,8 @@
 	struct clk *clk;
 
 	/* register board specific self-refresh code */
-	sh_mobile_register_self_refresh(SUSP_SH_STANDBY | SUSP_SH_SF,
+	sh_mobile_register_self_refresh(SUSP_SH_STANDBY | SUSP_SH_SF |
+					SUSP_SH_RSTANDBY,
 					&ecovec24_sdram_enter_start,
 					&ecovec24_sdram_enter_end,
 					&ecovec24_sdram_leave_start,
@@ -855,7 +882,7 @@
 	gpio_direction_output(GPIO_PTG1, 0);
 	gpio_direction_output(GPIO_PTG2, 0);
 	gpio_direction_output(GPIO_PTG3, 0);
-	ctrl_outw((ctrl_inw(PORT_HIZA) & ~(0x1 << 1)) , PORT_HIZA);
+	__raw_writew((__raw_readw(PORT_HIZA) & ~(0x1 << 1)) , PORT_HIZA);
 
 	/* enable SH-Eth */
 	gpio_request(GPIO_PTA1, NULL);
@@ -875,16 +902,16 @@
 	gpio_request(GPIO_FN_LNKSTA,       NULL);
 
 	/* enable USB */
-	ctrl_outw(0x0000, 0xA4D80000);
-	ctrl_outw(0x0000, 0xA4D90000);
+	__raw_writew(0x0000, 0xA4D80000);
+	__raw_writew(0x0000, 0xA4D90000);
 	gpio_request(GPIO_PTB3,  NULL);
 	gpio_request(GPIO_PTB4,  NULL);
 	gpio_request(GPIO_PTB5,  NULL);
 	gpio_direction_input(GPIO_PTB3);
 	gpio_direction_output(GPIO_PTB4, 0);
 	gpio_direction_output(GPIO_PTB5, 0);
-	ctrl_outw(0x0600, 0xa40501d4);
-	ctrl_outw(0x0600, 0xa4050192);
+	__raw_writew(0x0600, 0xa40501d4);
+	__raw_writew(0x0600, 0xa4050192);
 
 	if (gpio_get_value(GPIO_PTB3)) {
 		printk(KERN_INFO "USB1 function is selected\n");
@@ -925,7 +952,7 @@
 	gpio_request(GPIO_FN_LCDVSYN,  NULL);
 	gpio_request(GPIO_FN_LCDDON,   NULL);
 	gpio_request(GPIO_FN_LCDLCLK,  NULL);
-	ctrl_outw((ctrl_inw(PORT_HIZA) & ~0x0001), PORT_HIZA);
+	__raw_writew((__raw_readw(PORT_HIZA) & ~0x0001), PORT_HIZA);
 
 	gpio_request(GPIO_PTE6, NULL);
 	gpio_request(GPIO_PTU1, NULL);
@@ -937,7 +964,7 @@
 	gpio_direction_output(GPIO_PTA2, 0);
 
 	/* I/O buffer drive ability is high */
-	ctrl_outw((ctrl_inw(IODRIVEA) & ~0x00c0) | 0x0080 , IODRIVEA);
+	__raw_writew((__raw_readw(IODRIVEA) & ~0x00c0) | 0x0080 , IODRIVEA);
 
 	if (gpio_get_value(GPIO_PTE6)) {
 		/* DVI */
@@ -1069,7 +1096,7 @@
 	gpio_direction_output(GPIO_PTB7, 0);
 
 	/* I/O buffer drive ability is high for SDHI1 */
-	ctrl_outw((ctrl_inw(IODRIVEA) & ~0x3000) | 0x2000 , IODRIVEA);
+	__raw_writew((__raw_readw(IODRIVEA) & ~0x3000) | 0x2000 , IODRIVEA);
 #else
 	/* enable MSIOF0 on CN11 (needs DS2.4 set to OFF) */
 	gpio_request(GPIO_FN_MSIOF0_TXD, NULL);
@@ -1107,6 +1134,11 @@
 	gpio_request(GPIO_FN_FSIOBLRCK,  NULL);
 	gpio_request(GPIO_FN_CLKAUDIOBO, NULL);
 
+	/* set SPU2 clock to 83.4 MHz */
+	clk = clk_get(NULL, "spu_clk");
+	clk_set_rate(clk, clk_round_rate(clk, 83333333));
+	clk_put(clk);
+
 	/* change parent of FSI B */
 	clk = clk_get(NULL, "fsib_clk");
 	clk_register(&fsimckb_clk);
@@ -1123,6 +1155,17 @@
 	gpio_request(GPIO_FN_INTC_IRQ1, NULL);
 	gpio_direction_input(GPIO_FN_INTC_IRQ1);
 
+	/* set VPU clock to 166 MHz */
+	clk = clk_get(NULL, "vpu_clk");
+	clk_set_rate(clk, clk_round_rate(clk, 166000000));
+	clk_put(clk);
+
+	/* enable IrDA */
+	gpio_request(GPIO_FN_IRDA_OUT, NULL);
+	gpio_request(GPIO_FN_IRDA_IN,  NULL);
+	gpio_request(GPIO_PTU5, NULL);
+	gpio_direction_output(GPIO_PTU5, 0);
+
 	/* enable I2C device */
 	i2c_register_board_info(0, i2c0_devices,
 				ARRAY_SIZE(i2c0_devices));
diff --git a/arch/sh/boards/mach-highlander/irq-r7780mp.c b/arch/sh/boards/mach-highlander/irq-r7780mp.c
index 83c28bc..9893fd3 100644
--- a/arch/sh/boards/mach-highlander/irq-r7780mp.c
+++ b/arch/sh/boards/mach-highlander/irq-r7780mp.c
@@ -64,7 +64,7 @@
 
 unsigned char * __init highlander_plat_irq_setup(void)
 {
-	if ((ctrl_inw(0xa4000700) & 0xf000) == 0x2000) {
+	if ((__raw_readw(0xa4000700) & 0xf000) == 0x2000) {
 		printk(KERN_INFO "Using r7780mp interrupt controller.\n");
 		register_intc_controller(&intc_desc);
 		return irl2irq;
diff --git a/arch/sh/boards/mach-highlander/irq-r7780rp.c b/arch/sh/boards/mach-highlander/irq-r7780rp.c
index b721e86..0805b21 100644
--- a/arch/sh/boards/mach-highlander/irq-r7780rp.c
+++ b/arch/sh/boards/mach-highlander/irq-r7780rp.c
@@ -57,7 +57,7 @@
 
 unsigned char * __init highlander_plat_irq_setup(void)
 {
-	if (ctrl_inw(0xa5000600)) {
+	if (__raw_readw(0xa5000600)) {
 		printk(KERN_INFO "Using r7780rp interrupt controller.\n");
 		register_intc_controller(&intc_desc);
 		return irl2irq;
diff --git a/arch/sh/boards/mach-highlander/irq-r7785rp.c b/arch/sh/boards/mach-highlander/irq-r7785rp.c
index 3811b06..558b248 100644
--- a/arch/sh/boards/mach-highlander/irq-r7785rp.c
+++ b/arch/sh/boards/mach-highlander/irq-r7785rp.c
@@ -66,20 +66,20 @@
 
 unsigned char * __init highlander_plat_irq_setup(void)
 {
-	if ((ctrl_inw(0xa4000158) & 0xf000) != 0x1000)
+	if ((__raw_readw(0xa4000158) & 0xf000) != 0x1000)
 		return NULL;
 
 	printk(KERN_INFO "Using r7785rp interrupt controller.\n");
 
-	ctrl_outw(0x0000, PA_IRLSSR1);	/* FPGA IRLSSR1(CF_CD clear) */
+	__raw_writew(0x0000, PA_IRLSSR1);	/* FPGA IRLSSR1(CF_CD clear) */
 
 	/* Setup the FPGA IRL */
-	ctrl_outw(0x0000, PA_IRLPRA);	/* FPGA IRLA */
-	ctrl_outw(0xe598, PA_IRLPRB);	/* FPGA IRLB */
-	ctrl_outw(0x7060, PA_IRLPRC);	/* FPGA IRLC */
-	ctrl_outw(0x0000, PA_IRLPRD);	/* FPGA IRLD */
-	ctrl_outw(0x4321, PA_IRLPRE);	/* FPGA IRLE */
-	ctrl_outw(0xdcba, PA_IRLPRF);	/* FPGA IRLF */
+	__raw_writew(0x0000, PA_IRLPRA);	/* FPGA IRLA */
+	__raw_writew(0xe598, PA_IRLPRB);	/* FPGA IRLB */
+	__raw_writew(0x7060, PA_IRLPRC);	/* FPGA IRLC */
+	__raw_writew(0x0000, PA_IRLPRD);	/* FPGA IRLD */
+	__raw_writew(0x4321, PA_IRLPRE);	/* FPGA IRLE */
+	__raw_writew(0xdcba, PA_IRLPRF);	/* FPGA IRLF */
 
 	register_intc_controller(&intc_desc);
 	return irl2irq;
diff --git a/arch/sh/boards/mach-highlander/psw.c b/arch/sh/boards/mach-highlander/psw.c
index 37b1a2e..5227863 100644
--- a/arch/sh/boards/mach-highlander/psw.c
+++ b/arch/sh/boards/mach-highlander/psw.c
@@ -24,7 +24,7 @@
 	unsigned int l, mask;
 	int ret = 0;
 
-	l = ctrl_inw(PA_DBSW);
+	l = __raw_readw(PA_DBSW);
 
 	/* Nothing to do if there's no state change */
 	if (psw->state) {
@@ -45,7 +45,7 @@
 out:
 	/* Clear the switch IRQs */
 	l |= (0x7 << 12);
-	ctrl_outw(l, PA_DBSW);
+	__raw_writew(l, PA_DBSW);
 
 	return IRQ_RETVAL(ret);
 }
diff --git a/arch/sh/boards/mach-highlander/setup.c b/arch/sh/boards/mach-highlander/setup.c
index f663c14..affd667 100644
--- a/arch/sh/boards/mach-highlander/setup.c
+++ b/arch/sh/boards/mach-highlander/setup.c
@@ -311,13 +311,13 @@
  */
 static int ivdr_clk_enable(struct clk *clk)
 {
-	ctrl_outw(ctrl_inw(PA_IVDRCTL) | (1 << IVDR_CK_ON), PA_IVDRCTL);
+	__raw_writew(__raw_readw(PA_IVDRCTL) | (1 << IVDR_CK_ON), PA_IVDRCTL);
 	return 0;
 }
 
 static void ivdr_clk_disable(struct clk *clk)
 {
-	ctrl_outw(ctrl_inw(PA_IVDRCTL) & ~(1 << IVDR_CK_ON), PA_IVDRCTL);
+	__raw_writew(__raw_readw(PA_IVDRCTL) & ~(1 << IVDR_CK_ON), PA_IVDRCTL);
 }
 
 static struct clk_ops ivdr_clk_ops = {
@@ -337,7 +337,7 @@
 static void r7780rp_power_off(void)
 {
 	if (mach_is_r7780mp() || mach_is_r7785rp())
-		ctrl_outw(0x0001, PA_POFF);
+		__raw_writew(0x0001, PA_POFF);
 }
 
 /*
@@ -345,7 +345,7 @@
  */
 static void __init highlander_setup(char **cmdline_p)
 {
-	u16 ver = ctrl_inw(PA_VERREG);
+	u16 ver = __raw_readw(PA_VERREG);
 	int i;
 
 	printk(KERN_INFO "Renesas Solutions Highlander %s support.\n",
@@ -370,12 +370,12 @@
 		clk_enable(clk);
 	}
 
-	ctrl_outw(0x0000, PA_OBLED);	/* Clear LED. */
+	__raw_writew(0x0000, PA_OBLED);	/* Clear LED. */
 
 	if (mach_is_r7780rp())
-		ctrl_outw(0x0001, PA_SDPOW);	/* SD Power ON */
+		__raw_writew(0x0001, PA_SDPOW);	/* SD Power ON */
 
-	ctrl_outw(ctrl_inw(PA_IVDRCTL) | 0x01, PA_IVDRCTL);	/* Si13112 */
+	__raw_writew(__raw_readw(PA_IVDRCTL) | 0x01, PA_IVDRCTL);	/* Si13112 */
 
 	pm_power_off = r7780rp_power_off;
 }
diff --git a/arch/sh/boards/mach-hp6xx/hp6xx_apm.c b/arch/sh/boards/mach-hp6xx/hp6xx_apm.c
index e85212f..b49535c 100644
--- a/arch/sh/boards/mach-hp6xx/hp6xx_apm.c
+++ b/arch/sh/boards/mach-hp6xx/hp6xx_apm.c
@@ -53,7 +53,7 @@
 	info->ac_line_status = (battery > HP680_BATTERY_AC_ON) ?
 			 APM_AC_ONLINE : APM_AC_OFFLINE;
 
-	pgdr = ctrl_inb(PGDR);
+	pgdr = __raw_readb(PGDR);
 	if (pgdr & PGDR_MAIN_BATTERY_OUT) {
 		info->battery_status	= APM_BATTERY_STATUS_NOT_PRESENT;
 		info->battery_flag	= 0x80;
diff --git a/arch/sh/boards/mach-hp6xx/pm.c b/arch/sh/boards/mach-hp6xx/pm.c
index d936c1a..4499a37 100644
--- a/arch/sh/boards/mach-hp6xx/pm.c
+++ b/arch/sh/boards/mach-hp6xx/pm.c
@@ -53,17 +53,17 @@
 	sh_wdt_write_cnt(0);
 
 	/* disable PLL1 */
-	frqcr = ctrl_inw(FRQCR);
+	frqcr = __raw_readw(FRQCR);
 	frqcr &= ~(FRQCR_PLLEN | FRQCR_PSTBY);
-	ctrl_outw(frqcr, FRQCR);
+	__raw_writew(frqcr, FRQCR);
 
 	/* enable standby */
-	stbcr = ctrl_inb(STBCR);
-	ctrl_outb(stbcr | STBCR_STBY | STBCR_MSTP2, STBCR);
+	stbcr = __raw_readb(STBCR);
+	__raw_writeb(stbcr | STBCR_STBY | STBCR_MSTP2, STBCR);
 
 	/* set self-refresh */
-	mcr = ctrl_inw(MCR);
-	ctrl_outw(mcr & ~MCR_RFSH, MCR);
+	mcr = __raw_readw(MCR);
+	__raw_writew(mcr & ~MCR_RFSH, MCR);
 
 	/* set interrupt handler */
 	asm volatile("stc vbr, %0" : "=r" (vbr_old));
@@ -73,8 +73,8 @@
 	       &wakeup_start, &wakeup_end - &wakeup_start);
 	asm volatile("ldc %0, vbr" : : "r" (vbr_new));
 
-	ctrl_outw(0, RTCNT);
-	ctrl_outw(mcr | MCR_RFSH | MCR_RMODE, MCR);
+	__raw_writew(0, RTCNT);
+	__raw_writew(mcr | MCR_RFSH | MCR_RMODE, MCR);
 
 	cpu_sleep();
 
@@ -83,14 +83,14 @@
 	free_page(vbr_new);
 
 	/* enable PLL1 */
-	frqcr = ctrl_inw(FRQCR);
+	frqcr = __raw_readw(FRQCR);
 	frqcr |= FRQCR_PSTBY;
-	ctrl_outw(frqcr, FRQCR);
+	__raw_writew(frqcr, FRQCR);
 	udelay(50);
 	frqcr |= FRQCR_PLLEN;
-	ctrl_outw(frqcr, FRQCR);
+	__raw_writew(frqcr, FRQCR);
 
-	ctrl_outb(stbcr, STBCR);
+	__raw_writeb(stbcr, STBCR);
 
 	clear_bl_bit();
 }
@@ -115,21 +115,21 @@
 	outw(hd64461_stbcr, HD64461_STBCR);
 #endif
 
-	ctrl_outb(0x1f, DACR);
+	__raw_writeb(0x1f, DACR);
 
-	stbcr = ctrl_inb(STBCR);
-	ctrl_outb(0x01, STBCR);
+	stbcr = __raw_readb(STBCR);
+	__raw_writeb(0x01, STBCR);
 
-	stbcr2 = ctrl_inb(STBCR2);
-	ctrl_outb(0x7f , STBCR2);
+	stbcr2 = __raw_readb(STBCR2);
+	__raw_writeb(0x7f , STBCR2);
 
 	outw(0xf07f, HD64461_SCPUCR);
 
 	pm_enter();
 
 	outw(0, HD64461_SCPUCR);
-	ctrl_outb(stbcr, STBCR);
-	ctrl_outb(stbcr2, STBCR2);
+	__raw_writeb(stbcr, STBCR);
+	__raw_writeb(stbcr2, STBCR2);
 
 #ifdef CONFIG_HD64461_ENABLER
 	hd64461_stbcr = inw(HD64461_STBCR);
diff --git a/arch/sh/boards/mach-hp6xx/setup.c b/arch/sh/boards/mach-hp6xx/setup.c
index e6dd5e9..8c9add5 100644
--- a/arch/sh/boards/mach-hp6xx/setup.c
+++ b/arch/sh/boards/mach-hp6xx/setup.c
@@ -149,19 +149,19 @@
 
 	sh_dac_output(0, DAC_SPEAKER_VOLUME);
 	sh_dac_disable(DAC_SPEAKER_VOLUME);
-	v8 = ctrl_inb(DACR);
+	v8 = __raw_readb(DACR);
 	v8 &= ~DACR_DAE;
-	ctrl_outb(v8,DACR);
+	__raw_writeb(v8,DACR);
 
-	v8 = ctrl_inb(SCPDR);
+	v8 = __raw_readb(SCPDR);
 	v8 |= SCPDR_TS_SCAN_X | SCPDR_TS_SCAN_Y;
 	v8 &= ~SCPDR_TS_SCAN_ENABLE;
-	ctrl_outb(v8, SCPDR);
+	__raw_writeb(v8, SCPDR);
 
-	v = ctrl_inw(SCPCR);
+	v = __raw_readw(SCPCR);
 	v &= ~SCPCR_TS_MASK;
 	v |= SCPCR_TS_ENABLE;
-	ctrl_outw(v, SCPCR);
+	__raw_writew(v, SCPCR);
 }
 device_initcall(hp6xx_devices_setup);
 
diff --git a/arch/sh/boards/mach-kfr2r09/setup.c b/arch/sh/boards/mach-kfr2r09/setup.c
index 5d7b5d9..b2cd0ed 100644
--- a/arch/sh/boards/mach-kfr2r09/setup.c
+++ b/arch/sh/boards/mach-kfr2r09/setup.c
@@ -282,7 +282,7 @@
 		 * use 1.8 V for VccQ_VIO
 		 * use 2.85V for VccQ_SR
 		 */
-		ctrl_outw((ctrl_inw(DRVCRB) & ~0x0003) | 0x0001, DRVCRB);
+		__raw_writew((__raw_readw(DRVCRB) & ~0x0003) | 0x0001, DRVCRB);
 
 		/* reset clear */
 		ret = gpio_request(GPIO_PTB4, NULL);
@@ -351,7 +351,7 @@
 		.flags  = IORESOURCE_MEM,
 	},
 	[1] = {
-		.start  = 101,
+		.start  = 100,
 		.flags  = IORESOURCE_IRQ,
 	},
 };
@@ -492,13 +492,13 @@
 	if (kfr2r09_usb0_gadget_i2c_setup() != 0)
 		return -ENODEV; /* unable to configure using i2c */
 
-	ctrl_outw((ctrl_inw(PORT_MSELCRB) & ~0xc000) | 0x8000, PORT_MSELCRB);
+	__raw_writew((__raw_readw(PORT_MSELCRB) & ~0xc000) | 0x8000, PORT_MSELCRB);
 	gpio_request(GPIO_FN_PDSTATUS, NULL); /* R-standby disables USB clock */
 	gpio_request(GPIO_PTV6, NULL); /* USBCLK_ON */
 	gpio_direction_output(GPIO_PTV6, 1); /* USBCLK_ON = H */
 	msleep(20); /* wait 20ms to let the clock settle */
 	clk_enable(clk_get(NULL, "usb0"));
-	ctrl_outw(0x0600, 0xa40501d4);
+	__raw_writew(0x0600, 0xa40501d4);
 
 	return 0;
 }
@@ -526,12 +526,12 @@
 	gpio_direction_output(GPIO_PTG3, 1); /* HPON_ON = H */
 
 	/* setup NOR flash at CS0 */
-	ctrl_outl(0x36db0400, BSC_CS0BCR);
-	ctrl_outl(0x00000500, BSC_CS0WCR);
+	__raw_writel(0x36db0400, BSC_CS0BCR);
+	__raw_writel(0x00000500, BSC_CS0WCR);
 
 	/* setup NAND flash at CS4 */
-	ctrl_outl(0x36db0400, BSC_CS4BCR);
-	ctrl_outl(0x00000500, BSC_CS4WCR);
+	__raw_writel(0x36db0400, BSC_CS4BCR);
+	__raw_writel(0x00000500, BSC_CS4WCR);
 
 	/* setup KEYSC pins */
 	gpio_request(GPIO_FN_KEYOUT0, NULL);
diff --git a/arch/sh/boards/mach-landisk/gio.c b/arch/sh/boards/mach-landisk/gio.c
index 5280131..01e6abb 100644
--- a/arch/sh/boards/mach-landisk/gio.c
+++ b/arch/sh/boards/mach-landisk/gio.c
@@ -76,39 +76,39 @@
 		break;
 
 	case GIODRV_IOCSGIODATA1:	/* write byte */
-		ctrl_outb((unsigned char)(0x0ff & data), addr);
+		__raw_writeb((unsigned char)(0x0ff & data), addr);
 		break;
 
 	case GIODRV_IOCSGIODATA2:	/* write word */
 		if (addr & 0x01) {
 			return -EFAULT;
 		}
-		ctrl_outw((unsigned short int)(0x0ffff & data), addr);
+		__raw_writew((unsigned short int)(0x0ffff & data), addr);
 		break;
 
 	case GIODRV_IOCSGIODATA4:	/* write long */
 		if (addr & 0x03) {
 			return -EFAULT;
 		}
-		ctrl_outl(data, addr);
+		__raw_writel(data, addr);
 		break;
 
 	case GIODRV_IOCGGIODATA1:	/* read byte */
-		data = ctrl_inb(addr);
+		data = __raw_readb(addr);
 		break;
 
 	case GIODRV_IOCGGIODATA2:	/* read word */
 		if (addr & 0x01) {
 			return -EFAULT;
 		}
-		data = ctrl_inw(addr);
+		data = __raw_readw(addr);
 		break;
 
 	case GIODRV_IOCGGIODATA4:	/* read long */
 		if (addr & 0x03) {
 			return -EFAULT;
 		}
-		data = ctrl_inl(addr);
+		data = __raw_readl(addr);
 		break;
 	default:
 		return -EFAULT;
diff --git a/arch/sh/boards/mach-landisk/irq.c b/arch/sh/boards/mach-landisk/irq.c
index 7b284cd..96f38a4 100644
--- a/arch/sh/boards/mach-landisk/irq.c
+++ b/arch/sh/boards/mach-landisk/irq.c
@@ -22,14 +22,14 @@
 {
 	unsigned char mask = 0xff ^ (0x01 << (irq - 5));
 
-	ctrl_outb(ctrl_inb(PA_IMASK) & mask, PA_IMASK);
+	__raw_writeb(__raw_readb(PA_IMASK) & mask, PA_IMASK);
 }
 
 static void enable_landisk_irq(unsigned int irq)
 {
 	unsigned char value = (0x01 << (irq - 5));
 
-	ctrl_outb(ctrl_inb(PA_IMASK) | value, PA_IMASK);
+	__raw_writeb(__raw_readb(PA_IMASK) | value, PA_IMASK);
 }
 
 static struct irq_chip landisk_irq_chip __read_mostly = {
@@ -52,5 +52,5 @@
 					      handle_level_irq, "level");
 		enable_landisk_irq(i);
 	}
-	ctrl_outb(0x00, PA_PWRINT_CLR);
+	__raw_writeb(0x00, PA_PWRINT_CLR);
 }
diff --git a/arch/sh/boards/mach-landisk/psw.c b/arch/sh/boards/mach-landisk/psw.c
index e6b0efa..bef8352 100644
--- a/arch/sh/boards/mach-landisk/psw.c
+++ b/arch/sh/boards/mach-landisk/psw.c
@@ -25,7 +25,7 @@
 	unsigned int sw_value;
 	int ret = 0;
 
-	sw_value = (0x0ff & (~ctrl_inb(PA_STATUS)));
+	sw_value = (0x0ff & (~__raw_readb(PA_STATUS)));
 
 	/* Nothing to do if there's no state change */
 	if (psw->state) {
@@ -42,7 +42,7 @@
 
 out:
 	/* Clear the switch IRQs */
-	ctrl_outb(0x00, PA_PWRINT_CLR);
+	__raw_writeb(0x00, PA_PWRINT_CLR);
 
 	return IRQ_RETVAL(ret);
 }
diff --git a/arch/sh/boards/mach-landisk/setup.c b/arch/sh/boards/mach-landisk/setup.c
index db22ea2..50337acc 100644
--- a/arch/sh/boards/mach-landisk/setup.c
+++ b/arch/sh/boards/mach-landisk/setup.c
@@ -25,7 +25,7 @@
 
 static void landisk_power_off(void)
 {
-        ctrl_outb(0x01, PA_SHUTDOWN);
+        __raw_writeb(0x01, PA_SHUTDOWN);
 }
 
 static struct resource cf_ide_resources[3];
@@ -63,7 +63,7 @@
 	/* open I/O area window */
 	paddrbase = virt_to_phys((void *)PA_AREA5_IO);
 	prot = PAGE_KERNEL_PCC(1, _PAGE_PCC_IO16);
-	cf_ide_base = p3_ioremap(paddrbase, PAGE_SIZE, prot.pgprot);
+	cf_ide_base = ioremap_prot(paddrbase, PAGE_SIZE, pgprot_val(prot));
 	if (!cf_ide_base) {
 		printk("allocate_cf_area : can't open CF I/O window!\n");
 		return -ENOMEM;
@@ -88,7 +88,7 @@
 static void __init landisk_setup(char **cmdline_p)
 {
         /* LED ON */
-	ctrl_outb(ctrl_inb(PA_LED) | 0x03, PA_LED);
+	__raw_writeb(__raw_readb(PA_LED) | 0x03, PA_LED);
 
 	printk(KERN_INFO "I-O DATA DEVICE, INC. \"LANDISK Series\" support.\n");
 	pm_power_off = landisk_power_off;
diff --git a/arch/sh/boards/mach-lboxre2/setup.c b/arch/sh/boards/mach-lboxre2/setup.c
index 2b0b581..79b4e0d7 100644
--- a/arch/sh/boards/mach-lboxre2/setup.c
+++ b/arch/sh/boards/mach-lboxre2/setup.c
@@ -56,8 +56,8 @@
 	/* open I/O area window */
 	paddrbase = virt_to_phys((void*)PA_AREA5_IO);
 	psize = PAGE_SIZE;
-	prot = PAGE_KERNEL_PCC( 1 , _PAGE_PCC_IO16);
-	cf0_io_base = (u32)p3_ioremap(paddrbase, psize, prot.pgprot);
+	prot = PAGE_KERNEL_PCC(1, _PAGE_PCC_IO16);
+	cf0_io_base = (u32)ioremap_prot(paddrbase, psize, pgprot_val(prot));
 	if (!cf0_io_base) {
 		printk(KERN_ERR "%s : can't open CF I/O window!\n" , __func__ );
 		return -ENOMEM;
diff --git a/arch/sh/boards/mach-microdev/io.c b/arch/sh/boards/mach-microdev/io.c
index 52dd748..2960c65 100644
--- a/arch/sh/boards/mach-microdev/io.c
+++ b/arch/sh/boards/mach-microdev/io.c
@@ -141,10 +141,10 @@
 #if defined(CONFIG_PCI)
 	/* System board present, just make a dummy SRAM access.  (CS0 will be
 	   mapped to PCI memory, probably good to avoid it.) */
-	ctrl_inw(0xa6800000);
+	__raw_readw(0xa6800000);
 #else
 	/* CS0 will be mapped to flash, ROM etc so safe to access it. */
-	ctrl_inw(0xa0000000);
+	__raw_readw(0xa0000000);
 #endif
 }
 
diff --git a/arch/sh/boards/mach-microdev/irq.c b/arch/sh/boards/mach-microdev/irq.c
index b551963..a26d166 100644
--- a/arch/sh/boards/mach-microdev/irq.c
+++ b/arch/sh/boards/mach-microdev/irq.c
@@ -88,7 +88,7 @@
 	fpgaIrq = fpgaIrqTable[irq].fpgaIrq;
 
 	/* disable interrupts on the FPGA INTC register */
-	ctrl_outl(MICRODEV_FPGA_INTC_MASK(fpgaIrq), MICRODEV_FPGA_INTDSB_REG);
+	__raw_writel(MICRODEV_FPGA_INTC_MASK(fpgaIrq), MICRODEV_FPGA_INTDSB_REG);
 }
 
 static void enable_microdev_irq(unsigned int irq)
@@ -107,13 +107,13 @@
 	priorityReg = MICRODEV_FPGA_INTPRI_REG(fpgaIrq);
 
 	/* set priority for the interrupt */
-	priorities = ctrl_inl(priorityReg);
+	priorities = __raw_readl(priorityReg);
 	priorities &= ~MICRODEV_FPGA_INTPRI_MASK(fpgaIrq);
 	priorities |= MICRODEV_FPGA_INTPRI_LEVEL(fpgaIrq, pri);
-	ctrl_outl(priorities, priorityReg);
+	__raw_writel(priorities, priorityReg);
 
 	/* enable interrupts on the FPGA INTC register */
-	ctrl_outl(MICRODEV_FPGA_INTC_MASK(fpgaIrq), MICRODEV_FPGA_INTENB_REG);
+	__raw_writel(MICRODEV_FPGA_INTC_MASK(fpgaIrq), MICRODEV_FPGA_INTENB_REG);
 }
 
 /* This function sets the desired irq handler to be a MicroDev type */
@@ -134,7 +134,7 @@
 	int i;
 
 	/* disable interrupts on the FPGA INTC register */
-	ctrl_outl(~0ul, MICRODEV_FPGA_INTDSB_REG);
+	__raw_writel(~0ul, MICRODEV_FPGA_INTDSB_REG);
 
 	for (i = 0; i < NUM_EXTERNAL_IRQS; i++)
 		make_microdev_irq(i);
diff --git a/arch/sh/boards/mach-migor/setup.c b/arch/sh/boards/mach-migor/setup.c
index 9b4676f..be300aa 100644
--- a/arch/sh/boards/mach-migor/setup.c
+++ b/arch/sh/boards/mach-migor/setup.c
@@ -397,7 +397,7 @@
 		.flags	= IORESOURCE_MEM,
 	},
 	[1] = {
-		.start	= 101,
+		.start	= 100,
 		.flags  = IORESOURCE_IRQ,
 	},
 };
@@ -496,28 +496,16 @@
 					&migor_sdram_enter_end,
 					&migor_sdram_leave_start,
 					&migor_sdram_leave_end);
-#ifdef CONFIG_PM
 	/* Let D11 LED show STATUS0 */
 	gpio_request(GPIO_FN_STATUS0, NULL);
 
 	/* Lit D12 LED show PDSTATUS */
 	gpio_request(GPIO_FN_PDSTATUS, NULL);
-#else
-	/* Lit D11 LED */
-	gpio_request(GPIO_PTJ7, NULL);
-	gpio_direction_output(GPIO_PTJ7, 1);
-	gpio_export(GPIO_PTJ7, 0);
-
-	/* Lit D12 LED */
-	gpio_request(GPIO_PTJ5, NULL);
-	gpio_direction_output(GPIO_PTJ5, 1);
-	gpio_export(GPIO_PTJ5, 0);
-#endif
 
 	/* SMC91C111 - Enable IRQ0, Setup CS4 for 16-bit fast access */
 	gpio_request(GPIO_FN_IRQ0, NULL);
-	ctrl_outl(0x00003400, BSC_CS4BCR);
-	ctrl_outl(0x00110080, BSC_CS4WCR);
+	__raw_writel(0x00003400, BSC_CS4BCR);
+	__raw_writel(0x00110080, BSC_CS4WCR);
 
 	/* KEYSC */
 	gpio_request(GPIO_FN_KEYOUT0, NULL);
@@ -533,7 +521,7 @@
 
 	/* NAND Flash */
 	gpio_request(GPIO_FN_CS6A_CE2B, NULL);
-	ctrl_outl((ctrl_inl(BSC_CS6ABCR) & ~0x0600) | 0x0200, BSC_CS6ABCR);
+	__raw_writel((__raw_readl(BSC_CS6ABCR) & ~0x0600) | 0x0200, BSC_CS6ABCR);
 	gpio_request(GPIO_PTA1, NULL);
 	gpio_direction_input(GPIO_PTA1);
 
@@ -627,7 +615,7 @@
 #else
 	gpio_direction_output(GPIO_PTT0, 1);
 #endif
-	ctrl_outw(ctrl_inw(PORT_MSELCRB) | 0x2000, PORT_MSELCRB); /* D15->D8 */
+	__raw_writew(__raw_readw(PORT_MSELCRB) | 0x2000, PORT_MSELCRB); /* D15->D8 */
 
 	platform_resource_setup_memory(&migor_ceu_device, "ceu", 4 << 20);
 
diff --git a/arch/sh/boards/mach-r2d/irq.c b/arch/sh/boards/mach-r2d/irq.c
index 78d7b27..574f009 100644
--- a/arch/sh/boards/mach-r2d/irq.c
+++ b/arch/sh/boards/mach-r2d/irq.c
@@ -129,7 +129,7 @@
 {
 	struct intc_desc *d;
 
-	switch (ctrl_inw(PA_VERREG) & 0xf0) {
+	switch (__raw_readw(PA_VERREG) & 0xf0) {
 #ifdef CONFIG_RTS7751R2D_PLUS
 	case 0x10:
 		printk(KERN_INFO "Using R2D-PLUS interrupt controller.\n");
@@ -147,7 +147,7 @@
 #endif
 	default:
 		printk(KERN_INFO "Unknown R2D interrupt controller 0x%04x\n",
-		       ctrl_inw(PA_VERREG));
+		       __raw_readw(PA_VERREG));
 		return;
 	}
 
diff --git a/arch/sh/boards/mach-r2d/setup.c b/arch/sh/boards/mach-r2d/setup.c
index a625ecb..b84df6a 100644
--- a/arch/sh/boards/mach-r2d/setup.c
+++ b/arch/sh/boards/mach-r2d/setup.c
@@ -70,7 +70,7 @@
 static void r2d_chip_select(struct sh_spi_info *spi, int cs, int state)
 {
 	BUG_ON(cs != 0);  /* Single Epson RTC-9701JE attached on CS0 */
-	ctrl_outw(state == BITBANG_CS_ACTIVE, PA_RTCCE);
+	__raw_writew(state == BITBANG_CS_ACTIVE, PA_RTCCE);
 }
 
 static struct sh_spi_info spi_info = {
@@ -262,7 +262,7 @@
 
 static void rts7751r2d_power_off(void)
 {
-	ctrl_outw(0x0001, PA_POWOFF);
+	__raw_writew(0x0001, PA_POWOFF);
 }
 
 /*
@@ -271,14 +271,14 @@
 static void __init rts7751r2d_setup(char **cmdline_p)
 {
 	void __iomem *sm501_reg;
-	u16 ver = ctrl_inw(PA_VERREG);
+	u16 ver = __raw_readw(PA_VERREG);
 
 	printk(KERN_INFO "Renesas Technology Sales RTS7751R2D support.\n");
 
 	printk(KERN_INFO "FPGA version:%d (revision:%d)\n",
 					(ver >> 4) & 0xf, ver & 0xf);
 
-	ctrl_outw(0x0000, PA_OUTPORT);
+	__raw_writew(0x0000, PA_OUTPORT);
 	pm_power_off = rts7751r2d_power_off;
 
 	/* sm501 dram configuration:
diff --git a/arch/sh/boards/mach-rsk/devices-rsk7203.c b/arch/sh/boards/mach-rsk/devices-rsk7203.c
index c37617e..4fa08ba 100644
--- a/arch/sh/boards/mach-rsk/devices-rsk7203.c
+++ b/arch/sh/boards/mach-rsk/devices-rsk7203.c
@@ -96,7 +96,7 @@
 	gpio_request(GPIO_FN_RXD0, NULL);
 
 	/* Setup LAN9118: CS1 in 16-bit Big Endian Mode, IRQ0 at Port B */
-	ctrl_outl(0x36db0400, 0xfffc0008); /* CS1BCR */
+	__raw_writel(0x36db0400, 0xfffc0008); /* CS1BCR */
 	gpio_request(GPIO_FN_IRQ0_PB, NULL);
 
 	return platform_add_devices(rsk7203_devices,
diff --git a/arch/sh/boards/mach-sdk7780/irq.c b/arch/sh/boards/mach-sdk7780/irq.c
index 8555581..e5f7564 100644
--- a/arch/sh/boards/mach-sdk7780/irq.c
+++ b/arch/sh/boards/mach-sdk7780/irq.c
@@ -37,9 +37,9 @@
 {
 	printk(KERN_INFO "Using SDK7780 interrupt controller.\n");
 
-	ctrl_outw(0xFFFF, FPGA_IRQ0MR);
+	__raw_writew(0xFFFF, FPGA_IRQ0MR);
 	/* Setup IRL 0-3 */
-	ctrl_outw(0x0003, FPGA_IMSR);
+	__raw_writew(0x0003, FPGA_IMSR);
 	plat_irq_setup_pins(IRQ_MODE_IRL3210);
 
 	register_intc_controller(&fpga_intc_desc);
diff --git a/arch/sh/boards/mach-sdk7780/setup.c b/arch/sh/boards/mach-sdk7780/setup.c
index aad94a7..4da38db 100644
--- a/arch/sh/boards/mach-sdk7780/setup.c
+++ b/arch/sh/boards/mach-sdk7780/setup.c
@@ -20,27 +20,18 @@
 
 #define GPIO_PECR        0xFFEA0008
 
-//* Heartbeat */
-static struct heartbeat_data heartbeat_data = {
-	.regsize = 16,
-};
-
-static struct resource heartbeat_resources[] = {
-	[0] = {
-		.start  = PA_LED,
-		.end    = PA_LED,
-		.flags  = IORESOURCE_MEM,
-	},
+/* Heartbeat */
+static struct resource heartbeat_resource = {
+	.start  = PA_LED,
+	.end    = PA_LED,
+	.flags  = IORESOURCE_MEM | IORESOURCE_MEM_16BIT,
 };
 
 static struct platform_device heartbeat_device = {
 	.name           = "heartbeat",
 	.id             = -1,
-	.dev = {
-		.platform_data = &heartbeat_data,
-	},
-	.num_resources  = ARRAY_SIZE(heartbeat_resources),
-	.resource       = heartbeat_resources,
+	.num_resources  = 1,
+	.resource       = &heartbeat_resource,
 };
 
 /* SMC91x */
@@ -83,8 +74,8 @@
 
 static void __init sdk7780_setup(char **cmdline_p)
 {
-	u16 ver = ctrl_inw(FPGA_FPVERR);
-	u16 dateStamp = ctrl_inw(FPGA_FPDATER);
+	u16 ver = __raw_readw(FPGA_FPVERR);
+	u16 dateStamp = __raw_readw(FPGA_FPDATER);
 
 	printk(KERN_INFO "Renesas Technology Europe SDK7780 support.\n");
 	printk(KERN_INFO "Board version: %d (revision %d), "
@@ -94,7 +85,7 @@
 			 dateStamp);
 
 	/* Setup pin mux'ing for PCIC */
-	ctrl_outw(0x0000, GPIO_PECR);
+	__raw_writew(0x0000, GPIO_PECR);
 }
 
 /*
diff --git a/arch/sh/boards/mach-sdk7786/Makefile b/arch/sh/boards/mach-sdk7786/Makefile
new file mode 100644
index 0000000..a29f19e
--- /dev/null
+++ b/arch/sh/boards/mach-sdk7786/Makefile
@@ -0,0 +1 @@
+obj-y	:= setup.o fpga.o irq.o
diff --git a/arch/sh/boards/mach-sdk7786/fpga.c b/arch/sh/boards/mach-sdk7786/fpga.c
new file mode 100644
index 0000000..3e4ec66
--- /dev/null
+++ b/arch/sh/boards/mach-sdk7786/fpga.c
@@ -0,0 +1,72 @@
+/*
+ * SDK7786 FPGA Support.
+ *
+ * Copyright (C) 2010  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/bcd.h>
+#include <mach/fpga.h>
+#include <asm/sizes.h>
+
+#define FPGA_REGS_OFFSET	0x03fff800
+#define FPGA_REGS_SIZE		0x490
+
+/*
+ * The FPGA can be mapped in any of the generally available areas,
+ * so we attempt to scan for it using the fixed SRSTR read magic.
+ *
+ * Once the FPGA is located, the rest of the mapping data for the other
+ * components can be determined dynamically from its section mapping
+ * registers.
+ */
+static void __iomem *sdk7786_fpga_probe(void)
+{
+	unsigned long area;
+	void __iomem *base;
+
+	/*
+	 * Iterate over all of the areas where the FPGA could be mapped.
+	 * The possible range is anywhere from area 0 through 6, area 7
+	 * is reserved.
+	 */
+	for (area = PA_AREA0; area < PA_AREA7; area += SZ_64M) {
+		base = ioremap_nocache(area + FPGA_REGS_OFFSET, FPGA_REGS_SIZE);
+		if (!base) {
+			/* Failed to remap this area, move along. */
+			continue;
+		}
+
+		if (ioread16(base + SRSTR) == SRSTR_MAGIC)
+			return base;	/* Found it! */
+
+		iounmap(base);
+	}
+
+	return NULL;
+}
+
+void __iomem *sdk7786_fpga_base;
+
+void __init sdk7786_fpga_init(void)
+{
+	u16 version, date;
+
+	sdk7786_fpga_base = sdk7786_fpga_probe();
+	if (unlikely(!sdk7786_fpga_base)) {
+		panic("FPGA detection failed.\n");
+		return;
+	}
+
+	version = fpga_read_reg(FPGAVR);
+	date = fpga_read_reg(FPGADR);
+
+	pr_info("\tFPGA version:\t%d.%d (built on %d/%d/%d)\n",
+		bcd2bin(version >> 8) & 0xf, bcd2bin(version & 0xf),
+		((date >> 12) & 0xf) + 2000,
+		(date >> 8) & 0xf, bcd2bin(date & 0xff));
+}
diff --git a/arch/sh/boards/mach-sdk7786/irq.c b/arch/sh/boards/mach-sdk7786/irq.c
new file mode 100644
index 0000000..46943a0
--- /dev/null
+++ b/arch/sh/boards/mach-sdk7786/irq.c
@@ -0,0 +1,48 @@
+/*
+ * SDK7786 FPGA IRQ Controller Support.
+ *
+ * Copyright (C) 2010  Matt Fleming
+ * Copyright (C) 2010  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/irq.h>
+#include <mach/fpga.h>
+#include <mach/irq.h>
+
+enum {
+	ATA_IRQ_BIT		= 1,
+	SPI_BUSY_BIT		= 2,
+	LIRQ5_BIT		= 3,
+	LIRQ6_BIT		= 4,
+	LIRQ7_BIT		= 5,
+	LIRQ8_BIT		= 6,
+	KEY_IRQ_BIT		= 7,
+	PEN_IRQ_BIT		= 8,
+	ETH_IRQ_BIT		= 9,
+	RTC_ALARM_BIT		= 10,
+	CRYSTAL_FAIL_BIT	= 12,
+	ETH_PME_BIT		= 14,
+};
+
+void __init sdk7786_init_irq(void)
+{
+	unsigned int tmp;
+
+	/* Enable priority encoding for all IRLs */
+	fpga_write_reg(fpga_read_reg(INTMSR) | 0x0303, INTMSR);
+
+	/* Clear FPGA interrupt status registers */
+	fpga_write_reg(0x0000, INTASR);
+	fpga_write_reg(0x0000, INTBSR);
+
+	/* Unmask FPGA interrupts */
+	tmp = fpga_read_reg(INTAMR);
+	tmp &= ~(1 << ETH_IRQ_BIT);
+	fpga_write_reg(tmp, INTAMR);
+
+	plat_irq_setup_pins(IRQ_MODE_IRL7654_MASK);
+	plat_irq_setup_pins(IRQ_MODE_IRL3210_MASK);
+}
diff --git a/arch/sh/boards/mach-sdk7786/setup.c b/arch/sh/boards/mach-sdk7786/setup.c
new file mode 100644
index 0000000..f094ea2
--- /dev/null
+++ b/arch/sh/boards/mach-sdk7786/setup.c
@@ -0,0 +1,189 @@
+/*
+ * Renesas Technology Europe SDK7786 Support.
+ *
+ * Copyright (C) 2010  Matt Fleming
+ * Copyright (C) 2010  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/smsc911x.h>
+#include <linux/i2c.h>
+#include <linux/irq.h>
+#include <linux/clk.h>
+#include <mach/fpga.h>
+#include <mach/irq.h>
+#include <asm/machvec.h>
+#include <asm/heartbeat.h>
+#include <asm/sizes.h>
+#include <asm/reboot.h>
+
+static struct resource heartbeat_resource = {
+	.start		= 0x07fff8b0,
+	.end		= 0x07fff8b0 + sizeof(u16) - 1,
+	.flags		= IORESOURCE_MEM | IORESOURCE_MEM_16BIT,
+};
+
+static struct platform_device heartbeat_device = {
+	.name		= "heartbeat",
+	.id		= -1,
+	.num_resources	= 1,
+	.resource	= &heartbeat_resource,
+};
+
+static struct resource smsc911x_resources[] = {
+	[0] = {
+		.name		= "smsc911x-memory",
+		.start		= 0x07ffff00,
+		.end		= 0x07ffff00 + SZ_256 - 1,
+		.flags		= IORESOURCE_MEM,
+	},
+	[1] = {
+		.name		= "smsc911x-irq",
+		.start		= evt2irq(0x2c0),
+		.end		= evt2irq(0x2c0),
+		.flags		= IORESOURCE_IRQ,
+	},
+};
+
+static struct smsc911x_platform_config smsc911x_config = {
+	.irq_polarity	= SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
+	.irq_type	= SMSC911X_IRQ_TYPE_OPEN_DRAIN,
+	.flags		= SMSC911X_USE_32BIT,
+	.phy_interface	= PHY_INTERFACE_MODE_MII,
+};
+
+static struct platform_device smsc911x_device = {
+	.name		= "smsc911x",
+	.id		= -1,
+	.num_resources	= ARRAY_SIZE(smsc911x_resources),
+	.resource	= smsc911x_resources,
+	.dev = {
+		.platform_data = &smsc911x_config,
+	},
+};
+
+static struct resource smbus_fpga_resource = {
+	.start		= 0x07fff9e0,
+	.end		= 0x07fff9e0 + SZ_32 - 1,
+	.flags		= IORESOURCE_MEM,
+};
+
+static struct platform_device smbus_fpga_device = {
+	.name		= "i2c-sdk7786",
+	.id		= 0,
+	.num_resources	= 1,
+	.resource	= &smbus_fpga_resource,
+};
+
+static struct resource smbus_pcie_resource = {
+	.start		= 0x07fffc30,
+	.end		= 0x07fffc30 + SZ_32 - 1,
+	.flags		= IORESOURCE_MEM,
+};
+
+static struct platform_device smbus_pcie_device = {
+	.name		= "i2c-sdk7786",
+	.id		= 1,
+	.num_resources	= 1,
+	.resource	= &smbus_pcie_resource,
+};
+
+static struct i2c_board_info __initdata sdk7786_i2c_devices[] = {
+	{
+		I2C_BOARD_INFO("max6900", 0x68),
+	},
+};
+
+static struct platform_device *sh7786_devices[] __initdata = {
+	&heartbeat_device,
+	&smsc911x_device,
+	&smbus_fpga_device,
+	&smbus_pcie_device,
+};
+
+static int sdk7786_i2c_setup(void)
+{
+	unsigned int tmp;
+
+	/*
+	 * Hand over I2C control to the FPGA.
+	 */
+	tmp = fpga_read_reg(SBCR);
+	tmp &= ~SCBR_I2CCEN;
+	tmp |= SCBR_I2CMEN;
+	fpga_write_reg(tmp, SBCR);
+
+	return i2c_register_board_info(0, sdk7786_i2c_devices,
+				       ARRAY_SIZE(sdk7786_i2c_devices));
+}
+
+static int __init sdk7786_devices_setup(void)
+{
+	int ret;
+
+	ret = platform_add_devices(sh7786_devices, ARRAY_SIZE(sh7786_devices));
+	if (unlikely(ret != 0))
+		return ret;
+
+	return sdk7786_i2c_setup();
+}
+__initcall(sdk7786_devices_setup);
+
+static int sdk7786_mode_pins(void)
+{
+	return fpga_read_reg(MODSWR);
+}
+
+static int sdk7786_clk_init(void)
+{
+	struct clk *clk;
+	int ret;
+
+	/*
+	 * Only handle the EXTAL case, anyone interfacing a crystal
+	 * resonator will need to provide their own input clock.
+	 */
+	if (test_mode_pin(MODE_PIN9))
+		return -EINVAL;
+
+	clk = clk_get(NULL, "extal");
+	if (!clk || IS_ERR(clk))
+		return PTR_ERR(clk);
+	ret = clk_set_rate(clk, 33333333);
+	clk_put(clk);
+
+	return ret;
+}
+
+static void sdk7786_restart(char *cmd)
+{
+	fpga_write_reg(0xa5a5, SRSTR);
+}
+
+/* Initialize the board */
+static void __init sdk7786_setup(char **cmdline_p)
+{
+	pr_info("Renesas Technology Europe SDK7786 support:\n");
+
+	sdk7786_fpga_init();
+
+	pr_info("\tPCB revision:\t%d\n", fpga_read_reg(PCBRR) & 0xf);
+
+	machine_ops.restart = sdk7786_restart;
+}
+
+/*
+ * The Machine Vector
+ */
+static struct sh_machine_vector mv_sdk7786 __initmv = {
+	.mv_name		= "SDK7786",
+	.mv_setup		= sdk7786_setup,
+	.mv_mode_pins		= sdk7786_mode_pins,
+	.mv_clk_init		= sdk7786_clk_init,
+	.mv_init_irq		= sdk7786_init_irq,
+};
diff --git a/arch/sh/boards/mach-se/7206/io.c b/arch/sh/boards/mach-se/7206/io.c
index 1804556..adadc77 100644
--- a/arch/sh/boards/mach-se/7206/io.c
+++ b/arch/sh/boards/mach-se/7206/io.c
@@ -16,7 +16,7 @@
 
 static inline void delay(void)
 {
-	ctrl_inw(0x20000000);  /* P2 ROM Area */
+	__raw_readw(0x20000000);  /* P2 ROM Area */
 }
 
 /* MS7750 requires special versions of in*, out* routines, since
diff --git a/arch/sh/boards/mach-se/7206/irq.c b/arch/sh/boards/mach-se/7206/irq.c
index aef7f05..8d82175 100644
--- a/arch/sh/boards/mach-se/7206/irq.c
+++ b/arch/sh/boards/mach-se/7206/irq.c
@@ -32,12 +32,12 @@
 	unsigned short msk0,msk1;
 
 	/* Set the priority in IPR to 0 */
-	val = ctrl_inw(INTC_IPR01);
+	val = __raw_readw(INTC_IPR01);
 	val &= mask;
-	ctrl_outw(val, INTC_IPR01);
+	__raw_writew(val, INTC_IPR01);
 	/* FPGA mask set */
-	msk0 = ctrl_inw(INTMSK0);
-	msk1 = ctrl_inw(INTMSK1);
+	msk0 = __raw_readw(INTMSK0);
+	msk1 = __raw_readw(INTMSK1);
 
 	switch (irq) {
 	case IRQ0_IRQ:
@@ -51,8 +51,8 @@
 		msk1 |= 0x00ff;
 		break;
 	}
-	ctrl_outw(msk0, INTMSK0);
-	ctrl_outw(msk1, INTMSK1);
+	__raw_writew(msk0, INTMSK0);
+	__raw_writew(msk1, INTMSK1);
 }
 
 static void enable_se7206_irq(unsigned int irq)
@@ -62,13 +62,13 @@
 	unsigned short msk0,msk1;
 
 	/* Set priority in IPR back to original value */
-	val = ctrl_inw(INTC_IPR01);
+	val = __raw_readw(INTC_IPR01);
 	val |= value;
-	ctrl_outw(val, INTC_IPR01);
+	__raw_writew(val, INTC_IPR01);
 
 	/* FPGA mask reset */
-	msk0 = ctrl_inw(INTMSK0);
-	msk1 = ctrl_inw(INTMSK1);
+	msk0 = __raw_readw(INTMSK0);
+	msk1 = __raw_readw(INTMSK1);
 
 	switch (irq) {
 	case IRQ0_IRQ:
@@ -82,19 +82,20 @@
 		msk1 &= ~0x00ff;
 		break;
 	}
-	ctrl_outw(msk0, INTMSK0);
-	ctrl_outw(msk1, INTMSK1);
+	__raw_writew(msk0, INTMSK0);
+	__raw_writew(msk1, INTMSK1);
 }
 
 static void eoi_se7206_irq(unsigned int irq)
 {
 	unsigned short sts0,sts1;
+	struct irq_desc *desc = irq_to_desc(irq);
 
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
+	if (!(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
 		enable_se7206_irq(irq);
 	/* FPGA isr clear */
-	sts0 = ctrl_inw(INTSTS0);
-	sts1 = ctrl_inw(INTSTS1);
+	sts0 = __raw_readw(INTSTS0);
+	sts1 = __raw_readw(INTSTS1);
 
 	switch (irq) {
 	case IRQ0_IRQ:
@@ -108,8 +109,8 @@
 		sts1 &= ~0x00ff;
 		break;
 	}
-	ctrl_outw(sts0, INTSTS0);
-	ctrl_outw(sts1, INTSTS1);
+	__raw_writew(sts0, INTSTS0);
+	__raw_writew(sts1, INTSTS1);
 }
 
 static struct irq_chip se7206_irq_chip __read_mostly = {
@@ -136,11 +137,11 @@
 	make_se7206_irq(IRQ0_IRQ); /* SMC91C111 */
 	make_se7206_irq(IRQ1_IRQ); /* ATA */
 	make_se7206_irq(IRQ3_IRQ); /* SLOT / PCM */
-	ctrl_outw(inw(INTC_ICR1) | 0x000b ,INTC_ICR1 ) ; /* ICR1 */
+	__raw_writew(inw(INTC_ICR1) | 0x000b ,INTC_ICR1 ) ; /* ICR1 */
 
 	/* FPGA System register setup*/
-	ctrl_outw(0x0000,INTSTS0); /* Clear INTSTS0 */
-	ctrl_outw(0x0000,INTSTS1); /* Clear INTSTS1 */
+	__raw_writew(0x0000,INTSTS0); /* Clear INTSTS0 */
+	__raw_writew(0x0000,INTSTS1); /* Clear INTSTS1 */
 	/* IRQ0=LAN, IRQ1=ATA, IRQ3=SLT,PCM */
-	ctrl_outw(0x0001,INTSEL);
+	__raw_writew(0x0001,INTSEL);
 }
diff --git a/arch/sh/boards/mach-se/7206/setup.c b/arch/sh/boards/mach-se/7206/setup.c
index f5466384..8f5c65d 100644
--- a/arch/sh/boards/mach-se/7206/setup.c
+++ b/arch/sh/boards/mach-se/7206/setup.c
@@ -50,15 +50,12 @@
 static struct heartbeat_data heartbeat_data = {
 	.bit_pos	= heartbeat_bit_pos,
 	.nr_bits	= ARRAY_SIZE(heartbeat_bit_pos),
-	.regsize	= 32,
 };
 
-static struct resource heartbeat_resources[] = {
-	[0] = {
-		.start	= PA_LED,
-		.end	= PA_LED,
-		.flags	= IORESOURCE_MEM,
-	},
+static struct resource heartbeat_resource = {
+	.start	= PA_LED,
+	.end	= PA_LED,
+	.flags	= IORESOURCE_MEM | IORESOURCE_MEM_32BIT,
 };
 
 static struct platform_device heartbeat_device = {
@@ -67,8 +64,8 @@
 	.dev	= {
 		.platform_data	= &heartbeat_data,
 	},
-	.num_resources	= ARRAY_SIZE(heartbeat_resources),
-	.resource	= heartbeat_resources,
+	.num_resources	= 1,
+	.resource	= &heartbeat_resource,
 };
 
 static struct platform_device *se7206_devices[] __initdata = {
diff --git a/arch/sh/boards/mach-se/7343/irq.c b/arch/sh/boards/mach-se/7343/irq.c
index 051c29d..d4305c2 100644
--- a/arch/sh/boards/mach-se/7343/irq.c
+++ b/arch/sh/boards/mach-se/7343/irq.c
@@ -16,16 +16,18 @@
 #include <linux/io.h>
 #include <mach-se/mach/se7343.h>
 
+unsigned int se7343_fpga_irq[SE7343_FPGA_IRQ_NR] = { 0, };
+
 static void disable_se7343_irq(unsigned int irq)
 {
-	unsigned int bit = irq - SE7343_FPGA_IRQ_BASE;
-	ctrl_outw(ctrl_inw(PA_CPLD_IMSK) | 1 << bit, PA_CPLD_IMSK);
+	unsigned int bit = (unsigned int)get_irq_chip_data(irq);
+	__raw_writew(__raw_readw(PA_CPLD_IMSK) | 1 << bit, PA_CPLD_IMSK);
 }
 
 static void enable_se7343_irq(unsigned int irq)
 {
-	unsigned int bit = irq - SE7343_FPGA_IRQ_BASE;
-	ctrl_outw(ctrl_inw(PA_CPLD_IMSK) & ~(1 << bit), PA_CPLD_IMSK);
+	unsigned int bit = (unsigned int)get_irq_chip_data(irq);
+	__raw_writew(__raw_readw(PA_CPLD_IMSK) & ~(1 << bit), PA_CPLD_IMSK);
 }
 
 static struct irq_chip se7343_irq_chip __read_mostly = {
@@ -37,19 +39,16 @@
 
 static void se7343_irq_demux(unsigned int irq, struct irq_desc *desc)
 {
-	unsigned short intv = ctrl_inw(PA_CPLD_ST);
-	struct irq_desc *ext_desc;
-	unsigned int ext_irq = SE7343_FPGA_IRQ_BASE;
+	unsigned short intv = __raw_readw(PA_CPLD_ST);
+	unsigned int ext_irq = 0;
 
 	intv &= (1 << SE7343_FPGA_IRQ_NR) - 1;
 
-	while (intv) {
-		if (intv & 1) {
-			ext_desc = irq_desc + ext_irq;
-			handle_level_irq(ext_irq, ext_desc);
-		}
-		intv >>= 1;
-		ext_irq++;
+	for (; intv; intv >>= 1, ext_irq++) {
+		if (!(intv & 1))
+			continue;
+
+		generic_handle_irq(se7343_fpga_irq[ext_irq]);
 	}
 }
 
@@ -58,16 +57,24 @@
  */
 void __init init_7343se_IRQ(void)
 {
-	int i;
+	int i, irq;
 
-	ctrl_outw(0, PA_CPLD_IMSK);	/* disable all irqs */
-	ctrl_outw(0x2000, 0xb03fffec);	/* mrshpc irq enable */
+	__raw_writew(0, PA_CPLD_IMSK);	/* disable all irqs */
+	__raw_writew(0x2000, 0xb03fffec);	/* mrshpc irq enable */
 
-	for (i = 0; i < SE7343_FPGA_IRQ_NR; i++)
-		set_irq_chip_and_handler_name(SE7343_FPGA_IRQ_BASE + i,
+	for (i = 0; i < SE7343_FPGA_IRQ_NR; i++) {
+		irq = create_irq();
+		if (irq < 0)
+			return;
+		se7343_fpga_irq[i] = irq;
+
+		set_irq_chip_and_handler_name(se7343_fpga_irq[i],
 					      &se7343_irq_chip,
 					      handle_level_irq, "level");
 
+		set_irq_chip_data(se7343_fpga_irq[i], (void *)i);
+	}
+
 	set_irq_chained_handler(IRQ0_IRQ, se7343_irq_demux);
 	set_irq_type(IRQ0_IRQ, IRQ_TYPE_LEVEL_LOW);
 	set_irq_chained_handler(IRQ1_IRQ, se7343_irq_demux);
diff --git a/arch/sh/boards/mach-se/7343/setup.c b/arch/sh/boards/mach-se/7343/setup.c
index 4de56f3..d2370af 100644
--- a/arch/sh/boards/mach-se/7343/setup.c
+++ b/arch/sh/boards/mach-se/7343/setup.c
@@ -11,26 +11,17 @@
 #include <asm/irq.h>
 #include <asm/io.h>
 
-static struct resource heartbeat_resources[] = {
-	[0] = {
-		.start	= PA_LED,
-		.end	= PA_LED,
-		.flags	= IORESOURCE_MEM,
-	},
-};
-
-static struct heartbeat_data heartbeat_data = {
-	.regsize = 16,
+static struct resource heartbeat_resource = {
+	.start	= PA_LED,
+	.end	= PA_LED,
+	.flags	= IORESOURCE_MEM | IORESOURCE_MEM_16BIT,
 };
 
 static struct platform_device heartbeat_device = {
 	.name		= "heartbeat",
 	.id		= -1,
-	.dev = {
-		.platform_data = &heartbeat_data,
-	},
-	.num_resources	= ARRAY_SIZE(heartbeat_resources),
-	.resource	= heartbeat_resources,
+	.num_resources	= 1,
+	.resource	= &heartbeat_resource,
 };
 
 static struct mtd_partition nor_flash_partitions[] = {
@@ -82,7 +73,6 @@
 		.mapbase	= 0x16000000,
 		.regshift	= 1,
 		.flags		= ST16C2550C_FLAGS,
-		.irq		= UARTA_IRQ,
 		.uartclk	= 7372800,
 	},
 	[1] = {
@@ -90,7 +80,6 @@
 		.mapbase	= 0x17000000,
 		.regshift	= 1,
 		.flags		= ST16C2550C_FLAGS,
-		.irq		= UARTB_IRQ,
 		.uartclk	= 7372800,
 	},
 	{ },
@@ -121,7 +110,7 @@
 		.flags  = IORESOURCE_MEM,
 	},
 	[2] = {
-		.start  = USB_IRQ,
+		/* Filled in later */
 		.flags  = IORESOURCE_IRQ,
 	},
 };
@@ -138,8 +127,8 @@
 static struct platform_device usb_device = {
 	.name			= "isp116x-hcd",
 	.id			= -1,
-	.num_resources  	= ARRAY_SIZE(usb_resources),
-	.resource       	= usb_resources,
+	.num_resources		= ARRAY_SIZE(usb_resources),
+	.resource		= usb_resources,
 	.dev			= {
 		.platform_data	= &usb_platform_data,
 	},
@@ -155,6 +144,13 @@
 
 static int __init sh7343se_devices_setup(void)
 {
+	/* Wire-up dynamic vectors */
+	serial_platform_data[0].irq = se7343_fpga_irq[SE7343_FPGA_IRQ_UARTA];
+	serial_platform_data[1].irq = se7343_fpga_irq[SE7343_FPGA_IRQ_UARTB];
+
+	usb_resources[2].start = usb_resources[2].end =
+		se7343_fpga_irq[SE7343_FPGA_IRQ_USB];
+
 	return platform_add_devices(sh7343se_platform_devices,
 				    ARRAY_SIZE(sh7343se_platform_devices));
 }
@@ -165,10 +161,10 @@
  */
 static void __init sh7343se_setup(char **cmdline_p)
 {
-	ctrl_outw(0xf900, FPGA_OUT);	/* FPGA */
+	__raw_writew(0xf900, FPGA_OUT);	/* FPGA */
 
-	ctrl_outw(0x0002, PORT_PECR);	/* PORT E 1 = IRQ5 */
-	ctrl_outw(0x0020, PORT_PSELD);
+	__raw_writew(0x0002, PORT_PECR);	/* PORT E 1 = IRQ5 */
+	__raw_writew(0x0020, PORT_PSELD);
 
 	printk(KERN_INFO "MS7343CP01 Setup...done\n");
 }
@@ -179,6 +175,5 @@
 static struct sh_machine_vector mv_7343se __initmv = {
 	.mv_name = "SolutionEngine 7343",
 	.mv_setup = sh7343se_setup,
-	.mv_nr_irqs = SE7343_FPGA_IRQ_BASE + SE7343_FPGA_IRQ_NR,
 	.mv_init_irq = init_7343se_IRQ,
 };
diff --git a/arch/sh/boards/mach-se/770x/irq.c b/arch/sh/boards/mach-se/770x/irq.c
index ec1fea5..1028c17 100644
--- a/arch/sh/boards/mach-se/770x/irq.c
+++ b/arch/sh/boards/mach-se/770x/irq.c
@@ -96,13 +96,13 @@
 void __init init_se_IRQ(void)
 {
 	/* Disable all interrupts */
-	ctrl_outw(0, BCR_ILCRA);
-	ctrl_outw(0, BCR_ILCRB);
-	ctrl_outw(0, BCR_ILCRC);
-	ctrl_outw(0, BCR_ILCRD);
-	ctrl_outw(0, BCR_ILCRE);
-	ctrl_outw(0, BCR_ILCRF);
-	ctrl_outw(0, BCR_ILCRG);
+	__raw_writew(0, BCR_ILCRA);
+	__raw_writew(0, BCR_ILCRB);
+	__raw_writew(0, BCR_ILCRC);
+	__raw_writew(0, BCR_ILCRD);
+	__raw_writew(0, BCR_ILCRE);
+	__raw_writew(0, BCR_ILCRF);
+	__raw_writew(0, BCR_ILCRG);
 
 	register_ipr_controller(&ipr_irq_desc);
 }
diff --git a/arch/sh/boards/mach-se/770x/setup.c b/arch/sh/boards/mach-se/770x/setup.c
index 527eb6b..66d39d1 100644
--- a/arch/sh/boards/mach-se/770x/setup.c
+++ b/arch/sh/boards/mach-se/770x/setup.c
@@ -93,15 +93,12 @@
 static struct heartbeat_data heartbeat_data = {
 	.bit_pos	= heartbeat_bit_pos,
 	.nr_bits	= ARRAY_SIZE(heartbeat_bit_pos),
-	.regsize	= 16,
 };
 
-static struct resource heartbeat_resources[] = {
-	[0] = {
-		.start	= PA_LED,
-		.end	= PA_LED,
-		.flags	= IORESOURCE_MEM,
-	},
+static struct resource heartbeat_resource = {
+	.start	= PA_LED,
+	.end	= PA_LED,
+	.flags	= IORESOURCE_MEM | IORESOURCE_MEM_16BIT,
 };
 
 static struct platform_device heartbeat_device = {
@@ -110,8 +107,8 @@
 	.dev	= {
 		.platform_data	= &heartbeat_data,
 	},
-	.num_resources	= ARRAY_SIZE(heartbeat_resources),
-	.resource	= heartbeat_resources,
+	.num_resources	= 1,
+	.resource	= &heartbeat_resource,
 };
 
 #if defined(CONFIG_CPU_SUBTYPE_SH7710) ||\
diff --git a/arch/sh/boards/mach-se/7721/irq.c b/arch/sh/boards/mach-se/7721/irq.c
index b417acc..d85022e 100644
--- a/arch/sh/boards/mach-se/7721/irq.c
+++ b/arch/sh/boards/mach-se/7721/irq.c
@@ -38,7 +38,7 @@
 void __init init_se7721_IRQ(void)
 {
 	/* PPCR */
-	ctrl_outw(ctrl_inw(0xa4050118) & ~0x00ff, 0xa4050118);
+	__raw_writew(__raw_readw(0xa4050118) & ~0x00ff, 0xa4050118);
 
 	register_intc_controller(&intc_desc);
 	intc_set_priority(MRSHPC_IRQ0, 0xf - MRSHPC_IRQ0);
diff --git a/arch/sh/boards/mach-se/7721/setup.c b/arch/sh/boards/mach-se/7721/setup.c
index 55af4c3..7416ad7 100644
--- a/arch/sh/boards/mach-se/7721/setup.c
+++ b/arch/sh/boards/mach-se/7721/setup.c
@@ -23,15 +23,12 @@
 static struct heartbeat_data heartbeat_data = {
 	.bit_pos	= heartbeat_bit_pos,
 	.nr_bits	= ARRAY_SIZE(heartbeat_bit_pos),
-	.regsize	= 16,
 };
 
-static struct resource heartbeat_resources[] = {
-	[0] = {
-		.start	= PA_LED,
-		.end	= PA_LED,
-		.flags	= IORESOURCE_MEM,
-	},
+static struct resource heartbeat_resource = {
+	.start	= PA_LED,
+	.end	= PA_LED,
+	.flags	= IORESOURCE_MEM | IORESOURCE_MEM_16BIT,
 };
 
 static struct platform_device heartbeat_device = {
@@ -40,8 +37,8 @@
 	.dev	= {
 		.platform_data	= &heartbeat_data,
 	},
-	.num_resources	= ARRAY_SIZE(heartbeat_resources),
-	.resource	= heartbeat_resources,
+	.num_resources	= 1,
+	.resource	= &heartbeat_resource,
 };
 
 static struct resource cf_ide_resources[] = {
@@ -83,10 +80,10 @@
 static void __init se7721_setup(char **cmdline_p)
 {
 	/* for USB */
-	ctrl_outw(0x0000, 0xA405010C);	/* PGCR */
-	ctrl_outw(0x0000, 0xA405010E);	/* PHCR */
-	ctrl_outw(0x00AA, 0xA4050118);	/* PPCR */
-	ctrl_outw(0x0000, 0xA4050124);	/* PSELA */
+	__raw_writew(0x0000, 0xA405010C);	/* PGCR */
+	__raw_writew(0x0000, 0xA405010E);	/* PHCR */
+	__raw_writew(0x00AA, 0xA4050118);	/* PPCR */
+	__raw_writew(0x0000, 0xA4050124);	/* PSELA */
 }
 
 /*
diff --git a/arch/sh/boards/mach-se/7722/irq.c b/arch/sh/boards/mach-se/7722/irq.c
index b221b68..61605db 100644
--- a/arch/sh/boards/mach-se/7722/irq.c
+++ b/arch/sh/boards/mach-se/7722/irq.c
@@ -21,13 +21,13 @@
 static void disable_se7722_irq(unsigned int irq)
 {
 	unsigned int bit = (unsigned int)get_irq_chip_data(irq);
-	ctrl_outw(ctrl_inw(IRQ01_MASK) | 1 << bit, IRQ01_MASK);
+	__raw_writew(__raw_readw(IRQ01_MASK) | 1 << bit, IRQ01_MASK);
 }
 
 static void enable_se7722_irq(unsigned int irq)
 {
 	unsigned int bit = (unsigned int)get_irq_chip_data(irq);
-	ctrl_outw(ctrl_inw(IRQ01_MASK) & ~(1 << bit), IRQ01_MASK);
+	__raw_writew(__raw_readw(IRQ01_MASK) & ~(1 << bit), IRQ01_MASK);
 }
 
 static struct irq_chip se7722_irq_chip __read_mostly = {
@@ -39,7 +39,7 @@
 
 static void se7722_irq_demux(unsigned int irq, struct irq_desc *desc)
 {
-	unsigned short intv = ctrl_inw(IRQ01_STS);
+	unsigned short intv = __raw_readw(IRQ01_STS);
 	unsigned int ext_irq = 0;
 
 	intv &= (1 << SE7722_FPGA_IRQ_NR) - 1;
@@ -59,8 +59,8 @@
 {
 	int i, irq;
 
-	ctrl_outw(0, IRQ01_MASK);       /* disable all irqs */
-	ctrl_outw(0x2000, 0xb03fffec);  /* mrshpc irq enable */
+	__raw_writew(0, IRQ01_MASK);       /* disable all irqs */
+	__raw_writew(0x2000, 0xb03fffec);  /* mrshpc irq enable */
 
 	for (i = 0; i < SE7722_FPGA_IRQ_NR; i++) {
 		irq = create_irq();
diff --git a/arch/sh/boards/mach-se/7722/setup.c b/arch/sh/boards/mach-se/7722/setup.c
index b1cb942..80a4e57 100644
--- a/arch/sh/boards/mach-se/7722/setup.c
+++ b/arch/sh/boards/mach-se/7722/setup.c
@@ -25,26 +25,17 @@
 #include <cpu/sh7722.h>
 
 /* Heartbeat */
-static struct heartbeat_data heartbeat_data = {
-	.regsize = 16,
-};
-
-static struct resource heartbeat_resources[] = {
-	[0] = {
-		.start  = PA_LED,
-		.end    = PA_LED,
-		.flags  = IORESOURCE_MEM,
-	},
+static struct resource heartbeat_resource = {
+	.start  = PA_LED,
+	.end    = PA_LED,
+	.flags  = IORESOURCE_MEM | IORESOURCE_MEM_16BIT,
 };
 
 static struct platform_device heartbeat_device = {
 	.name           = "heartbeat",
 	.id             = -1,
-	.dev = {
-		.platform_data = &heartbeat_data,
-	},
-	.num_resources  = ARRAY_SIZE(heartbeat_resources),
-	.resource       = heartbeat_resources,
+	.num_resources  = 1,
+	.resource       = &heartbeat_resource,
 };
 
 /* SMC91x */
@@ -165,32 +156,32 @@
 
 static void __init se7722_setup(char **cmdline_p)
 {
-	ctrl_outw(0x010D, FPGA_OUT);    /* FPGA */
+	__raw_writew(0x010D, FPGA_OUT);    /* FPGA */
 
-	ctrl_outw(0x0000, PORT_PECR);   /* PORT E 1 = IRQ5 ,E 0 = BS */
-	ctrl_outw(0x1000, PORT_PJCR);   /* PORT J 1 = IRQ1,J 0 =IRQ0 */
+	__raw_writew(0x0000, PORT_PECR);   /* PORT E 1 = IRQ5 ,E 0 = BS */
+	__raw_writew(0x1000, PORT_PJCR);   /* PORT J 1 = IRQ1,J 0 =IRQ0 */
 
 	/* LCDC I/O */
-	ctrl_outw(0x0020, PORT_PSELD);
+	__raw_writew(0x0020, PORT_PSELD);
 
 	/* SIOF1*/
-	ctrl_outw(0x0003, PORT_PSELB);
-	ctrl_outw(0xe000, PORT_PSELC);
-	ctrl_outw(0x0000, PORT_PKCR);
+	__raw_writew(0x0003, PORT_PSELB);
+	__raw_writew(0xe000, PORT_PSELC);
+	__raw_writew(0x0000, PORT_PKCR);
 
 	/* LCDC */
-	ctrl_outw(0x4020, PORT_PHCR);
-	ctrl_outw(0x0000, PORT_PLCR);
-	ctrl_outw(0x0000, PORT_PMCR);
-	ctrl_outw(0x0002, PORT_PRCR);
-	ctrl_outw(0x0000, PORT_PXCR);   /* LCDC,CS6A */
+	__raw_writew(0x4020, PORT_PHCR);
+	__raw_writew(0x0000, PORT_PLCR);
+	__raw_writew(0x0000, PORT_PMCR);
+	__raw_writew(0x0002, PORT_PRCR);
+	__raw_writew(0x0000, PORT_PXCR);   /* LCDC,CS6A */
 
 	/* KEYSC */
-	ctrl_outw(0x0A10, PORT_PSELA); /* BS,SHHID2 */
-	ctrl_outw(0x0000, PORT_PYCR);
-	ctrl_outw(0x0000, PORT_PZCR);
-	ctrl_outw(ctrl_inw(PORT_HIZCRA) & ~0x4000, PORT_HIZCRA);
-	ctrl_outw(ctrl_inw(PORT_HIZCRC) & ~0xc000, PORT_HIZCRC);
+	__raw_writew(0x0A10, PORT_PSELA); /* BS,SHHID2 */
+	__raw_writew(0x0000, PORT_PYCR);
+	__raw_writew(0x0000, PORT_PZCR);
+	__raw_writew(__raw_readw(PORT_HIZCRA) & ~0x4000, PORT_HIZCRA);
+	__raw_writew(__raw_readw(PORT_HIZCRC) & ~0xc000, PORT_HIZCRC);
 }
 
 /*
diff --git a/arch/sh/boards/mach-se/7724/irq.c b/arch/sh/boards/mach-se/7724/irq.c
index f76cf3b..0942be2 100644
--- a/arch/sh/boards/mach-se/7724/irq.c
+++ b/arch/sh/boards/mach-se/7724/irq.c
@@ -72,14 +72,14 @@
 {
 	struct fpga_irq set = get_fpga_irq(fpga2irq(irq));
 	unsigned int bit = irq - set.base;
-	ctrl_outw(ctrl_inw(set.mraddr) | 0x0001 << bit, set.mraddr);
+	__raw_writew(__raw_readw(set.mraddr) | 0x0001 << bit, set.mraddr);
 }
 
 static void enable_se7724_irq(unsigned int irq)
 {
 	struct fpga_irq set = get_fpga_irq(fpga2irq(irq));
 	unsigned int bit = irq - set.base;
-	ctrl_outw(ctrl_inw(set.mraddr) & ~(0x0001 << bit), set.mraddr);
+	__raw_writew(__raw_readw(set.mraddr) & ~(0x0001 << bit), set.mraddr);
 }
 
 static struct irq_chip se7724_irq_chip __read_mostly = {
@@ -92,19 +92,16 @@
 static void se7724_irq_demux(unsigned int irq, struct irq_desc *desc)
 {
 	struct fpga_irq set = get_fpga_irq(irq);
-	unsigned short intv = ctrl_inw(set.sraddr);
-	struct irq_desc *ext_desc;
+	unsigned short intv = __raw_readw(set.sraddr);
 	unsigned int ext_irq = set.base;
 
 	intv &= set.mask;
 
-	while (intv) {
-		if (intv & 0x0001) {
-			ext_desc = irq_desc + ext_irq;
-			handle_level_irq(ext_irq, ext_desc);
-		}
-		intv >>= 1;
-		ext_irq++;
+	for (; intv; intv >>= 1, ext_irq++) {
+		if (!(intv & 1))
+			continue;
+
+		generic_handle_irq(ext_irq);
 	}
 }
 
@@ -113,20 +110,39 @@
  */
 void __init init_se7724_IRQ(void)
 {
-	int i;
+	int i, nid = cpu_to_node(boot_cpu_data);
 
-	ctrl_outw(0xffff, IRQ0_MR);  /* mask all */
-	ctrl_outw(0xffff, IRQ1_MR);  /* mask all */
-	ctrl_outw(0xffff, IRQ2_MR);  /* mask all */
-	ctrl_outw(0x0000, IRQ0_SR);  /* clear irq */
-	ctrl_outw(0x0000, IRQ1_SR);  /* clear irq */
-	ctrl_outw(0x0000, IRQ2_SR);  /* clear irq */
-	ctrl_outw(0x002a, IRQ_MODE); /* set irq type */
+	__raw_writew(0xffff, IRQ0_MR);  /* mask all */
+	__raw_writew(0xffff, IRQ1_MR);  /* mask all */
+	__raw_writew(0xffff, IRQ2_MR);  /* mask all */
+	__raw_writew(0x0000, IRQ0_SR);  /* clear irq */
+	__raw_writew(0x0000, IRQ1_SR);  /* clear irq */
+	__raw_writew(0x0000, IRQ2_SR);  /* clear irq */
+	__raw_writew(0x002a, IRQ_MODE); /* set irq type */
 
-	for (i = 0; i < SE7724_FPGA_IRQ_NR; i++)
-		set_irq_chip_and_handler_name(SE7724_FPGA_IRQ_BASE + i,
+	for (i = 0; i < SE7724_FPGA_IRQ_NR; i++) {
+		int irq, wanted;
+
+		wanted = SE7724_FPGA_IRQ_BASE + i;
+
+		irq = create_irq_nr(wanted, nid);
+		if (unlikely(irq == 0)) {
+			pr_err("%s: failed hooking irq %d for FPGA\n",
+			       __func__, wanted);
+			return;
+		}
+
+		if (unlikely(irq != wanted)) {
+			pr_err("%s: got irq %d but wanted %d, bailing.\n",
+			       __func__, irq, wanted);
+			destroy_irq(irq);
+			return;
+		}
+
+		set_irq_chip_and_handler_name(irq,
 					      &se7724_irq_chip,
 					      handle_level_irq, "level");
+	}
 
 	set_irq_chained_handler(IRQ0_IRQ, se7724_irq_demux);
 	set_irq_type(IRQ0_IRQ, IRQ_TYPE_LEVEL_LOW);
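
The se7724 FPGA interrupt rework above moves the demux path from open-coded irq_desc indexing to generic_handle_irq(), and registers each vector explicitly through the sparse IRQ API. A condensed sketch of the registration side, with the EXAMPLE_* names and example_irq_chip standing in for the board's SE7724_FPGA_IRQ_BASE, SE7724_FPGA_IRQ_NR and se7724_irq_chip, might look like:

/*
 * Condensed sketch of the sparse IRQ registration pattern used above;
 * the EXAMPLE_* macros and example_irq_chip are placeholders only.
 */
#include <linux/irq.h>
#include <linux/kernel.h>

#define EXAMPLE_IRQ_BASE	220	/* hypothetical first FPGA vector */
#define EXAMPLE_IRQ_NR		11	/* hypothetical number of vectors */

static struct irq_chip example_irq_chip = {
	.name	= "example-fpga",	/* a real chip also supplies mask/unmask */
};

static int __init example_hook_fpga_irqs(void)
{
	/* node lookup mirrors the board code above */
	int i, nid = cpu_to_node(boot_cpu_data);

	for (i = 0; i < EXAMPLE_IRQ_NR; i++) {
		int wanted = EXAMPLE_IRQ_BASE + i;
		int irq = create_irq_nr(wanted, nid);

		if (unlikely(irq == 0))
			return -EBUSY;		/* vector could not be reserved */

		if (unlikely(irq != wanted)) {
			destroy_irq(irq);	/* wrong number handed back, give it up */
			return -EINVAL;
		}

		set_irq_chip_and_handler_name(irq, &example_irq_chip,
					      handle_level_irq, "level");
	}

	return 0;
}

Reserving the exact vector with create_irq_nr() and bailing out (with destroy_irq()) when a different number comes back keeps the FPGA's fixed vector layout intact under sparse IRQ.
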
diff --git a/arch/sh/boards/mach-se/7724/sdram.S b/arch/sh/boards/mach-se/7724/sdram.S
index 9040167..6fa4734 100644
--- a/arch/sh/boards/mach-se/7724/sdram.S
+++ b/arch/sh/boards/mach-se/7724/sdram.S
@@ -39,6 +39,10 @@
 
 	/* DBSC: put memory in auto-refresh mode */
 
+	mov.l	@(SH_SLEEP_MODE, r5), r0
+	tst	#SUSP_SH_RSTANDBY, r0
+	bf	resume_rstandby
+
 	ED 0xFD000040, 0x00000000 /* DBRFPDN0 */
 	WAIT 1
 	ED 0xFD000014, 0x00000002 /* DBCMDCNT (PALL) */
@@ -49,4 +53,79 @@
 	rts
 	 nop
 
+resume_rstandby:
+
+	/* CPG: setup clocks before restarting external memory */
+
+	ED 0xA4150024, 0x00004000 /* PLLCR */
+
+	mov.l	FRQCRA,r0
+	mov.l	@r0,r3
+	mov.l	KICK,r1
+	or	r1, r3
+	mov.l	r3, @r0
+
+	mov.l	LSTATS,r0
+	mov	#1,r1
+WAIT_LSTATS:
+	mov.l	@r0,r3
+	tst	r1,r3
+	bf	WAIT_LSTATS
+
+	/* DBSC: re-initialize and put in auto-refresh */
+
+	ED 0xFD000108, 0x00000181 /* DBPDCNT0 */
+	ED 0xFD000020, 0x015B0002 /* DBCONF */
+	ED 0xFD000030, 0x03071502 /* DBTR0 */
+	ED 0xFD000034, 0x02020102 /* DBTR1 */
+	ED 0xFD000038, 0x01090405 /* DBTR2 */
+	ED 0xFD00003C, 0x00000002 /* DBTR3 */
+	ED 0xFD000008, 0x00000005 /* DBKIND */
+	ED 0xFD000040, 0x00000001 /* DBRFPDN0 */
+	ED 0xFD000040, 0x00000000 /* DBRFPDN0 */
+	ED 0xFD000018, 0x00000001 /* DBCKECNT */
+
+	mov	#100,r0
+WAIT_400NS:
+	dt	r0
+	bf	WAIT_400NS
+
+	ED 0xFD000014, 0x00000002 /* DBCMDCNT (PALL) */
+	ED 0xFD000060, 0x00020000 /* DBMRCNT (EMR2) */
+	ED 0xFD000060, 0x00030000 /* DBMRCNT (EMR3) */
+	ED 0xFD000060, 0x00010004 /* DBMRCNT (EMR) */
+	ED 0xFD000060, 0x00000532 /* DBMRCNT (MRS) */
+	ED 0xFD000014, 0x00000002 /* DBCMDCNT (PALL) */
+	ED 0xFD000014, 0x00000004 /* DBCMDCNT (REF) */
+	ED 0xFD000014, 0x00000004 /* DBCMDCNT (REF) */
+	ED 0xFD000060, 0x00000432 /* DBMRCNT (MRS) */
+	ED 0xFD000060, 0x000103c0 /* DBMRCNT (EMR) */
+	ED 0xFD000060, 0x00010040 /* DBMRCNT (EMR) */
+
+	mov	#100,r0
+WAIT_400NS_2:
+	dt	r0
+	bf	WAIT_400NS_2
+
+	ED 0xFD000010, 0x00000001 /* DBEN */
+	ED 0xFD000044, 0x0000050f /* DBRFPDN1 */
+	ED 0xFD000048, 0x236800e6 /* DBRFPDN2 */
+
+	mov.l	DUMMY,r0
+	mov.l	@r0, r1 /* force single dummy read */
+
+	ED 0xFD000014, 0x00000002 /* DBCMDCNT (PALL) */
+	ED 0xFD000014, 0x00000004 /* DBCMDCNT (REF) */
+	ED 0xFD000108, 0x00000080 /* DBPDCNT0 */
+	ED 0xFD000040, 0x00010000 /* DBRFPDN0 */
+
+	rts
+	 nop
+
+	.balign 4
+DUMMY:	.long	0xac400000
+FRQCRA:	.long	0xa4150000
+KICK:	.long	0x80000000
+LSTATS:	.long	0xa4150060
+
 ENTRY(ms7724se_sdram_leave_end)
diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c
index 858ecb2..66cdbc3 100644
--- a/arch/sh/boards/mach-se/7724/setup.c
+++ b/arch/sh/boards/mach-se/7724/setup.c
@@ -53,26 +53,17 @@
  */
 
 /* Heartbeat */
-static struct heartbeat_data heartbeat_data = {
-	.regsize = 16,
-};
-
-static struct resource heartbeat_resources[] = {
-	[0] = {
-		.start  = PA_LED,
-		.end    = PA_LED,
-		.flags  = IORESOURCE_MEM,
-	},
+static struct resource heartbeat_resource = {
+	.start  = PA_LED,
+	.end    = PA_LED,
+	.flags  = IORESOURCE_MEM | IORESOURCE_MEM_16BIT,
 };
 
 static struct platform_device heartbeat_device = {
 	.name           = "heartbeat",
 	.id             = -1,
-	.dev = {
-		.platform_data = &heartbeat_data,
-	},
-	.num_resources  = ARRAY_SIZE(heartbeat_resources),
-	.resource       = heartbeat_resources,
+	.num_resources  = 1,
+	.resource       = &heartbeat_resource,
 };
 
 /* LAN91C111 */
@@ -265,12 +256,12 @@
 #define FCLKACR		0xa4150008
 static void fsimck_init(struct clk *clk)
 {
-	u32 status = ctrl_inl(clk->enable_reg);
+	u32 status = __raw_readl(clk->enable_reg);
 
 	/* use external clock */
 	status &= ~0x000000ff;
 	status |= 0x00000080;
-	ctrl_outl(status, clk->enable_reg);
+	__raw_writel(status, clk->enable_reg);
 }
 
 static struct clk_ops fsimck_clk_ops = {
@@ -322,7 +313,7 @@
 /* KEYSC in SoC (Needs SW33-2 set to ON) */
 static struct sh_keysc_info keysc_info = {
 	.mode = SH_KEYSC_MODE_1,
-	.scan_timing = 10,
+	.scan_timing = 3,
 	.delay = 50,
 	.keycodes = {
 		KEY_1, KEY_2, KEY_3, KEY_4, KEY_5,
@@ -460,7 +451,7 @@
 		.flags  = IORESOURCE_MEM,
 	},
 	[1] = {
-		.start  = 101,
+		.start  = 100,
 		.flags  = IORESOURCE_IRQ,
 	},
 };
@@ -483,7 +474,7 @@
 		.flags  = IORESOURCE_MEM,
 	},
 	[1] = {
-		.start  = 24,
+		.start  = 23,
 		.flags  = IORESOURCE_IRQ,
 	},
 };
@@ -498,6 +489,26 @@
 	},
 };
 
+/* IrDA */
+static struct resource irda_resources[] = {
+	[0] = {
+		.name	= "IrDA",
+		.start  = 0xA45D0000,
+		.end    = 0xA45D0049,
+		.flags  = IORESOURCE_MEM,
+	},
+	[1] = {
+		.start  = 20,
+		.flags  = IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device irda_device = {
+	.name           = "sh_sir",
+	.num_resources  = ARRAY_SIZE(irda_resources),
+	.resource       = irda_resources,
+};
+
 static struct platform_device *ms7724se_devices[] __initdata = {
 	&heartbeat_device,
 	&smc91x_eth_device,
@@ -512,6 +523,7 @@
 	&fsi_device,
 	&sdhi0_cn7_device,
 	&sdhi1_cn8_device,
+	&irda_device,
 };
 
 /* I2C device */
@@ -531,7 +543,7 @@
 	int t = 10000;
 
 	while (t--) {
-		if (!ctrl_inw(EEPROM_STAT))
+		if (!__raw_readw(EEPROM_STAT))
 			return 1;
 		udelay(1);
 	}
@@ -551,13 +563,13 @@
 
 	/* read MAC addr from EEPROM */
 	for (i = 0 ; i < 3 ; i++) {
-		ctrl_outw(0x0, EEPROM_OP); /* read */
-		ctrl_outw(i*2, EEPROM_ADR);
-		ctrl_outw(0x1, EEPROM_STRT);
+		__raw_writew(0x0, EEPROM_OP); /* read */
+		__raw_writew(i*2, EEPROM_ADR);
+		__raw_writew(0x1, EEPROM_STRT);
 		if (!sh_eth_is_eeprom_ready())
 			return;
 
-		mac = ctrl_inw(EEPROM_DATA);
+		mac = __raw_readw(EEPROM_DATA);
 		sh_eth_plat.mac_addr[i << 1] = mac & 0xff;
 		sh_eth_plat.mac_addr[(i << 1) + 1] = mac >> 8;
 	}
@@ -594,28 +606,29 @@
 
 static int __init devices_setup(void)
 {
-	u16 sw = ctrl_inw(SW4140); /* select camera, monitor */
-	struct clk *fsia_clk;
+	u16 sw = __raw_readw(SW4140); /* select camera, monitor */
+	struct clk *clk;
 
 	/* register board specific self-refresh code */
-	sh_mobile_register_self_refresh(SUSP_SH_STANDBY | SUSP_SH_SF,
+	sh_mobile_register_self_refresh(SUSP_SH_STANDBY | SUSP_SH_SF |
+					SUSP_SH_RSTANDBY,
 					&ms7724se_sdram_enter_start,
 					&ms7724se_sdram_enter_end,
 					&ms7724se_sdram_leave_start,
 					&ms7724se_sdram_leave_end);
 	/* Reset Release */
-	ctrl_outw(ctrl_inw(FPGA_OUT) &
+	__raw_writew(__raw_readw(FPGA_OUT) &
 		  ~((1 << 1)  | /* LAN */
 		    (1 << 6)  | /* VIDEO DAC */
 		    (1 << 7)  | /* AK4643 */
+		    (1 << 8)  | /* IrDA */
 		    (1 << 12) | /* USB0 */
 		    (1 << 14)), /* RMII */
 		  FPGA_OUT);
 
 	/* turn on USB clocks, use external clock */
-	ctrl_outw((ctrl_inw(PORT_MSELCRB) & ~0xc000) | 0x8000, PORT_MSELCRB);
+	__raw_writew((__raw_readw(PORT_MSELCRB) & ~0xc000) | 0x8000, PORT_MSELCRB);
 
-#ifdef CONFIG_PM
 	/* Let LED9 show STATUS2 */
 	gpio_request(GPIO_FN_STATUS2, NULL);
 
@@ -624,28 +637,12 @@
 
 	/* Lit LED11 show PDSTATUS */
 	gpio_request(GPIO_FN_PDSTATUS, NULL);
-#else
-	/* Lit LED9 */
-	gpio_request(GPIO_PTJ6, NULL);
-	gpio_direction_output(GPIO_PTJ6, 1);
-	gpio_export(GPIO_PTJ6, 0);
-
-	/* Lit LED10 */
-	gpio_request(GPIO_PTJ5, NULL);
-	gpio_direction_output(GPIO_PTJ5, 1);
-	gpio_export(GPIO_PTJ5, 0);
-
-	/* Lit LED11 */
-	gpio_request(GPIO_PTJ7, NULL);
-	gpio_direction_output(GPIO_PTJ7, 1);
-	gpio_export(GPIO_PTJ7, 0);
-#endif
 
 	/* enable USB0 port */
-	ctrl_outw(0x0600, 0xa40501d4);
+	__raw_writew(0x0600, 0xa40501d4);
 
 	/* enable USB1 port */
-	ctrl_outw(0x0600, 0xa4050192);
+	__raw_writew(0x0600, 0xa4050192);
 
 	/* enable IRQ 0,1,2 */
 	gpio_request(GPIO_FN_INTC_IRQ0, NULL);
@@ -693,7 +690,7 @@
 	gpio_request(GPIO_FN_LCDVCPWC, NULL);
 	gpio_request(GPIO_FN_LCDRD,    NULL);
 	gpio_request(GPIO_FN_LCDLCLK,  NULL);
-	ctrl_outw((ctrl_inw(PORT_HIZA) & ~0x0001), PORT_HIZA);
+	__raw_writew((__raw_readw(PORT_HIZA) & ~0x0001), PORT_HIZA);
 
 	/* enable CEU0 */
 	gpio_request(GPIO_FN_VIO0_D15, NULL);
@@ -764,13 +761,18 @@
 	gpio_request(GPIO_FN_CLKAUDIOBO, NULL);
 	gpio_request(GPIO_FN_FSIIASD,    NULL);
 
+	/* set SPU2 clock to 83.4 MHz */
+	clk = clk_get(NULL, "spu_clk");
+	clk_set_rate(clk, clk_round_rate(clk, 83333333));
+	clk_put(clk);
+
 	/* change parent of FSI A */
-	fsia_clk = clk_get(NULL, "fsia_clk");
+	clk = clk_get(NULL, "fsia_clk");
 	clk_register(&fsimcka_clk);
-	clk_set_parent(fsia_clk, &fsimcka_clk);
-	clk_set_rate(fsia_clk, 11000);
+	clk_set_parent(clk, &fsimcka_clk);
+	clk_set_rate(clk, 11000);
 	clk_set_rate(&fsimcka_clk, 11000);
-	clk_put(fsia_clk);
+	clk_put(clk);
 
 	/* SDHI0 connected to cn7 */
 	gpio_request(GPIO_FN_SDHI0CD, NULL);
@@ -792,6 +794,10 @@
 	gpio_request(GPIO_FN_SDHI1CMD, NULL);
 	gpio_request(GPIO_FN_SDHI1CLK, NULL);
 
+	/* enable IrDA */
+	gpio_request(GPIO_FN_IRDA_OUT, NULL);
+	gpio_request(GPIO_FN_IRDA_IN,  NULL);
+
 	/*
 	 * enable SH-Eth
 	 *
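
The SPU2 clock setup added to devices_setup() above uses the standard clk API: look the clock up by name, round the requested 83333333 Hz to a rate the clock actually supports, set it, and drop the reference. A stripped-down sketch of that sequence, with error handling omitted just as it is in the board code:

#include <linux/clk.h>

static void example_set_spu_clock(void)
{
	struct clk *clk = clk_get(NULL, "spu_clk");

	/* round 83333333 Hz to the nearest supported rate, then apply it */
	clk_set_rate(clk, clk_round_rate(clk, 83333333));
	clk_put(clk);
}
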
diff --git a/arch/sh/boards/mach-se/7780/irq.c b/arch/sh/boards/mach-se/7780/irq.c
index 121744c..d5c9edc 100644
--- a/arch/sh/boards/mach-se/7780/irq.c
+++ b/arch/sh/boards/mach-se/7780/irq.c
@@ -24,30 +24,30 @@
 void __init init_se7780_IRQ(void)
 {
 	/* enable all interrupt at FPGA */
-	ctrl_outw(0, FPGA_INTMSK1);
+	__raw_writew(0, FPGA_INTMSK1);
 	/* mask SM501 interrupt */
-	ctrl_outw((ctrl_inw(FPGA_INTMSK1) | 0x0002), FPGA_INTMSK1);
+	__raw_writew((__raw_readw(FPGA_INTMSK1) | 0x0002), FPGA_INTMSK1);
 	/* enable all interrupt at FPGA */
-	ctrl_outw(0, FPGA_INTMSK2);
+	__raw_writew(0, FPGA_INTMSK2);
 
 	/* set FPGA INTSEL register */
 	/* FPGA + 0x06 */
-	ctrl_outw( ((IRQPIN_SM501 << IRQPOS_SM501) |
+	__raw_writew( ((IRQPIN_SM501 << IRQPOS_SM501) |
 		(IRQPIN_SMC91CX << IRQPOS_SMC91CX)), FPGA_INTSEL1);
 
 	/* FPGA + 0x08 */
-	ctrl_outw(((IRQPIN_EXTINT4 << IRQPOS_EXTINT4) |
+	__raw_writew(((IRQPIN_EXTINT4 << IRQPOS_EXTINT4) |
 		(IRQPIN_EXTINT3 << IRQPOS_EXTINT3) |
 		(IRQPIN_EXTINT2 << IRQPOS_EXTINT2) |
 		(IRQPIN_EXTINT1 << IRQPOS_EXTINT1)), FPGA_INTSEL2);
 
 	/* FPGA + 0x0A */
-	ctrl_outw((IRQPIN_PCCPW << IRQPOS_PCCPW), FPGA_INTSEL3);
+	__raw_writew((IRQPIN_PCCPW << IRQPOS_PCCPW), FPGA_INTSEL3);
 
 	plat_irq_setup_pins(IRQ_MODE_IRQ); /* install handlers for IRQ0-7 */
 
 	/* ICR1: detect low level(for 2ndcut) */
-	ctrl_outl(0xAAAA0000, INTC_ICR1);
+	__raw_writel(0xAAAA0000, INTC_ICR1);
 
 	/*
 	 * FPGA PCISEL register initialize
@@ -63,6 +63,6 @@
 	 *  INTD || INTD  | INTC  |  --   | INTA
 	 *  -------------------------------------
 	 */
-	ctrl_outw(0x0013, FPGA_PCI_INTSEL1);
-	ctrl_outw(0xE402, FPGA_PCI_INTSEL2);
+	__raw_writew(0x0013, FPGA_PCI_INTSEL1);
+	__raw_writew(0xE402, FPGA_PCI_INTSEL2);
 }
diff --git a/arch/sh/boards/mach-se/7780/setup.c b/arch/sh/boards/mach-se/7780/setup.c
index 1d3a867..6f7c207 100644
--- a/arch/sh/boards/mach-se/7780/setup.c
+++ b/arch/sh/boards/mach-se/7780/setup.c
@@ -17,26 +17,17 @@
 #include <asm/heartbeat.h>
 
 /* Heartbeat */
-static struct heartbeat_data heartbeat_data = {
-	.regsize = 16,
-};
-
-static struct resource heartbeat_resources[] = {
-	[0] = {
-		.start  = PA_LED,
-		.end    = PA_LED,
-		.flags  = IORESOURCE_MEM,
-	},
+static struct resource heartbeat_resource = {
+	.start  = PA_LED,
+	.end    = PA_LED,
+	.flags  = IORESOURCE_MEM | IORESOURCE_MEM_16BIT,
 };
 
 static struct platform_device heartbeat_device = {
 	.name           = "heartbeat",
 	.id             = -1,
-	.dev = {
-		.platform_data = &heartbeat_data,
-	},
-	.num_resources  = ARRAY_SIZE(heartbeat_resources),
-	.resource       = heartbeat_resources,
+	.num_resources  = 1,
+	.resource       = &heartbeat_resource,
 };
 
 /* SMC91x */
@@ -84,14 +75,14 @@
 static void __init se7780_setup(char **cmdline_p)
 {
 	/* "SH-Linux" on LED Display */
-	ctrl_outw( 'S' , PA_LED_DISP + (DISP_SEL0_ADDR << 1) );
-	ctrl_outw( 'H' , PA_LED_DISP + (DISP_SEL1_ADDR << 1) );
-	ctrl_outw( '-' , PA_LED_DISP + (DISP_SEL2_ADDR << 1) );
-	ctrl_outw( 'L' , PA_LED_DISP + (DISP_SEL3_ADDR << 1) );
-	ctrl_outw( 'i' , PA_LED_DISP + (DISP_SEL4_ADDR << 1) );
-	ctrl_outw( 'n' , PA_LED_DISP + (DISP_SEL5_ADDR << 1) );
-	ctrl_outw( 'u' , PA_LED_DISP + (DISP_SEL6_ADDR << 1) );
-	ctrl_outw( 'x' , PA_LED_DISP + (DISP_SEL7_ADDR << 1) );
+	__raw_writew( 'S' , PA_LED_DISP + (DISP_SEL0_ADDR << 1) );
+	__raw_writew( 'H' , PA_LED_DISP + (DISP_SEL1_ADDR << 1) );
+	__raw_writew( '-' , PA_LED_DISP + (DISP_SEL2_ADDR << 1) );
+	__raw_writew( 'L' , PA_LED_DISP + (DISP_SEL3_ADDR << 1) );
+	__raw_writew( 'i' , PA_LED_DISP + (DISP_SEL4_ADDR << 1) );
+	__raw_writew( 'n' , PA_LED_DISP + (DISP_SEL5_ADDR << 1) );
+	__raw_writew( 'u' , PA_LED_DISP + (DISP_SEL6_ADDR << 1) );
+	__raw_writew( 'x' , PA_LED_DISP + (DISP_SEL7_ADDR << 1) );
 
 	printk(KERN_INFO "Hitachi UL Solutions Engine 7780SE03 support.\n");
 
@@ -102,15 +93,15 @@
 	 *   REQ2/GNT2 -> Serial ATA
 	 *   REQ3/GNT3 -> PCI slot
 	 */
-	ctrl_outw(0x0213, FPGA_REQSEL);
+	__raw_writew(0x0213, FPGA_REQSEL);
 
 	/* GPIO setting */
-	ctrl_outw(0x0000, GPIO_PECR);
-	ctrl_outw(ctrl_inw(GPIO_PHCR)&0xfff3, GPIO_PHCR);
-	ctrl_outw(0x0c00, GPIO_PMSELR);
+	__raw_writew(0x0000, GPIO_PECR);
+	__raw_writew(__raw_readw(GPIO_PHCR)&0xfff3, GPIO_PHCR);
+	__raw_writew(0x0c00, GPIO_PMSELR);
 
 	/* iVDR Power ON */
-	ctrl_outw(0x0001, FPGA_IVDRPW);
+	__raw_writew(0x0001, FPGA_IVDRPW);
 }
 
 /*
diff --git a/arch/sh/boards/mach-sh03/rtc.c b/arch/sh/boards/mach-sh03/rtc.c
index a8b9f84..1b20099 100644
--- a/arch/sh/boards/mach-sh03/rtc.c
+++ b/arch/sh/boards/mach-sh03/rtc.c
@@ -44,15 +44,15 @@
 	spin_lock(&sh03_rtc_lock);
  again:
 	do {
-		sec  = (ctrl_inb(RTC_SEC1) & 0xf) + (ctrl_inb(RTC_SEC10) & 0x7) * 10;
-		min  = (ctrl_inb(RTC_MIN1) & 0xf) + (ctrl_inb(RTC_MIN10) & 0xf) * 10;
-		hour = (ctrl_inb(RTC_HOU1) & 0xf) + (ctrl_inb(RTC_HOU10) & 0xf) * 10;
-		day  = (ctrl_inb(RTC_DAY1) & 0xf) + (ctrl_inb(RTC_DAY10) & 0xf) * 10;
-		mon  = (ctrl_inb(RTC_MON1) & 0xf) + (ctrl_inb(RTC_MON10) & 0xf) * 10;
-		year = (ctrl_inb(RTC_YEA1) & 0xf) + (ctrl_inb(RTC_YEA10) & 0xf) * 10
-		     + (ctrl_inb(RTC_YEA100 ) & 0xf) * 100
-		     + (ctrl_inb(RTC_YEA1000) & 0xf) * 1000;
-	} while (sec != (ctrl_inb(RTC_SEC1) & 0xf) + (ctrl_inb(RTC_SEC10) & 0x7) * 10);
+		sec  = (__raw_readb(RTC_SEC1) & 0xf) + (__raw_readb(RTC_SEC10) & 0x7) * 10;
+		min  = (__raw_readb(RTC_MIN1) & 0xf) + (__raw_readb(RTC_MIN10) & 0xf) * 10;
+		hour = (__raw_readb(RTC_HOU1) & 0xf) + (__raw_readb(RTC_HOU10) & 0xf) * 10;
+		day  = (__raw_readb(RTC_DAY1) & 0xf) + (__raw_readb(RTC_DAY10) & 0xf) * 10;
+		mon  = (__raw_readb(RTC_MON1) & 0xf) + (__raw_readb(RTC_MON10) & 0xf) * 10;
+		year = (__raw_readb(RTC_YEA1) & 0xf) + (__raw_readb(RTC_YEA10) & 0xf) * 10
+		     + (__raw_readb(RTC_YEA100 ) & 0xf) * 100
+		     + (__raw_readb(RTC_YEA1000) & 0xf) * 1000;
+	} while (sec != (__raw_readb(RTC_SEC1) & 0xf) + (__raw_readb(RTC_SEC10) & 0x7) * 10);
 	if (year == 0 || mon < 1 || mon > 12 || day > 31 || day < 1 ||
 	    hour > 23 || min > 59 || sec > 59) {
 		printk(KERN_ERR
@@ -60,16 +60,16 @@
 		printk("year=%d, mon=%d, day=%d, hour=%d, min=%d, sec=%d\n",
 		       year, mon, day, hour, min, sec);
 
-		ctrl_outb(0, RTC_SEC1); ctrl_outb(0, RTC_SEC10);
-		ctrl_outb(0, RTC_MIN1); ctrl_outb(0, RTC_MIN10);
-		ctrl_outb(0, RTC_HOU1); ctrl_outb(0, RTC_HOU10);
-		ctrl_outb(6, RTC_WEE1);
-		ctrl_outb(1, RTC_DAY1); ctrl_outb(0, RTC_DAY10);
-		ctrl_outb(1, RTC_MON1); ctrl_outb(0, RTC_MON10);
-		ctrl_outb(0, RTC_YEA1); ctrl_outb(0, RTC_YEA10);
-		ctrl_outb(0, RTC_YEA100);
-		ctrl_outb(2, RTC_YEA1000);
-		ctrl_outb(0, RTC_CTL);
+		__raw_writeb(0, RTC_SEC1); __raw_writeb(0, RTC_SEC10);
+		__raw_writeb(0, RTC_MIN1); __raw_writeb(0, RTC_MIN10);
+		__raw_writeb(0, RTC_HOU1); __raw_writeb(0, RTC_HOU10);
+		__raw_writeb(6, RTC_WEE1);
+		__raw_writeb(1, RTC_DAY1); __raw_writeb(0, RTC_DAY10);
+		__raw_writeb(1, RTC_MON1); __raw_writeb(0, RTC_MON10);
+		__raw_writeb(0, RTC_YEA1); __raw_writeb(0, RTC_YEA10);
+		__raw_writeb(0, RTC_YEA100);
+		__raw_writeb(2, RTC_YEA1000);
+		__raw_writeb(0, RTC_CTL);
 		goto again;
 	}
 
@@ -93,9 +93,9 @@
 	/* gets recalled with irq locally disabled */
 	spin_lock(&sh03_rtc_lock);
 	for (i = 0 ; i < 1000000 ; i++)	/* may take up to 1 second... */
-		if (!(ctrl_inb(RTC_CTL) & RTC_BUSY))
+		if (!(__raw_readb(RTC_CTL) & RTC_BUSY))
 			break;
-	cmos_minutes = (ctrl_inb(RTC_MIN1) & 0xf) + (ctrl_inb(RTC_MIN10) & 0xf) * 10;
+	cmos_minutes = (__raw_readb(RTC_MIN1) & 0xf) + (__raw_readb(RTC_MIN10) & 0xf) * 10;
 	real_seconds = nowtime % 60;
 	real_minutes = nowtime / 60;
 	if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1)
@@ -103,10 +103,10 @@
 	real_minutes %= 60;
 
 	if (abs(real_minutes - cmos_minutes) < 30) {
-		ctrl_outb(real_seconds % 10, RTC_SEC1);
-		ctrl_outb(real_seconds / 10, RTC_SEC10);
-		ctrl_outb(real_minutes % 10, RTC_MIN1);
-		ctrl_outb(real_minutes / 10, RTC_MIN10);
+		__raw_writeb(real_seconds % 10, RTC_SEC1);
+		__raw_writeb(real_seconds / 10, RTC_SEC10);
+		__raw_writeb(real_minutes % 10, RTC_MIN1);
+		__raw_writeb(real_minutes / 10, RTC_MIN10);
 	} else {
 		printk(KERN_WARNING
 		       "set_rtc_mmss: can't update from %d to %d\n",
diff --git a/arch/sh/boards/mach-sh03/setup.c b/arch/sh/boards/mach-sh03/setup.c
index 74cfb4b..af4a0c0 100644
--- a/arch/sh/boards/mach-sh03/setup.c
+++ b/arch/sh/boards/mach-sh03/setup.c
@@ -82,7 +82,7 @@
 	/* open I/O area window */
 	paddrbase = virt_to_phys((void *)PA_AREA5_IO);
 	prot = PAGE_KERNEL_PCC(1, _PAGE_PCC_IO16);
-	cf_ide_base = p3_ioremap(paddrbase, PAGE_SIZE, prot.pgprot);
+	cf_ide_base = ioremap_prot(paddrbase, PAGE_SIZE, pgprot_val(prot));
 	if (!cf_ide_base) {
 		printk("allocate_cf_area : can't open CF I/O window!\n");
 		return -ENOMEM;
diff --git a/arch/sh/boards/mach-sh7763rdp/irq.c b/arch/sh/boards/mach-sh7763rdp/irq.c
index d8ebfa7..add698c 100644
--- a/arch/sh/boards/mach-sh7763rdp/irq.c
+++ b/arch/sh/boards/mach-sh7763rdp/irq.c
@@ -28,18 +28,18 @@
 void __init init_sh7763rdp_IRQ(void)
 {
 	/* GPIO enabled */
-	ctrl_outl(1 << 25, INTC_INT2MSKCR);
+	__raw_writel(1 << 25, INTC_INT2MSKCR);
 
 	/* enable GPIO interrupts */
-	ctrl_outl((ctrl_inl(INTC_INT2PRI7) & 0xFF00FFFF) | 0x000F0000,
+	__raw_writel((__raw_readl(INTC_INT2PRI7) & 0xFF00FFFF) | 0x000F0000,
 		  INTC_INT2PRI7);
 
 	/* USBH enabled */
-	ctrl_outl(1 << 17, INTC_INT2MSKCR1);
+	__raw_writel(1 << 17, INTC_INT2MSKCR1);
 
 	/* GETHER enabled */
-	ctrl_outl(1 << 16, INTC_INT2MSKCR1);
+	__raw_writel(1 << 16, INTC_INT2MSKCR1);
 
 	/* DMAC enabled */
-	ctrl_outl(1 << 8, INTC_INT2MSKCR);
+	__raw_writel(1 << 8, INTC_INT2MSKCR);
 }
diff --git a/arch/sh/boards/mach-sh7763rdp/setup.c b/arch/sh/boards/mach-sh7763rdp/setup.c
index 390534a..f64a691 100644
--- a/arch/sh/boards/mach-sh7763rdp/setup.c
+++ b/arch/sh/boards/mach-sh7763rdp/setup.c
@@ -158,50 +158,50 @@
 static void __init sh7763rdp_setup(char **cmdline_p)
 {
 	/* Board version check */
-	if (ctrl_inw(CPLD_BOARD_ID_ERV_REG) == 0xECB1)
+	if (__raw_readw(CPLD_BOARD_ID_ERV_REG) == 0xECB1)
 		printk(KERN_INFO "RTE Standard Configuration\n");
 	else
 		printk(KERN_INFO "RTA Standard Configuration\n");
 
 	/* USB pin select bits (clear bit 5-2 to 0) */
-	ctrl_outw((ctrl_inw(PORT_PSEL2) & 0xFFC3), PORT_PSEL2);
+	__raw_writew((__raw_readw(PORT_PSEL2) & 0xFFC3), PORT_PSEL2);
 	/* USBH setup port I controls to other (clear bits 4-9 to 0) */
-	ctrl_outw(ctrl_inw(PORT_PICR) & 0xFC0F, PORT_PICR);
+	__raw_writew(__raw_readw(PORT_PICR) & 0xFC0F, PORT_PICR);
 
 	/* Select USB Host controller */
-	ctrl_outw(0x00, USB_USBHSC);
+	__raw_writew(0x00, USB_USBHSC);
 
 	/* For LCD */
 	/* set PTJ7-1, bits 15-2 of PJCR to 0 */
-	ctrl_outw(ctrl_inw(PORT_PJCR) & 0x0003, PORT_PJCR);
+	__raw_writew(__raw_readw(PORT_PJCR) & 0x0003, PORT_PJCR);
 	/* set PTI5, bits 11-10 of PICR to 0 */
-	ctrl_outw(ctrl_inw(PORT_PICR) & 0xF3FF, PORT_PICR);
-	ctrl_outw(0, PORT_PKCR);
-	ctrl_outw(0, PORT_PLCR);
+	__raw_writew(__raw_readw(PORT_PICR) & 0xF3FF, PORT_PICR);
+	__raw_writew(0, PORT_PKCR);
+	__raw_writew(0, PORT_PLCR);
 	/* set PSEL2 bits 14-8, 5-4, of PSEL2 to 0 */
-	ctrl_outw((ctrl_inw(PORT_PSEL2) & 0x00C0), PORT_PSEL2);
+	__raw_writew((__raw_readw(PORT_PSEL2) & 0x00C0), PORT_PSEL2);
 	/* set PSEL3 bits 14-12, 6-4, 2-0 of PSEL3 to 0 */
-	ctrl_outw((ctrl_inw(PORT_PSEL3) & 0x0700), PORT_PSEL3);
+	__raw_writew((__raw_readw(PORT_PSEL3) & 0x0700), PORT_PSEL3);
 
 	/* For HAC */
 	/* bit3-0  0100:HAC & SSI1 enable */
-	ctrl_outw((ctrl_inw(PORT_PSEL1) & 0xFFF0) | 0x0004, PORT_PSEL1);
+	__raw_writew((__raw_readw(PORT_PSEL1) & 0xFFF0) | 0x0004, PORT_PSEL1);
 	/* bit14      1:SSI_HAC_CLK enable */
-	ctrl_outw(ctrl_inw(PORT_PSEL4) | 0x4000, PORT_PSEL4);
+	__raw_writew(__raw_readw(PORT_PSEL4) | 0x4000, PORT_PSEL4);
 
 	/* SH-Ether */
-	ctrl_outw((ctrl_inw(PORT_PSEL1) & ~0xff00) | 0x2400, PORT_PSEL1);
-	ctrl_outw(0x0, PORT_PFCR);
-	ctrl_outw(0x0, PORT_PFCR);
-	ctrl_outw(0x0, PORT_PFCR);
+	__raw_writew((__raw_readw(PORT_PSEL1) & ~0xff00) | 0x2400, PORT_PSEL1);
+	__raw_writew(0x0, PORT_PFCR);
+	__raw_writew(0x0, PORT_PFCR);
+	__raw_writew(0x0, PORT_PFCR);
 
 	/* MMC */
 	/*selects SCIF and MMC other functions */
-	ctrl_outw(0x0001, PORT_PSEL0);
+	__raw_writew(0x0001, PORT_PSEL0);
 	/* MMC clock operates */
-	ctrl_outl(ctrl_inl(MSTPCR1) & ~0x8, MSTPCR1);
-	ctrl_outw(ctrl_inw(PORT_PACR) & ~0x3000, PORT_PACR);
-	ctrl_outw(ctrl_inw(PORT_PCCR) & ~0xCFC3, PORT_PCCR);
+	__raw_writel(__raw_readl(MSTPCR1) & ~0x8, MSTPCR1);
+	__raw_writew(__raw_readw(PORT_PACR) & ~0x3000, PORT_PACR);
+	__raw_writew(__raw_readw(PORT_PCCR) & ~0xCFC3, PORT_PCCR);
 }
 
 static struct sh_machine_vector mv_sh7763rdp __initmv = {
diff --git a/arch/sh/boards/mach-snapgear/setup.c b/arch/sh/boards/mach-snapgear/setup.c
index a3277a2..331745d 100644
--- a/arch/sh/boards/mach-snapgear/setup.c
+++ b/arch/sh/boards/mach-snapgear/setup.c
@@ -30,7 +30,7 @@
 
 static irqreturn_t eraseconfig_interrupt(int irq, void *dev_id)
 {
-	(void)ctrl_inb(0xb8000000);	/* dummy read */
+	(void)__raw_readb(0xb8000000);	/* dummy read */
 
 	printk("SnapGear: erase switch interrupt!\n");
 
diff --git a/arch/sh/boards/mach-systemh/irq.c b/arch/sh/boards/mach-systemh/irq.c
index 986a0e7..523aea5 100644
--- a/arch/sh/boards/mach-systemh/irq.c
+++ b/arch/sh/boards/mach-systemh/irq.c
@@ -41,13 +41,13 @@
 		unsigned long val, mask = 0x01 << 1;
 
 		/* Clear the "irq"th bit in the mask and set it in the request */
-		val = ctrl_inl((unsigned long)systemh_irq_mask_register);
+		val = __raw_readl((unsigned long)systemh_irq_mask_register);
 		val &= ~mask;
-		ctrl_outl(val, (unsigned long)systemh_irq_mask_register);
+		__raw_writel(val, (unsigned long)systemh_irq_mask_register);
 
-		val = ctrl_inl((unsigned long)systemh_irq_request_register);
+		val = __raw_readl((unsigned long)systemh_irq_request_register);
 		val |= mask;
-		ctrl_outl(val, (unsigned long)systemh_irq_request_register);
+		__raw_writel(val, (unsigned long)systemh_irq_request_register);
 	}
 }
 
@@ -57,9 +57,9 @@
 		unsigned long val, mask = 0x01 << 1;
 
 		/* Set "irq"th bit in the mask register */
-		val = ctrl_inl((unsigned long)systemh_irq_mask_register);
+		val = __raw_readl((unsigned long)systemh_irq_mask_register);
 		val |= mask;
-		ctrl_outl(val, (unsigned long)systemh_irq_mask_register);
+		__raw_writel(val, (unsigned long)systemh_irq_mask_register);
 	}
 }
 
diff --git a/arch/sh/boards/mach-titan/Makefile b/arch/sh/boards/mach-titan/Makefile
deleted file mode 100644
index 08d7537..0000000
--- a/arch/sh/boards/mach-titan/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for the Nimble Microsystems TITAN specific parts of the kernel
-#
-
-obj-y	 := setup.o io.o
diff --git a/arch/sh/boards/mach-titan/io.c b/arch/sh/boards/mach-titan/io.c
deleted file mode 100644
index 0130e98..0000000
--- a/arch/sh/boards/mach-titan/io.c
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- *	I/O routines for Titan
- */
-#include <linux/pci.h>
-#include <asm/machvec.h>
-#include <asm/addrspace.h>
-#include <mach/titan.h>
-#include <asm/io.h>
-
-static inline unsigned int port2adr(unsigned int port)
-{
-        maybebadio((unsigned long)port);
-        return port;
-}
-
-u8 titan_inb(unsigned long port)
-{
-        if (PXSEG(port))
-                return ctrl_inb(port);
-        return ctrl_inw(port2adr(port)) & 0xff;
-}
-
-u8 titan_inb_p(unsigned long port)
-{
-        u8 v;
-
-        if (PXSEG(port))
-                v = ctrl_inb(port);
-        else
-                v = ctrl_inw(port2adr(port)) & 0xff;
-        ctrl_delay();
-        return v;
-}
-
-u16 titan_inw(unsigned long port)
-{
-        if (PXSEG(port))
-                return ctrl_inw(port);
-        else if (port >= 0x2000)
-                return ctrl_inw(port2adr(port));
-        else
-                maybebadio(port);
-        return 0;
-}
-
-u32 titan_inl(unsigned long port)
-{
-        if (PXSEG(port))
-                return ctrl_inl(port);
-        else if (port >= 0x2000)
-                return ctrl_inw(port2adr(port));
-        else
-                maybebadio(port);
-        return 0;
-}
-
-void titan_outb(u8 value, unsigned long port)
-{
-        if (PXSEG(port))
-                ctrl_outb(value, port);
-        else
-                ctrl_outw(value, port2adr(port));
-}
-
-void titan_outb_p(u8 value, unsigned long port)
-{
-        if (PXSEG(port))
-                ctrl_outb(value, port);
-        else
-                ctrl_outw(value, port2adr(port));
-        ctrl_delay();
-}
-
-void titan_outw(u16 value, unsigned long port)
-{
-        if (PXSEG(port))
-                ctrl_outw(value, port);
-        else if (port >= 0x2000)
-                ctrl_outw(value, port2adr(port));
-        else
-                maybebadio(port);
-}
-
-void titan_outl(u32 value, unsigned long port)
-{
-        if (PXSEG(port))
-                ctrl_outl(value, port);
-        else
-                maybebadio(port);
-}
-
-void titan_insl(unsigned long port, void *dst, unsigned long count)
-{
-        maybebadio(port);
-}
-
-void titan_outsl(unsigned long port, const void *src, unsigned long count)
-{
-        maybebadio(port);
-}
-
-void __iomem *titan_ioport_map(unsigned long port, unsigned int size)
-{
-	if (PXSEG(port))
-		return (void __iomem *)port;
-
-	return (void __iomem *)port2adr(port);
-}
diff --git a/arch/sh/boards/mach-titan/setup.c b/arch/sh/boards/mach-titan/setup.c
deleted file mode 100644
index 81e7e0f..0000000
--- a/arch/sh/boards/mach-titan/setup.c
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * arch/sh/boards/titan/setup.c - Setup for Titan
- *
- *  Copyright (C) 2006  Jamie Lenehan
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/init.h>
-#include <linux/irq.h>
-#include <mach/titan.h>
-#include <asm/io.h>
-
-static void __init init_titan_irq(void)
-{
-	/* enable individual interrupt mode for externals */
-	plat_irq_setup_pins(IRQ_MODE_IRQ);
-}
-
-static struct sh_machine_vector mv_titan __initmv = {
-	.mv_name =	"Titan",
-
-	.mv_inb =	titan_inb,
-	.mv_inw =	titan_inw,
-	.mv_inl =	titan_inl,
-	.mv_outb =	titan_outb,
-	.mv_outw =	titan_outw,
-	.mv_outl =	titan_outl,
-
-	.mv_inb_p =	titan_inb_p,
-	.mv_inw_p =	titan_inw,
-	.mv_inl_p =	titan_inl,
-	.mv_outb_p =	titan_outb_p,
-	.mv_outw_p =	titan_outw,
-	.mv_outl_p =	titan_outl,
-
-	.mv_insl =	titan_insl,
-	.mv_outsl =	titan_outsl,
-
-	.mv_ioport_map = titan_ioport_map,
-
-	.mv_init_irq =	init_titan_irq,
-};
diff --git a/arch/sh/boards/mach-x3proto/ilsel.c b/arch/sh/boards/mach-x3proto/ilsel.c
index b5c673c..5c98427 100644
--- a/arch/sh/boards/mach-x3proto/ilsel.c
+++ b/arch/sh/boards/mach-x3proto/ilsel.c
@@ -70,10 +70,10 @@
 	pr_debug("%s: bit#%d: addr - 0x%08lx (shift %d, set %d)\n",
 		 __func__, bit, addr, shift, set);
 
-	tmp = ctrl_inw(addr);
+	tmp = __raw_readw(addr);
 	tmp &= ~(0xf << shift);
 	tmp |= set << shift;
-	ctrl_outw(tmp, addr);
+	__raw_writew(tmp, addr);
 }
 
 /**
@@ -142,9 +142,9 @@
 
 	addr = mk_ilsel_addr(irq);
 
-	tmp = ctrl_inw(addr);
+	tmp = __raw_readw(addr);
 	tmp &= ~(0xf << mk_ilsel_shift(irq));
-	ctrl_outw(tmp, addr);
+	__raw_writew(tmp, addr);
 
 	clear_bit(irq, &ilsel_level_map);
 }
diff --git a/arch/sh/boards/mach-x3proto/setup.c b/arch/sh/boards/mach-x3proto/setup.c
index efe4cb9..e284592 100644
--- a/arch/sh/boards/mach-x3proto/setup.c
+++ b/arch/sh/boards/mach-x3proto/setup.c
@@ -149,7 +149,7 @@
 	plat_irq_setup_pins(IRQ_MODE_IRL3210);
 
 	/* Set ICR0.LVLMODE */
-	ctrl_outl(ctrl_inl(0xfe410000) | (1 << 21), 0xfe410000);
+	__raw_writel(__raw_readl(0xfe410000) | (1 << 21), 0xfe410000);
 }
 
 static struct sh_machine_vector mv_x3proto __initmv = {
diff --git a/arch/sh/boot/Makefile b/arch/sh/boot/Makefile
index cb8cf55..1ce6362 100644
--- a/arch/sh/boot/Makefile
+++ b/arch/sh/boot/Makefile
@@ -21,12 +21,15 @@
 CONFIG_ENTRY_OFFSET	?= 0x00001000
 
 suffix-y := bin
-suffix-$(CONFIG_KERNEL_GZIP)  := gz
-suffix-$(CONFIG_KERNEL_BZIP2) := bz2
-suffix-$(CONFIG_KERNEL_LZMA)  := lzma
+suffix-$(CONFIG_KERNEL_GZIP)	:= gz
+suffix-$(CONFIG_KERNEL_BZIP2)	:= bz2
+suffix-$(CONFIG_KERNEL_LZMA)	:= lzma
+suffix-$(CONFIG_KERNEL_LZO)	:= lzo
 
-targets := zImage vmlinux.srec romImage uImage uImage.srec uImage.gz uImage.bz2 uImage.lzma uImage.bin
-extra-y += vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma
+targets := zImage vmlinux.srec romImage uImage uImage.srec uImage.gz \
+	   uImage.bz2 uImage.lzma uImage.lzo uImage.bin
+extra-y += vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma \
+	   vmlinux.bin.lzo
 subdir- := compressed romimage
 
 $(obj)/zImage: $(obj)/compressed/vmlinux FORCE
@@ -43,15 +46,8 @@
 $(obj)/romimage/vmlinux: $(obj)/zImage FORCE
 	$(Q)$(MAKE) $(build)=$(obj)/romimage $@
 
-KERNEL_MEMORY := 0x00000000
-ifeq ($(CONFIG_PMB_FIXED),y)
-KERNEL_MEMORY := $(shell /bin/bash -c 'printf "0x%08x" \
+KERNEL_MEMORY	:= $(shell /bin/bash -c 'printf "0x%08x" \
 		     $$[$(CONFIG_MEMORY_START) & 0x1fffffff]')
-endif
-ifeq ($(CONFIG_29BIT),y)
-KERNEL_MEMORY := $(shell /bin/bash -c 'printf "0x%08x" \
-		     $$[$(CONFIG_MEMORY_START)]')
-endif
 
 KERNEL_LOAD	:= $(shell /bin/bash -c 'printf "0x%08x" \
 		     $$[$(CONFIG_PAGE_OFFSET)  + \
@@ -80,6 +76,9 @@
 $(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE
 	$(call if_changed,lzma)
 
+$(obj)/vmlinux.bin.lzo: $(obj)/vmlinux.bin FORCE
+	$(call if_changed,lzo)
+
 $(obj)/uImage.bz2: $(obj)/vmlinux.bin.bz2
 	$(call if_changed,uimage,bzip2)
 
@@ -89,6 +88,9 @@
 $(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma
 	$(call if_changed,uimage,lzma)
 
+$(obj)/uImage.lzo: $(obj)/vmlinux.bin.lzo
+	$(call if_changed,uimage,lzo)
+
 $(obj)/uImage.bin: $(obj)/vmlinux.bin
 	$(call if_changed,uimage,none)
 
diff --git a/arch/sh/boot/compressed/Makefile b/arch/sh/boot/compressed/Makefile
index 6182eca..5d660b9 100644
--- a/arch/sh/boot/compressed/Makefile
+++ b/arch/sh/boot/compressed/Makefile
@@ -6,14 +6,11 @@
 
 targets		:= vmlinux vmlinux.bin vmlinux.bin.gz \
 		   vmlinux.bin.bz2 vmlinux.bin.lzma \
+		   vmlinux.bin.lzo \
 		   head_$(BITS).o misc.o piggy.o
 
 OBJECTS = $(obj)/head_$(BITS).o $(obj)/misc.o $(obj)/cache.o
 
-ifdef CONFIG_SH_STANDARD_BIOS
-OBJECTS += $(obj)/../../kernel/sh_bios.o
-endif
-
 #
 # IMAGE_OFFSET is the load offset of the compression loader
 #
@@ -47,6 +44,8 @@
 	$(call if_changed,bzip2)
 $(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y) FORCE
 	$(call if_changed,lzma)
+$(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y) FORCE
+	$(call if_changed,lzo)
 
 OBJCOPYFLAGS += -R .empty_zero_page
 
diff --git a/arch/sh/boot/compressed/misc.c b/arch/sh/boot/compressed/misc.c
index b51b1fc..27140a6 100644
--- a/arch/sh/boot/compressed/misc.c
+++ b/arch/sh/boot/compressed/misc.c
@@ -14,7 +14,6 @@
 #include <asm/uaccess.h>
 #include <asm/addrspace.h>
 #include <asm/page.h>
-#include <asm/sh_bios.h>
 
 /*
  * gzip declarations
@@ -62,29 +61,15 @@
 #include "../../../../lib/decompress_unlzma.c"
 #endif
 
-#ifdef CONFIG_SH_STANDARD_BIOS
-size_t strlen(const char *s)
-{
-	int i = 0;
+#ifdef CONFIG_KERNEL_LZO
+#include "../../../../lib/decompress_unlzo.c"
+#endif
 
-	while (*s++)
-		i++;
-	return i;
-}
-
-int puts(const char *s)
-{
-	int len = strlen(s);
-	sh_bios_console_write(s, len);
-	return len;
-}
-#else
 int puts(const char *s)
 {
 	/* This should be updated to use the sh-sci routines */
 	return 0;
 }
-#endif
 
 void* memset(void* s, int c, size_t n)
 {
@@ -132,7 +117,7 @@
 	output_addr = (CONFIG_MEMORY_START + 0x2000);
 #else
 	output_addr = __pa((unsigned long)&_text+PAGE_SIZE);
-#ifdef CONFIG_29BIT
+#if defined(CONFIG_29BIT)
 	output_addr |= P2SEG;
 #endif
 #endif
diff --git a/arch/sh/cchips/hd6446x/hd64461.c b/arch/sh/cchips/hd6446x/hd64461.c
index 50aa0c1..bcb31ae 100644
--- a/arch/sh/cchips/hd6446x/hd64461.c
+++ b/arch/sh/cchips/hd6446x/hd64461.c
@@ -55,25 +55,22 @@
 
 static void hd64461_irq_demux(unsigned int irq, struct irq_desc *desc)
 {
-	unsigned short intv = ctrl_inw(HD64461_NIRR);
-	struct irq_desc *ext_desc;
+	unsigned short intv = __raw_readw(HD64461_NIRR);
 	unsigned int ext_irq = HD64461_IRQBASE;
 
 	intv &= (1 << HD64461_IRQ_NUM) - 1;
 
-	while (intv) {
-		if (intv & 1) {
-			ext_desc = irq_desc + ext_irq;
-			handle_level_irq(ext_irq, ext_desc);
-		}
-		intv >>= 1;
-		ext_irq++;
+	for (; intv; intv >>= 1, ext_irq++) {
+		if (!(intv & 1))
+			continue;
+
+		generic_handle_irq(ext_irq);
 	}
 }
 
 int __init setup_hd64461(void)
 {
-	int i;
+	int i, nid = cpu_to_node(boot_cpu_data);
 
 	if (!MACH_HD64461)
 		return 0;
@@ -90,9 +87,26 @@
 	__raw_writew(0xffff, HD64461_NIMR);
 
 	/*  IRQ 80 -> 95 belongs to HD64461  */
-	for (i = HD64461_IRQBASE; i < HD64461_IRQBASE + 16; i++)
+	for (i = HD64461_IRQBASE; i < HD64461_IRQBASE + 16; i++) {
+		unsigned int irq;
+
+		irq = create_irq_nr(i, nid);
+		if (unlikely(irq == 0)) {
+			pr_err("%s: failed hooking irq %d for HD64461\n",
+			       __func__, i);
+			return -EBUSY;
+		}
+
+		if (unlikely(irq != i)) {
+			pr_err("%s: got irq %d but wanted %d, bailing.\n",
+			       __func__, irq, i);
+			destroy_irq(irq);
+			return -EINVAL;
+		}
+
 		set_irq_chip_and_handler(i, &hd64461_irq_chip,
 					 handle_level_irq);
+	}
 
 	set_irq_chained_handler(CONFIG_HD64461_IRQ, hd64461_irq_demux);
 	set_irq_type(CONFIG_HD64461_IRQ, IRQ_TYPE_LEVEL_LOW);
diff --git a/arch/sh/configs/sdk7786_defconfig b/arch/sh/configs/sdk7786_defconfig
new file mode 100644
index 0000000..9b331ea
--- /dev/null
+++ b/arch/sh/configs/sdk7786_defconfig
@@ -0,0 +1,1754 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.33-rc7
+# Tue Feb  9 15:27:06 2010
+#
+CONFIG_SUPERH=y
+CONFIG_SUPERH32=y
+# CONFIG_SUPERH64 is not set
+CONFIG_ARCH_DEFCONFIG="arch/sh/configs/shx3_defconfig"
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_GENERIC_BUG=y
+CONFIG_GENERIC_FIND_NEXT_BIT=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_IRQ_PER_CPU=y
+CONFIG_SPARSE_IRQ=y
+# CONFIG_GENERIC_GPIO is not set
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+# CONFIG_ARCH_SUSPEND_POSSIBLE is not set
+CONFIG_ARCH_HIBERNATION_POSSIBLE=y
+CONFIG_SYS_SUPPORTS_HUGETLBFS=y
+CONFIG_SYS_SUPPORTS_SMP=y
+CONFIG_SYS_SUPPORTS_NUMA=y
+CONFIG_SYS_SUPPORTS_PCI=y
+CONFIG_SYS_SUPPORTS_TMU=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+# CONFIG_ARCH_HAS_ILOG2_U32 is not set
+# CONFIG_ARCH_HAS_ILOG2_U64 is not set
+CONFIG_ARCH_NO_VIRT_TO_BUS=y
+CONFIG_ARCH_HAS_DEFAULT_IDLE=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
+CONFIG_DMA_COHERENT=y
+# CONFIG_DMA_NONCOHERENT is not set
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_LOCK_KERNEL=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
+CONFIG_BSD_PROCESS_ACCT=y
+# CONFIG_BSD_PROCESS_ACCT_V3 is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_TINY_RCU is not set
+CONFIG_RCU_TRACE=y
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
+CONFIG_TREE_RCU_TRACE=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_GROUP_SCHED=y
+CONFIG_FAIR_GROUP_SCHED=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_USER_SCHED=y
+# CONFIG_CGROUP_SCHED is not set
+CONFIG_CGROUPS=y
+# CONFIG_CGROUP_DEBUG is not set
+CONFIG_CGROUP_NS=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+# CONFIG_CPUSETS is not set
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_CGROUP_MEM_RES_CTLR=y
+# CONFIG_CGROUP_MEM_RES_CTLR_SWAP is not set
+CONFIG_MM_OWNER=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
+# CONFIG_RELAY is not set
+CONFIG_NAMESPACES=y
+CONFIG_UTS_NS=y
+CONFIG_IPC_NS=y
+CONFIG_USER_NS=y
+CONFIG_PID_NS=y
+CONFIG_NET_NS=y
+# CONFIG_BLK_DEV_INITRD is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+CONFIG_EMBEDDED=y
+CONFIG_UID16=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS=y
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
+CONFIG_PERF_USE_VMALLOC=y
+
+#
+# Kernel Performance Events And Counters
+#
+CONFIG_PERF_EVENTS=y
+CONFIG_EVENT_PROFILE=y
+# CONFIG_PERF_COUNTERS is not set
+# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_PCI_QUIRKS=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+# CONFIG_SLUB is not set
+# CONFIG_SLOB is not set
+CONFIG_PROFILING=y
+CONFIG_TRACEPOINTS=y
+# CONFIG_OPROFILE is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_KPROBES is not set
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_ARCH_TRACEHOOK=y
+CONFIG_HAVE_DMA_ATTRS=y
+CONFIG_HAVE_CLK=y
+CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+# CONFIG_SLOW_WORK is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_BLOCK=y
+# CONFIG_LBDAF is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+CONFIG_BLK_CGROUP=y
+# CONFIG_DEBUG_BLK_CGROUP is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_CFQ_GROUP_IOSCHED=y
+# CONFIG_DEBUG_CFQ_IOSCHED is not set
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+# CONFIG_INLINE_SPIN_UNLOCK is not set
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+# CONFIG_INLINE_SPIN_UNLOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+# CONFIG_INLINE_READ_UNLOCK is not set
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+# CONFIG_INLINE_READ_UNLOCK_IRQ is not set
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+# CONFIG_INLINE_WRITE_UNLOCK is not set
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+# CONFIG_INLINE_WRITE_UNLOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+# CONFIG_MUTEX_SPIN_ON_OWNER is not set
+CONFIG_FREEZER=y
+
+#
+# System type
+#
+CONFIG_CPU_SH4=y
+CONFIG_CPU_SH4A=y
+CONFIG_CPU_SHX3=y
+# CONFIG_CPU_SUBTYPE_SH7619 is not set
+# CONFIG_CPU_SUBTYPE_SH7201 is not set
+# CONFIG_CPU_SUBTYPE_SH7203 is not set
+# CONFIG_CPU_SUBTYPE_SH7206 is not set
+# CONFIG_CPU_SUBTYPE_SH7263 is not set
+# CONFIG_CPU_SUBTYPE_MXG is not set
+# CONFIG_CPU_SUBTYPE_SH7705 is not set
+# CONFIG_CPU_SUBTYPE_SH7706 is not set
+# CONFIG_CPU_SUBTYPE_SH7707 is not set
+# CONFIG_CPU_SUBTYPE_SH7708 is not set
+# CONFIG_CPU_SUBTYPE_SH7709 is not set
+# CONFIG_CPU_SUBTYPE_SH7710 is not set
+# CONFIG_CPU_SUBTYPE_SH7712 is not set
+# CONFIG_CPU_SUBTYPE_SH7720 is not set
+# CONFIG_CPU_SUBTYPE_SH7721 is not set
+# CONFIG_CPU_SUBTYPE_SH7750 is not set
+# CONFIG_CPU_SUBTYPE_SH7091 is not set
+# CONFIG_CPU_SUBTYPE_SH7750R is not set
+# CONFIG_CPU_SUBTYPE_SH7750S is not set
+# CONFIG_CPU_SUBTYPE_SH7751 is not set
+# CONFIG_CPU_SUBTYPE_SH7751R is not set
+# CONFIG_CPU_SUBTYPE_SH7760 is not set
+# CONFIG_CPU_SUBTYPE_SH4_202 is not set
+# CONFIG_CPU_SUBTYPE_SH7723 is not set
+# CONFIG_CPU_SUBTYPE_SH7724 is not set
+# CONFIG_CPU_SUBTYPE_SH7757 is not set
+# CONFIG_CPU_SUBTYPE_SH7763 is not set
+# CONFIG_CPU_SUBTYPE_SH7770 is not set
+# CONFIG_CPU_SUBTYPE_SH7780 is not set
+# CONFIG_CPU_SUBTYPE_SH7785 is not set
+CONFIG_CPU_SUBTYPE_SH7786=y
+# CONFIG_CPU_SUBTYPE_SHX3 is not set
+# CONFIG_CPU_SUBTYPE_SH7343 is not set
+# CONFIG_CPU_SUBTYPE_SH7722 is not set
+# CONFIG_CPU_SUBTYPE_SH7366 is not set
+
+#
+# Memory management options
+#
+CONFIG_QUICKLIST=y
+CONFIG_MMU=y
+CONFIG_PAGE_OFFSET=0x80000000
+CONFIG_FORCE_MAX_ZONEORDER=11
+CONFIG_MEMORY_START=0x60000000
+CONFIG_MEMORY_SIZE=0x20000000
+# CONFIG_29BIT is not set
+CONFIG_32BIT=y
+CONFIG_PMB=y
+# CONFIG_PMB_LEGACY is not set
+CONFIG_X2TLB=y
+CONFIG_VSYSCALL=y
+# CONFIG_NUMA is not set
+CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_ARCH_SPARSEMEM_ENABLE=y
+CONFIG_ARCH_SPARSEMEM_DEFAULT=y
+CONFIG_MAX_ACTIVE_REGIONS=1
+CONFIG_ARCH_POPULATES_NODE_MAP=y
+CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
+CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
+CONFIG_ARCH_MEMORY_PROBE=y
+CONFIG_IOREMAP_FIXED=y
+CONFIG_PAGE_SIZE_4KB=y
+# CONFIG_PAGE_SIZE_8KB is not set
+# CONFIG_PAGE_SIZE_16KB is not set
+# CONFIG_PAGE_SIZE_64KB is not set
+# CONFIG_HUGETLB_PAGE_SIZE_64K is not set
+# CONFIG_HUGETLB_PAGE_SIZE_256K is not set
+CONFIG_HUGETLB_PAGE_SIZE_1MB=y
+# CONFIG_HUGETLB_PAGE_SIZE_4MB is not set
+# CONFIG_HUGETLB_PAGE_SIZE_64MB is not set
+# CONFIG_HUGETLB_PAGE_SIZE_512MB is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+# CONFIG_FLATMEM_MANUAL is not set
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+CONFIG_SPARSEMEM_MANUAL=y
+CONFIG_SPARSEMEM=y
+CONFIG_HAVE_MEMORY_PRESENT=y
+CONFIG_SPARSEMEM_STATIC=y
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTPLUG_SPARSE=y
+CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+CONFIG_MIGRATION=y
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_NR_QUICK=1
+CONFIG_KSM=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+
+#
+# Cache configuration
+#
+CONFIG_CACHE_WRITEBACK=y
+# CONFIG_CACHE_WRITETHROUGH is not set
+# CONFIG_CACHE_OFF is not set
+
+#
+# Processor features
+#
+CONFIG_CPU_LITTLE_ENDIAN=y
+# CONFIG_CPU_BIG_ENDIAN is not set
+CONFIG_SH_FPU=y
+CONFIG_SH_STORE_QUEUES=y
+CONFIG_CPU_HAS_INTEVT=y
+CONFIG_CPU_HAS_SR_RB=y
+CONFIG_CPU_HAS_PTEAEX=y
+CONFIG_CPU_HAS_FPU=y
+
+#
+# Board support
+#
+CONFIG_SH_SDK7786=y
+# CONFIG_SH_URQUELL is not set
+
+#
+# Timer and clock configuration
+#
+CONFIG_SH_TIMER_TMU=y
+CONFIG_SH_CLK_CPG=y
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+
+#
+# CPU Frequency scaling
+#
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_TABLE=y
+# CONFIG_CPU_FREQ_DEBUG is not set
+CONFIG_CPU_FREQ_STAT=y
+# CONFIG_CPU_FREQ_STAT_DETAILS is not set
+CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=m
+CONFIG_CPU_FREQ_GOV_USERSPACE=m
+CONFIG_CPU_FREQ_GOV_ONDEMAND=m
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
+CONFIG_SH_CPU_FREQ=y
+
+#
+# DMA support
+#
+# CONFIG_SH_DMA is not set
+
+#
+# Companion Chips
+#
+
+#
+# Additional SuperH Device Drivers
+#
+CONFIG_HEARTBEAT=y
+# CONFIG_PUSH_SWITCH is not set
+
+#
+# Kernel features
+#
+# CONFIG_HZ_100 is not set
+CONFIG_HZ_250=y
+# CONFIG_HZ_300 is not set
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=250
+CONFIG_SCHED_HRTICK=y
+CONFIG_KEXEC=y
+# CONFIG_CRASH_DUMP is not set
+CONFIG_SECCOMP=y
+# CONFIG_SMP is not set
+# CONFIG_PREEMPT_NONE is not set
+# CONFIG_PREEMPT_VOLUNTARY is not set
+CONFIG_PREEMPT=y
+CONFIG_GUSA=y
+
+#
+# Boot options
+#
+CONFIG_ZERO_PAGE_OFFSET=0x00001000
+CONFIG_BOOT_LINK_OFFSET=0x00800000
+CONFIG_ENTRY_OFFSET=0x00001000
+CONFIG_CMDLINE_OVERWRITE=y
+# CONFIG_CMDLINE_EXTEND is not set
+CONFIG_CMDLINE="console=ttySC1,115200 earlyprintk=sh-sci.1,115200 root=/dev/sda1 nmi_debug=state,debounce rootdelay=10"
+
+#
+# Bus options
+#
+CONFIG_PCI=y
+CONFIG_PCI_DOMAINS=y
+CONFIG_PCIEPORTBUS=y
+CONFIG_PCIEAER=y
+# CONFIG_PCIE_ECRC is not set
+CONFIG_PCIEAER_INJECT=y
+CONFIG_PCIEASPM=y
+CONFIG_PCIEASPM_DEBUG=y
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCI_LEGACY is not set
+CONFIG_PCI_DEBUG=y
+# CONFIG_PCI_STUB is not set
+# CONFIG_PCI_IOV is not set
+# CONFIG_PCCARD is not set
+# CONFIG_HOTPLUG_PCI is not set
+
+#
+# Executable file formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_HAVE_AOUT is not set
+CONFIG_BINFMT_MISC=y
+
+#
+# Power management options (EXPERIMENTAL)
+#
+CONFIG_PM=y
+CONFIG_PM_DEBUG=y
+CONFIG_PM_VERBOSE=y
+# CONFIG_HIBERNATION is not set
+CONFIG_PM_RUNTIME=y
+CONFIG_CPU_IDLE=y
+CONFIG_CPU_IDLE_GOV_LADDER=y
+CONFIG_CPU_IDLE_GOV_MENU=y
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+CONFIG_PACKET_MMAP=y
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+CONFIG_NET_KEY=y
+# CONFIG_NET_KEY_MIGRATE is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+# CONFIG_IP_PNP_BOOTP is not set
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
+# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_NET_DROP_MONITOR is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+CONFIG_WIRELESS=y
+# CONFIG_CFG80211 is not set
+# CONFIG_LIB80211 is not set
+
+#
+# CFG80211 needs to be enabled for MAC80211
+#
+# CONFIG_WIMAX is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_FW_LOADER is not set
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
+# CONFIG_MTD is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_CPQ_CISS_DA is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+# CONFIG_BLK_DEV_UMEM is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+# CONFIG_BLK_DEV_LOOP is not set
+
+#
+# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
+#
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_SX8 is not set
+# CONFIG_BLK_DEV_UB is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=4096
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_BLK_DEV_HD is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_AD525X_DPOT is not set
+# CONFIG_PHANTOM is not set
+# CONFIG_SGI_IOC4 is not set
+# CONFIG_TIFM_CORE is not set
+# CONFIG_ICS932S401 is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_HP_ILO is not set
+# CONFIG_ISL29003 is not set
+# CONFIG_DS1682 is not set
+# CONFIG_TI_DAC7512 is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_AT24 is not set
+# CONFIG_EEPROM_AT25 is not set
+# CONFIG_EEPROM_LEGACY is not set
+# CONFIG_EEPROM_MAX6875 is not set
+# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_CB710_CORE is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=y
+CONFIG_SCSI_DMA=y
+# CONFIG_SCSI_TGT is not set
+# CONFIG_SCSI_NETLINK is not set
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+# CONFIG_CHR_DEV_SG is not set
+# CONFIG_CHR_DEV_SCH is not set
+# CONFIG_SCSI_MULTI_LUN is not set
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+# CONFIG_SCSI_SCAN_ASYNC is not set
+CONFIG_SCSI_WAIT_SCAN=m
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+CONFIG_SCSI_LOWLEVEL=y
+# CONFIG_ISCSI_TCP is not set
+# CONFIG_SCSI_BNX2_ISCSI is not set
+# CONFIG_BE2ISCSI is not set
+# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
+# CONFIG_SCSI_HPSA is not set
+# CONFIG_SCSI_3W_9XXX is not set
+# CONFIG_SCSI_3W_SAS is not set
+# CONFIG_SCSI_ACARD is not set
+# CONFIG_SCSI_AACRAID is not set
+# CONFIG_SCSI_AIC7XXX is not set
+# CONFIG_SCSI_AIC7XXX_OLD is not set
+# CONFIG_SCSI_AIC79XX is not set
+# CONFIG_SCSI_AIC94XX is not set
+# CONFIG_SCSI_MVSAS is not set
+# CONFIG_SCSI_ARCMSR is not set
+# CONFIG_MEGARAID_NEWGEN is not set
+# CONFIG_MEGARAID_LEGACY is not set
+# CONFIG_MEGARAID_SAS is not set
+# CONFIG_SCSI_MPT2SAS is not set
+# CONFIG_SCSI_HPTIOP is not set
+# CONFIG_LIBFC is not set
+# CONFIG_LIBFCOE is not set
+# CONFIG_FCOE is not set
+# CONFIG_SCSI_DMX3191D is not set
+# CONFIG_SCSI_FUTURE_DOMAIN is not set
+# CONFIG_SCSI_IPS is not set
+# CONFIG_SCSI_INITIO is not set
+# CONFIG_SCSI_INIA100 is not set
+# CONFIG_SCSI_STEX is not set
+# CONFIG_SCSI_SYM53C8XX_2 is not set
+# CONFIG_SCSI_IPR is not set
+# CONFIG_SCSI_QLOGIC_1280 is not set
+# CONFIG_SCSI_QLA_FC is not set
+# CONFIG_SCSI_QLA_ISCSI is not set
+# CONFIG_SCSI_LPFC is not set
+# CONFIG_SCSI_DC395x is not set
+# CONFIG_SCSI_DC390T is not set
+# CONFIG_SCSI_NSP32 is not set
+# CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_PMCRAID is not set
+# CONFIG_SCSI_PM8001 is not set
+# CONFIG_SCSI_SRP is not set
+# CONFIG_SCSI_BFA_FC is not set
+# CONFIG_SCSI_DH is not set
+# CONFIG_SCSI_OSD_INITIATOR is not set
+CONFIG_ATA=y
+# CONFIG_ATA_NONSTANDARD is not set
+CONFIG_ATA_VERBOSE_ERROR=y
+CONFIG_SATA_PMP=y
+# CONFIG_SATA_AHCI is not set
+CONFIG_SATA_SIL24=y
+CONFIG_ATA_SFF=y
+# CONFIG_SATA_SVW is not set
+# CONFIG_ATA_PIIX is not set
+# CONFIG_SATA_MV is not set
+# CONFIG_SATA_NV is not set
+# CONFIG_PDC_ADMA is not set
+# CONFIG_SATA_QSTOR is not set
+# CONFIG_SATA_PROMISE is not set
+# CONFIG_SATA_SX4 is not set
+# CONFIG_SATA_SIL is not set
+# CONFIG_SATA_SIS is not set
+# CONFIG_SATA_ULI is not set
+# CONFIG_SATA_VIA is not set
+# CONFIG_SATA_VITESSE is not set
+# CONFIG_SATA_INIC162X is not set
+# CONFIG_PATA_ALI is not set
+# CONFIG_PATA_AMD is not set
+# CONFIG_PATA_ARTOP is not set
+# CONFIG_PATA_ATP867X is not set
+# CONFIG_PATA_ATIIXP is not set
+# CONFIG_PATA_CMD640_PCI is not set
+# CONFIG_PATA_CMD64X is not set
+# CONFIG_PATA_CS5520 is not set
+# CONFIG_PATA_CS5530 is not set
+# CONFIG_PATA_CYPRESS is not set
+# CONFIG_PATA_EFAR is not set
+# CONFIG_ATA_GENERIC is not set
+# CONFIG_PATA_HPT366 is not set
+# CONFIG_PATA_HPT37X is not set
+# CONFIG_PATA_HPT3X2N is not set
+# CONFIG_PATA_HPT3X3 is not set
+# CONFIG_PATA_IT821X is not set
+# CONFIG_PATA_IT8213 is not set
+# CONFIG_PATA_JMICRON is not set
+# CONFIG_PATA_TRIFLEX is not set
+# CONFIG_PATA_MARVELL is not set
+# CONFIG_PATA_MPIIX is not set
+# CONFIG_PATA_OLDPIIX is not set
+# CONFIG_PATA_NETCELL is not set
+# CONFIG_PATA_NINJA32 is not set
+# CONFIG_PATA_NS87410 is not set
+# CONFIG_PATA_NS87415 is not set
+# CONFIG_PATA_OPTI is not set
+# CONFIG_PATA_OPTIDMA is not set
+# CONFIG_PATA_PDC2027X is not set
+# CONFIG_PATA_PDC_OLD is not set
+# CONFIG_PATA_RADISYS is not set
+# CONFIG_PATA_RDC is not set
+# CONFIG_PATA_RZ1000 is not set
+# CONFIG_PATA_SC1200 is not set
+# CONFIG_PATA_SERVERWORKS is not set
+# CONFIG_PATA_SIL680 is not set
+# CONFIG_PATA_SIS is not set
+# CONFIG_PATA_TOSHIBA is not set
+# CONFIG_PATA_VIA is not set
+# CONFIG_PATA_WINBOND is not set
+CONFIG_PATA_PLATFORM=y
+# CONFIG_PATA_SCH is not set
+# CONFIG_MD is not set
+# CONFIG_FUSION is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+
+#
+# You can enable one or both FireWire driver stacks.
+#
+
+#
+# The newer stack is recommended.
+#
+# CONFIG_FIREWIRE is not set
+# CONFIG_IEEE1394 is not set
+# CONFIG_I2O is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+# CONFIG_ARCNET is not set
+CONFIG_PHYLIB=y
+
+#
+# MII PHY device drivers
+#
+# CONFIG_MARVELL_PHY is not set
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_QSEMI_PHY is not set
+# CONFIG_LXT_PHY is not set
+# CONFIG_CICADA_PHY is not set
+# CONFIG_VITESSE_PHY is not set
+# CONFIG_SMSC_PHY is not set
+# CONFIG_BROADCOM_PHY is not set
+# CONFIG_ICPLUS_PHY is not set
+# CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
+# CONFIG_FIXED_PHY is not set
+CONFIG_MDIO_BITBANG=y
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+# CONFIG_AX88796 is not set
+# CONFIG_STNIC is not set
+# CONFIG_HAPPYMEAL is not set
+# CONFIG_SUNGEM is not set
+# CONFIG_CASSINI is not set
+# CONFIG_NET_VENDOR_3COM is not set
+CONFIG_SMC91X=y
+# CONFIG_ENC28J60 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_SMC911X is not set
+CONFIG_SMSC911X=y
+# CONFIG_DNET is not set
+# CONFIG_NET_TULIP is not set
+# CONFIG_HP100 is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+# CONFIG_NET_PCI is not set
+# CONFIG_B44 is not set
+# CONFIG_KS8842 is not set
+# CONFIG_KS8851 is not set
+# CONFIG_KS8851_MLL is not set
+# CONFIG_ATL2 is not set
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_TR is not set
+CONFIG_WLAN=y
+# CONFIG_ATMEL is not set
+# CONFIG_PRISM54 is not set
+# CONFIG_USB_ZD1201 is not set
+# CONFIG_HOSTAP is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+
+#
+# USB Network Adapters
+#
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_USBNET is not set
+# CONFIG_WAN is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NET_FC is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_VMXNET3 is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+# CONFIG_INPUT_POLLDEV is not set
+# CONFIG_INPUT_SPARSEKMAP is not set
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ADP5588 is not set
+CONFIG_KEYBOARD_ATKBD=y
+# CONFIG_QT2160 is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_MAX7359 is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_SH_KEYSC is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+CONFIG_INPUT_MOUSE=y
+CONFIG_MOUSE_PS2=y
+CONFIG_MOUSE_PS2_ALPS=y
+CONFIG_MOUSE_PS2_LOGIPS2PP=y
+CONFIG_MOUSE_PS2_SYNAPTICS=y
+CONFIG_MOUSE_PS2_TRACKPOINT=y
+# CONFIG_MOUSE_PS2_ELANTECH is not set
+# CONFIG_MOUSE_PS2_SENTELIC is not set
+# CONFIG_MOUSE_PS2_TOUCHKIT is not set
+# CONFIG_MOUSE_SERIAL is not set
+# CONFIG_MOUSE_APPLETOUCH is not set
+# CONFIG_MOUSE_BCM5974 is not set
+# CONFIG_MOUSE_VSXXXAA is not set
+# CONFIG_MOUSE_SYNAPTICS_I2C is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+CONFIG_SERIO_I8042=y
+CONFIG_SERIO_SERPORT=y
+# CONFIG_SERIO_PCIPS2 is not set
+CONFIG_SERIO_LIBPS2=y
+# CONFIG_SERIO_RAW is not set
+# CONFIG_SERIO_ALTERA_PS2 is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_VT_HW_CONSOLE_BINDING is not set
+CONFIG_DEVKMEM=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+# CONFIG_NOZOMI is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_MAX3100 is not set
+CONFIG_SERIAL_SH_SCI=y
+CONFIG_SERIAL_SH_SCI_NR_UARTS=6
+CONFIG_SERIAL_SH_SCI_CONSOLE=y
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_JSM is not set
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+CONFIG_DEVPORT=y
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+# CONFIG_I2C_COMPAT is not set
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_HELPER_AUTO=y
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# PC SMBus host controller drivers
+#
+# CONFIG_I2C_ALI1535 is not set
+# CONFIG_I2C_ALI1563 is not set
+# CONFIG_I2C_ALI15X3 is not set
+# CONFIG_I2C_AMD756 is not set
+# CONFIG_I2C_AMD8111 is not set
+# CONFIG_I2C_I801 is not set
+# CONFIG_I2C_ISCH is not set
+# CONFIG_I2C_PIIX4 is not set
+# CONFIG_I2C_NFORCE2 is not set
+# CONFIG_I2C_SIS5595 is not set
+# CONFIG_I2C_SIS630 is not set
+# CONFIG_I2C_SIS96X is not set
+# CONFIG_I2C_VIA is not set
+# CONFIG_I2C_VIAPRO is not set
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+# CONFIG_I2C_DESIGNWARE is not set
+# CONFIG_I2C_OCORES is not set
+# CONFIG_I2C_SH_MOBILE is not set
+# CONFIG_I2C_SIMTEC is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_TAOS_EVM is not set
+# CONFIG_I2C_TINY_USB is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_STUB is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+# CONFIG_SPI_BITBANG is not set
+# CONFIG_SPI_SH_MSIOF is not set
+# CONFIG_SPI_SH_SCI is not set
+# CONFIG_SPI_XILINX is not set
+# CONFIG_SPI_DESIGNWARE is not set
+
+#
+# SPI Protocol Masters
+#
+# CONFIG_SPI_SPIDEV is not set
+# CONFIG_SPI_TLE62X0 is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_THERMAL is not set
+CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+# CONFIG_ALIM7101_WDT is not set
+# CONFIG_SH_WDT is not set
+
+#
+# PCI-based Watchdog Cards
+#
+# CONFIG_PCIPCWATCHDOG is not set
+# CONFIG_WDTPCI is not set
+
+#
+# USB-based Watchdog Cards
+#
+# CONFIG_USBPCWATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_MFD_SH_MOBILE_SDHI is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_TWL4030_CORE is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_PMIC_DA903X is not set
+# CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_MC13783 is not set
+# CONFIG_AB3100_CORE is not set
+# CONFIG_EZX_PCAP is not set
+# CONFIG_MFD_88PM8607 is not set
+# CONFIG_AB4500_CORE is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
+
+#
+# Graphics support
+#
+CONFIG_VGA_ARB=y
+# CONFIG_DRM is not set
+# CONFIG_VGASTATE is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=m
+# CONFIG_FB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+
+#
+# Console display driver support
+#
+CONFIG_DUMMY_CONSOLE=y
+# CONFIG_SOUND is not set
+CONFIG_HID_SUPPORT=y
+CONFIG_HID=y
+# CONFIG_HIDRAW is not set
+
+#
+# USB Input Devices
+#
+CONFIG_USB_HID=y
+# CONFIG_HID_PID is not set
+# CONFIG_USB_HIDDEV is not set
+
+#
+# Special HID drivers
+#
+# CONFIG_HID_A4TECH is not set
+# CONFIG_HID_APPLE is not set
+# CONFIG_HID_BELKIN is not set
+# CONFIG_HID_CHERRY is not set
+# CONFIG_HID_CHICONY is not set
+# CONFIG_HID_CYPRESS is not set
+# CONFIG_HID_DRAGONRISE is not set
+# CONFIG_HID_EZKEY is not set
+# CONFIG_HID_KYE is not set
+# CONFIG_HID_GYRATION is not set
+# CONFIG_HID_TWINHAN is not set
+# CONFIG_HID_KENSINGTON is not set
+# CONFIG_HID_LOGITECH is not set
+# CONFIG_HID_MICROSOFT is not set
+# CONFIG_HID_MONTEREY is not set
+# CONFIG_HID_NTRIG is not set
+# CONFIG_HID_PANTHERLORD is not set
+# CONFIG_HID_PETALYNX is not set
+# CONFIG_HID_SAMSUNG is not set
+# CONFIG_HID_SONY is not set
+# CONFIG_HID_SUNPLUS is not set
+# CONFIG_HID_GREENASIA is not set
+# CONFIG_HID_SMARTJOYPLUS is not set
+# CONFIG_HID_TOPSEED is not set
+# CONFIG_HID_THRUSTMASTER is not set
+# CONFIG_HID_ZEROPLUS is not set
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+CONFIG_USB_ARCH_HAS_EHCI=y
+CONFIG_USB=y
+# CONFIG_USB_DEBUG is not set
+# CONFIG_USB_ANNOUNCE_NEW_DEVICES is not set
+
+#
+# Miscellaneous USB options
+#
+# CONFIG_USB_DEVICEFS is not set
+CONFIG_USB_DEVICE_CLASS=y
+# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_SUSPEND is not set
+# CONFIG_USB_OTG is not set
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+CONFIG_USB_MON=y
+# CONFIG_USB_WUSB is not set
+# CONFIG_USB_WUSB_CBAF is not set
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_C67X00_HCD is not set
+# CONFIG_USB_XHCI_HCD is not set
+# CONFIG_USB_EHCI_HCD is not set
+# CONFIG_USB_OXU210HP_HCD is not set
+# CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
+CONFIG_USB_OHCI_HCD=y
+# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
+# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
+# CONFIG_USB_UHCI_HCD is not set
+# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
+# CONFIG_USB_WHCI_HCD is not set
+# CONFIG_USB_HWA_HCD is not set
+# CONFIG_USB_GADGET_MUSB_HDRC is not set
+
+#
+# USB Device Class drivers
+#
+# CONFIG_USB_ACM is not set
+# CONFIG_USB_PRINTER is not set
+# CONFIG_USB_WDM is not set
+# CONFIG_USB_TMC is not set
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
+#
+
+#
+# also be needed; see USB_STORAGE Help for more info
+#
+CONFIG_USB_STORAGE=y
+# CONFIG_USB_STORAGE_DEBUG is not set
+# CONFIG_USB_STORAGE_DATAFAB is not set
+# CONFIG_USB_STORAGE_FREECOM is not set
+# CONFIG_USB_STORAGE_ISD200 is not set
+# CONFIG_USB_STORAGE_USBAT is not set
+# CONFIG_USB_STORAGE_SDDR09 is not set
+# CONFIG_USB_STORAGE_SDDR55 is not set
+# CONFIG_USB_STORAGE_JUMPSHOT is not set
+# CONFIG_USB_STORAGE_ALAUDA is not set
+# CONFIG_USB_STORAGE_ONETOUCH is not set
+# CONFIG_USB_STORAGE_KARMA is not set
+# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
+# CONFIG_USB_LIBUSUAL is not set
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
+
+#
+# USB port drivers
+#
+# CONFIG_USB_SERIAL is not set
+
+#
+# USB Miscellaneous drivers
+#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_ADUTUX is not set
+# CONFIG_USB_SEVSEG is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_BERRY_CHARGE is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_FTDI_ELAN is not set
+# CONFIG_USB_APPLEDISPLAY is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TRANCEVIBRATOR is not set
+# CONFIG_USB_IOWARRIOR is not set
+# CONFIG_USB_TEST is not set
+# CONFIG_USB_ISIGHTFW is not set
+# CONFIG_USB_VST is not set
+CONFIG_USB_GADGET=y
+# CONFIG_USB_GADGET_DEBUG is not set
+# CONFIG_USB_GADGET_DEBUG_FILES is not set
+# CONFIG_USB_GADGET_DEBUG_FS is not set
+CONFIG_USB_GADGET_VBUS_DRAW=2
+CONFIG_USB_GADGET_SELECTED=y
+# CONFIG_USB_GADGET_AT91 is not set
+# CONFIG_USB_GADGET_ATMEL_USBA is not set
+# CONFIG_USB_GADGET_FSL_USB2 is not set
+# CONFIG_USB_GADGET_LH7A40X is not set
+# CONFIG_USB_GADGET_OMAP is not set
+# CONFIG_USB_GADGET_PXA25X is not set
+# CONFIG_USB_GADGET_R8A66597 is not set
+# CONFIG_USB_GADGET_PXA27X is not set
+# CONFIG_USB_GADGET_S3C_HSOTG is not set
+# CONFIG_USB_GADGET_IMX is not set
+# CONFIG_USB_GADGET_S3C2410 is not set
+CONFIG_USB_GADGET_M66592=y
+CONFIG_USB_M66592=y
+# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_FSL_QE is not set
+# CONFIG_USB_GADGET_CI13XXX is not set
+# CONFIG_USB_GADGET_NET2280 is not set
+# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_LANGWELL is not set
+# CONFIG_USB_GADGET_DUMMY_HCD is not set
+CONFIG_USB_GADGET_DUALSPEED=y
+# CONFIG_USB_ZERO is not set
+# CONFIG_USB_AUDIO is not set
+# CONFIG_USB_ETH is not set
+# CONFIG_USB_GADGETFS is not set
+# CONFIG_USB_FILE_STORAGE is not set
+# CONFIG_USB_MASS_STORAGE is not set
+# CONFIG_USB_G_SERIAL is not set
+# CONFIG_USB_MIDI_GADGET is not set
+# CONFIG_USB_G_PRINTER is not set
+# CONFIG_USB_CDC_COMPOSITE is not set
+# CONFIG_USB_G_MULTI is not set
+
+#
+# OTG and related infrastructure
+#
+# CONFIG_NOP_USB_XCEIV is not set
+# CONFIG_UWB is not set
+# CONFIG_MMC is not set
+# CONFIG_MEMSTICK is not set
+# CONFIG_NEW_LEDS is not set
+# CONFIG_ACCESSIBILITY is not set
+# CONFIG_INFINIBAND is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+CONFIG_RTC_DRV_MAX6900=y
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_BQ32K is not set
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
+
+#
+# SPI RTC drivers
+#
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
+# CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
+# CONFIG_RTC_DRV_PCF2123 is not set
+
+#
+# Platform RTC drivers
+#
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_MSM6242 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_RP5C01 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+
+#
+# on-CPU RTC drivers
+#
+CONFIG_RTC_DRV_SH=y
+# CONFIG_RTC_DRV_GENERIC is not set
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
+CONFIG_UIO=m
+# CONFIG_UIO_CIF is not set
+# CONFIG_UIO_PDRV is not set
+# CONFIG_UIO_PDRV_GENIRQ is not set
+# CONFIG_UIO_SMX is not set
+# CONFIG_UIO_AEC is not set
+# CONFIG_UIO_SERCOS3 is not set
+# CONFIG_UIO_PCI_GENERIC is not set
+
+#
+# TI VLYNQ
+#
+# CONFIG_STAGING is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_XATTR=y
+# CONFIG_EXT3_FS_POSIX_ACL is not set
+# CONFIG_EXT3_FS_SECURITY is not set
+# CONFIG_EXT4_FS is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+CONFIG_HUGETLBFS=y
+CONFIG_HUGETLB_PAGE=y
+# CONFIG_CONFIGFS_FS is not set
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_SQUASHFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+# CONFIG_NFS_V4 is not set
+CONFIG_ROOT_NFS=y
+# CONFIG_NFSD is not set
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+# CONFIG_NLS_CODEPAGE_437 is not set
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+# CONFIG_NLS_ISO8859_1 is not set
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+# CONFIG_DLM is not set
+
+#
+# Kernel hacking
+#
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_PRINTK_TIME=y
+CONFIG_ENABLE_WARN_DEPRECATED=y
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_FRAME_WARN=1024
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_SHIRQ=y
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
+CONFIG_SCHED_DEBUG=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_DEBUG_SLAB is not set
+CONFIG_DEBUG_PREEMPT=y
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+CONFIG_STACKTRACE=y
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+# CONFIG_DEBUG_INFO is not set
+CONFIG_DEBUG_VM=y
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
+CONFIG_FRAME_POINTER=y
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_LATENCYTOP is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+# CONFIG_PAGE_POISONING is not set
+CONFIG_NOP_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
+CONFIG_RING_BUFFER=y
+CONFIG_EVENT_TRACING=y
+CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_TRACING=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+# CONFIG_FUNCTION_TRACER is not set
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_PREEMPT_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_FTRACE_SYSCALLS is not set
+# CONFIG_BOOT_TRACER is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+CONFIG_KSYM_TRACER=y
+# CONFIG_PROFILE_KSYM_TRACER is not set
+# CONFIG_STACK_TRACER is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_RING_BUFFER_BENCHMARK is not set
+# CONFIG_DYNAMIC_DEBUG is not set
+# CONFIG_DMA_API_DEBUG is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+# CONFIG_SH_STANDARD_BIOS is not set
+# CONFIG_STACK_DEBUG is not set
+CONFIG_DEBUG_STACK_USAGE=y
+# CONFIG_4KSTACKS is not set
+CONFIG_DUMP_CODE=y
+CONFIG_DWARF_UNWINDER=y
+# CONFIG_SH_NO_BSS_INIT is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_DEFAULT_SECURITY_SELINUX is not set
+# CONFIG_DEFAULT_SECURITY_SMACK is not set
+# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+# CONFIG_CRYPTO_MANAGER is not set
+# CONFIG_CRYPTO_MANAGER2 is not set
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_CRYPTD is not set
+# CONFIG_CRYPTO_AUTHENC is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+# CONFIG_CRYPTO_CBC is not set
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+# CONFIG_CRYPTO_ECB is not set
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
+# CONFIG_CRYPTO_MD4 is not set
+# CONFIG_CRYPTO_MD5 is not set
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_DES is not set
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_ZLIB is not set
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_HW=y
+# CONFIG_CRYPTO_DEV_HIFN_795X is not set
+CONFIG_BINARY_PRINTF=y
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+# CONFIG_CRC_T10DIF is not set
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
+CONFIG_NLATTR=y
+CONFIG_GENERIC_ATOMIC64=y
diff --git a/arch/sh/drivers/dma/dma-pvr2.c b/arch/sh/drivers/dma/dma-pvr2.c
index 391cbe1..3cee58e 100644
--- a/arch/sh/drivers/dma/dma-pvr2.c
+++ b/arch/sh/drivers/dma/dma-pvr2.c
@@ -40,10 +40,10 @@
 
 static int pvr2_request_dma(struct dma_channel *chan)
 {
-	if (ctrl_inl(PVR2_DMA_MODE) != 0)
+	if (__raw_readl(PVR2_DMA_MODE) != 0)
 		return -EBUSY;
 
-	ctrl_outl(0, PVR2_DMA_LMMODE0);
+	__raw_writel(0, PVR2_DMA_LMMODE0);
 
 	return 0;
 }
@@ -60,9 +60,9 @@
 
 	xfer_complete = 0;
 
-	ctrl_outl(chan->dar, PVR2_DMA_ADDR);
-	ctrl_outl(chan->count, PVR2_DMA_COUNT);
-	ctrl_outl(chan->mode & DMA_MODE_MASK, PVR2_DMA_MODE);
+	__raw_writel(chan->dar, PVR2_DMA_ADDR);
+	__raw_writel(chan->count, PVR2_DMA_COUNT);
+	__raw_writel(chan->mode & DMA_MODE_MASK, PVR2_DMA_MODE);
 
 	return 0;
 }
diff --git a/arch/sh/drivers/dma/dma-sh.c b/arch/sh/drivers/dma/dma-sh.c
index 37fb5b8..8272087 100644
--- a/arch/sh/drivers/dma/dma-sh.c
+++ b/arch/sh/drivers/dma/dma-sh.c
@@ -52,11 +52,14 @@
  *
  * iterations to complete the transfer.
  */
+static unsigned int ts_shift[] = TS_SHIFT;
 static inline unsigned int calc_xmit_shift(struct dma_channel *chan)
 {
-	u32 chcr = ctrl_inl(dma_base_addr[chan->chan] + CHCR);
+	u32 chcr = __raw_readl(dma_base_addr[chan->chan] + CHCR);
+	int cnt = ((chcr & CHCR_TS_LOW_MASK) >> CHCR_TS_LOW_SHIFT) |
+		((chcr & CHCR_TS_HIGH_MASK) >> CHCR_TS_HIGH_SHIFT);
 
-	return ts_shift[(chcr & CHCR_TS_MASK)>>CHCR_TS_SHIFT];
+	return ts_shift[cnt];
 }
 
 /*
@@ -70,13 +73,13 @@
 	struct dma_channel *chan = dev_id;
 	u32 chcr;
 
-	chcr = ctrl_inl(dma_base_addr[chan->chan] + CHCR);
+	chcr = __raw_readl(dma_base_addr[chan->chan] + CHCR);
 
 	if (!(chcr & CHCR_TE))
 		return IRQ_NONE;
 
 	chcr &= ~(CHCR_IE | CHCR_DE);
-	ctrl_outl(chcr, (dma_base_addr[chan->chan] + CHCR));
+	__raw_writel(chcr, (dma_base_addr[chan->chan] + CHCR));
 
 	wake_up(&chan->wait_queue);
 
@@ -115,7 +118,7 @@
 		chan->flags &= ~DMA_TEI_CAPABLE;
 	}
 
-	ctrl_outl(chcr, (dma_base_addr[chan->chan] + CHCR));
+	__raw_writel(chcr, (dma_base_addr[chan->chan] + CHCR));
 
 	chan->flags |= DMA_CONFIGURED;
 	return 0;
@@ -126,13 +129,13 @@
 	int irq;
 	u32 chcr;
 
-	chcr = ctrl_inl(dma_base_addr[chan->chan] + CHCR);
+	chcr = __raw_readl(dma_base_addr[chan->chan] + CHCR);
 	chcr |= CHCR_DE;
 
 	if (chan->flags & DMA_TEI_CAPABLE)
 		chcr |= CHCR_IE;
 
-	ctrl_outl(chcr, (dma_base_addr[chan->chan] + CHCR));
+	__raw_writel(chcr, (dma_base_addr[chan->chan] + CHCR));
 
 	if (chan->flags & DMA_TEI_CAPABLE) {
 		irq = get_dmte_irq(chan->chan);
@@ -150,9 +153,9 @@
 		disable_irq(irq);
 	}
 
-	chcr = ctrl_inl(dma_base_addr[chan->chan] + CHCR);
+	chcr = __raw_readl(dma_base_addr[chan->chan] + CHCR);
 	chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE);
-	ctrl_outl(chcr, (dma_base_addr[chan->chan] + CHCR));
+	__raw_writel(chcr, (dma_base_addr[chan->chan] + CHCR));
 }
 
 static int sh_dmac_xfer_dma(struct dma_channel *chan)
@@ -183,12 +186,12 @@
 	 */
 	if (chan->sar || (mach_is_dreamcast() &&
 			  chan->chan == PVR2_CASCADE_CHAN))
-		ctrl_outl(chan->sar, (dma_base_addr[chan->chan]+SAR));
+		__raw_writel(chan->sar, (dma_base_addr[chan->chan]+SAR));
 	if (chan->dar || (mach_is_dreamcast() &&
 			  chan->chan == PVR2_CASCADE_CHAN))
-		ctrl_outl(chan->dar, (dma_base_addr[chan->chan] + DAR));
+		__raw_writel(chan->dar, (dma_base_addr[chan->chan] + DAR));
 
-	ctrl_outl(chan->count >> calc_xmit_shift(chan),
+	__raw_writel(chan->count >> calc_xmit_shift(chan),
 		(dma_base_addr[chan->chan] + TCR));
 
 	sh_dmac_enable_dma(chan);
@@ -198,10 +201,10 @@
 
 static int sh_dmac_get_dma_residue(struct dma_channel *chan)
 {
-	if (!(ctrl_inl(dma_base_addr[chan->chan] + CHCR) & CHCR_DE))
+	if (!(__raw_readl(dma_base_addr[chan->chan] + CHCR) & CHCR_DE))
 		return 0;
 
-	return ctrl_inl(dma_base_addr[chan->chan] + TCR)
+	return __raw_readl(dma_base_addr[chan->chan] + TCR)
 		 << calc_xmit_shift(chan);
 }
 
diff --git a/arch/sh/drivers/dma/dmabrg.c b/arch/sh/drivers/dma/dmabrg.c
index 5e22689..72622e3 100644
--- a/arch/sh/drivers/dma/dmabrg.c
+++ b/arch/sh/drivers/dma/dmabrg.c
@@ -86,8 +86,8 @@
 	unsigned long dcr;
 	unsigned int i;
 
-	dcr = ctrl_inl(DMABRGCR);
-	ctrl_outl(dcr & ~0x00ff0003, DMABRGCR);	/* ack all */
+	dcr = __raw_readl(DMABRGCR);
+	__raw_writel(dcr & ~0x00ff0003, DMABRGCR);	/* ack all */
 	dcr &= dcr >> 8;	/* ignore masked */
 
 	/* USB stuff, get it out of the way first */
@@ -109,17 +109,17 @@
 static void dmabrg_disable_irq(unsigned int dmairq)
 {
 	unsigned long dcr;
-	dcr = ctrl_inl(DMABRGCR);
+	dcr = __raw_readl(DMABRGCR);
 	dcr &= ~(1 << ((dmairq > 1) ? dmairq + 22 : dmairq + 8));
-	ctrl_outl(dcr, DMABRGCR);
+	__raw_writel(dcr, DMABRGCR);
 }
 
 static void dmabrg_enable_irq(unsigned int dmairq)
 {
 	unsigned long dcr;
-	dcr = ctrl_inl(DMABRGCR);
+	dcr = __raw_readl(DMABRGCR);
 	dcr |= (1 << ((dmairq > 1) ? dmairq + 22 : dmairq + 8));
-	ctrl_outl(dcr, DMABRGCR);
+	__raw_writel(dcr, DMABRGCR);
 }
 
 int dmabrg_request_irq(unsigned int dmairq, void(*handler)(void*),
@@ -165,13 +165,13 @@
 		printk(KERN_INFO "DMABRG: DMAC ch0 not reserved!\n");
 #endif
 
-	ctrl_outl(0, DMABRGCR);
-	ctrl_outl(0, DMACHCR0);
-	ctrl_outl(0x94000000, DMARSRA);	/* enable DMABRG in DMAC 0 */
+	__raw_writel(0, DMABRGCR);
+	__raw_writel(0, DMACHCR0);
+	__raw_writel(0x94000000, DMARSRA);	/* enable DMABRG in DMAC 0 */
 
 	/* enable DMABRG mode, enable the DMAC */
-	or = ctrl_inl(DMAOR);
-	ctrl_outl(or | DMAOR_BRG | DMAOR_DMEN, DMAOR);
+	or = __raw_readl(DMAOR);
+	__raw_writel(or | DMAOR_BRG | DMAOR_DMEN, DMAOR);
 
 	ret = request_irq(DMABRGI0, dmabrg_irq, IRQF_DISABLED,
 			"DMABRG USB address error", NULL);
diff --git a/arch/sh/drivers/heartbeat.c b/arch/sh/drivers/heartbeat.c
index a9339a6..2acbc79 100644
--- a/arch/sh/drivers/heartbeat.c
+++ b/arch/sh/drivers/heartbeat.c
@@ -1,7 +1,7 @@
 /*
  * Generic heartbeat driver for regular LED banks
  *
- * Copyright (C) 2007  Paul Mundt
+ * Copyright (C) 2007 - 2010  Paul Mundt
  *
  * Most SH reference boards include a number of individual LEDs that can
  * be independently controlled (either via a pre-defined hardware
@@ -27,7 +27,7 @@
 #include <asm/heartbeat.h>
 
 #define DRV_NAME "heartbeat"
-#define DRV_VERSION "0.1.1"
+#define DRV_VERSION "0.1.2"
 
 static unsigned char default_bit_pos[] = { 0, 1, 2, 3, 4, 5, 6, 7 };
 
@@ -98,7 +98,7 @@
 			return -ENOMEM;
 	}
 
-	hd->base = ioremap_nocache(res->start, res->end - res->start + 1);
+	hd->base = ioremap_nocache(res->start, resource_size(res));
 	if (unlikely(!hd->base)) {
 		dev_err(&pdev->dev, "ioremap failed\n");
 
@@ -117,8 +117,20 @@
 	for (i = 0; i < hd->nr_bits; i++)
 		hd->mask |= (1 << hd->bit_pos[i]);
 
-	if (!hd->regsize)
-		hd->regsize = 8;	/* default access size */
+	if (!hd->regsize) {
+		switch (res->flags & IORESOURCE_MEM_TYPE_MASK) {
+		case IORESOURCE_MEM_32BIT:
+			hd->regsize = 32;
+			break;
+		case IORESOURCE_MEM_16BIT:
+			hd->regsize = 16;
+			break;
+		case IORESOURCE_MEM_8BIT:
+		default:
+			hd->regsize = 8;
+			break;
+		}
+	}
 
 	setup_timer(&hd->timer, heartbeat_timer, (unsigned long)hd);
 	platform_set_drvdata(pdev, hd);
diff --git a/arch/sh/drivers/pci/Makefile b/arch/sh/drivers/pci/Makefile
index 08af1f4..4a59e68 100644
--- a/arch/sh/drivers/pci/Makefile
+++ b/arch/sh/drivers/pci/Makefile
@@ -1,14 +1,14 @@
 #
 # Makefile for the PCI specific kernel interface routines under Linux.
 #
-obj-y					+= pci.o
+obj-y					+= common.o pci.o
 
 obj-$(CONFIG_CPU_SUBTYPE_SH7751)	+= pci-sh7751.o ops-sh4.o
 obj-$(CONFIG_CPU_SUBTYPE_SH7751R)	+= pci-sh7751.o ops-sh4.o
 obj-$(CONFIG_CPU_SUBTYPE_SH7763)	+= pci-sh7780.o ops-sh4.o
 obj-$(CONFIG_CPU_SUBTYPE_SH7780)	+= pci-sh7780.o ops-sh4.o
 obj-$(CONFIG_CPU_SUBTYPE_SH7785)	+= pci-sh7780.o ops-sh4.o
-obj-$(CONFIG_CPU_SUBTYPE_SH7786)	+= ops-sh7786.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7786)	+= pcie-sh7786.o ops-sh7786.o
 obj-$(CONFIG_CPU_SH5)			+= pci-sh5.o ops-sh5.o
 
 obj-$(CONFIG_SH_DREAMCAST)		+= ops-dreamcast.o fixups-dreamcast.o \
@@ -25,4 +25,3 @@
 obj-$(CONFIG_SH_LANDISK)		+= fixups-landisk.o
 obj-$(CONFIG_SH_LBOX_RE2)		+= fixups-rts7751r2d.o
 obj-$(CONFIG_SH_CAYMAN)			+= fixups-cayman.o
-obj-$(CONFIG_SH_URQUELL)		+= pcie-sh7786.o
diff --git a/arch/sh/drivers/pci/common.c b/arch/sh/drivers/pci/common.c
new file mode 100644
index 0000000..dbf1381
--- /dev/null
+++ b/arch/sh/drivers/pci/common.c
@@ -0,0 +1,162 @@
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+
+/*
+ * These functions are used early on before PCI scanning is done
+ * and all of the pci_dev and pci_bus structures have been created.
+ */
+static struct pci_dev *fake_pci_dev(struct pci_channel *hose,
+	int top_bus, int busnr, int devfn)
+{
+	static struct pci_dev dev;
+	static struct pci_bus bus;
+
+	dev.bus = &bus;
+	dev.sysdata = hose;
+	dev.devfn = devfn;
+	bus.number = busnr;
+	bus.sysdata = hose;
+	bus.ops = hose->pci_ops;
+
+	if (busnr != top_bus)
+		/* Fake a parent bus structure. */
+		bus.parent = &bus;
+	else
+		bus.parent = NULL;
+
+	return &dev;
+}
+
+#define EARLY_PCI_OP(rw, size, type)					\
+int __init early_##rw##_config_##size(struct pci_channel *hose,		\
+	int top_bus, int bus, int devfn, int offset, type value)	\
+{									\
+	return pci_##rw##_config_##size(				\
+		fake_pci_dev(hose, top_bus, bus, devfn),		\
+		offset, value);						\
+}
+
+EARLY_PCI_OP(read, byte, u8 *)
+EARLY_PCI_OP(read, word, u16 *)
+EARLY_PCI_OP(read, dword, u32 *)
+EARLY_PCI_OP(write, byte, u8)
+EARLY_PCI_OP(write, word, u16)
+EARLY_PCI_OP(write, dword, u32)
+
+int __init pci_is_66mhz_capable(struct pci_channel *hose,
+				int top_bus, int current_bus)
+{
+	u32 pci_devfn;
+	unsigned short vid;
+	int cap66 = -1;
+	u16 stat;
+
+	printk(KERN_INFO "PCI: Checking 66MHz capabilities...\n");
+
+	for (pci_devfn = 0; pci_devfn < 0xff; pci_devfn++) {
+		if (PCI_FUNC(pci_devfn))
+			continue;
+		if (early_read_config_word(hose, top_bus, current_bus,
+					   pci_devfn, PCI_VENDOR_ID, &vid) !=
+		    PCIBIOS_SUCCESSFUL)
+			continue;
+		if (vid == 0xffff)
+			continue;
+
+		/* check 66MHz capability */
+		if (cap66 < 0)
+			cap66 = 1;
+		if (cap66) {
+			early_read_config_word(hose, top_bus, current_bus,
+					       pci_devfn, PCI_STATUS, &stat);
+			if (!(stat & PCI_STATUS_66MHZ)) {
+				printk(KERN_DEBUG
+				       "PCI: %02x:%02x not 66MHz capable.\n",
+				       current_bus, pci_devfn);
+				cap66 = 0;
+				break;
+			}
+		}
+	}
+
+	return cap66 > 0;
+}
+
+static void pcibios_enable_err(unsigned long __data)
+{
+	struct pci_channel *hose = (struct pci_channel *)__data;
+
+	del_timer(&hose->err_timer);
+	printk(KERN_DEBUG "PCI: re-enabling error IRQ.\n");
+	enable_irq(hose->err_irq);
+}
+
+static void pcibios_enable_serr(unsigned long __data)
+{
+	struct pci_channel *hose = (struct pci_channel *)__data;
+
+	del_timer(&hose->serr_timer);
+	printk(KERN_DEBUG "PCI: re-enabling system error IRQ.\n");
+	enable_irq(hose->serr_irq);
+}
+
+void pcibios_enable_timers(struct pci_channel *hose)
+{
+	if (hose->err_irq) {
+		init_timer(&hose->err_timer);
+		hose->err_timer.data = (unsigned long)hose;
+		hose->err_timer.function = pcibios_enable_err;
+	}
+
+	if (hose->serr_irq) {
+		init_timer(&hose->serr_timer);
+		hose->serr_timer.data = (unsigned long)hose;
+		hose->serr_timer.function = pcibios_enable_serr;
+	}
+}
+
+/*
+ * A simple handler for the regular PCI status errors, called from IRQ
+ * context.
+ */
+unsigned int pcibios_handle_status_errors(unsigned long addr,
+					  unsigned int status,
+					  struct pci_channel *hose)
+{
+	unsigned int cmd = 0;
+
+	if (status & PCI_STATUS_REC_MASTER_ABORT) {
+		printk(KERN_DEBUG "PCI: master abort, pc=0x%08lx\n", addr);
+		cmd |= PCI_STATUS_REC_MASTER_ABORT;
+	}
+
+	if (status & PCI_STATUS_REC_TARGET_ABORT) {
+		printk(KERN_DEBUG "PCI: target abort: ");
+		pcibios_report_status(PCI_STATUS_REC_TARGET_ABORT |
+				      PCI_STATUS_SIG_TARGET_ABORT |
+				      PCI_STATUS_REC_MASTER_ABORT, 1);
+		printk("\n");
+
+		cmd |= PCI_STATUS_REC_TARGET_ABORT;
+	}
+
+	if (status & (PCI_STATUS_PARITY | PCI_STATUS_DETECTED_PARITY)) {
+		printk(KERN_DEBUG "PCI: parity error detected: ");
+		pcibios_report_status(PCI_STATUS_PARITY |
+				      PCI_STATUS_DETECTED_PARITY, 1);
+		printk("\n");
+
+		cmd |= PCI_STATUS_PARITY | PCI_STATUS_DETECTED_PARITY;
+
+		/* Now back off of the IRQ for awhile */
+		if (hose->err_irq) {
+			disable_irq_nosync(hose->err_irq);
+			hose->err_timer.expires = jiffies + HZ;
+			add_timer(&hose->err_timer);
+		}
+	}
+
+	return cmd;
+}
diff --git a/arch/sh/drivers/pci/fixups-dreamcast.c b/arch/sh/drivers/pci/fixups-dreamcast.c
index ed7f489..942ef4f 100644
--- a/arch/sh/drivers/pci/fixups-dreamcast.c
+++ b/arch/sh/drivers/pci/fixups-dreamcast.c
@@ -39,7 +39,7 @@
 		/*
 		 * We also assume that dev->devfn == 0
 		 */
-		dev->resource[1].start	= p->io_resource->start  + 0x100;
+		dev->resource[1].start	= p->resources[0].start  + 0x100;
 		dev->resource[1].end	= dev->resource[1].start + 0x200 - 1;
 
 		/*
diff --git a/arch/sh/drivers/pci/fixups-r7780rp.c b/arch/sh/drivers/pci/fixups-r7780rp.c
index 15ca65c..08b2d86 100644
--- a/arch/sh/drivers/pci/fixups-r7780rp.c
+++ b/arch/sh/drivers/pci/fixups-r7780rp.c
@@ -22,15 +22,3 @@
 {
 	return irq_tab[slot];
 }
-
-int pci_fixup_pcic(struct pci_channel *chan)
-{
-	pci_write_reg(chan, 0x000043ff, SH4_PCIINTM);
-	pci_write_reg(chan, 0x00000000, SH7780_PCIIBAR);
-	pci_write_reg(chan, 0x08000000, SH7780_PCICSCR0);
-	pci_write_reg(chan, 0x0000001b, SH7780_PCICSAR0);
-	pci_write_reg(chan, 0xfd000000, SH7780_PCICSCR1);
-	pci_write_reg(chan, 0x0000000f, SH7780_PCICSAR1);
-
-	return 0;
-}
diff --git a/arch/sh/drivers/pci/fixups-rts7751r2d.c b/arch/sh/drivers/pci/fixups-rts7751r2d.c
index 7898f14..e248516 100644
--- a/arch/sh/drivers/pci/fixups-rts7751r2d.c
+++ b/arch/sh/drivers/pci/fixups-rts7751r2d.c
@@ -43,7 +43,7 @@
 {
 	unsigned long bcr1, mcr;
 
-	bcr1 = ctrl_inl(SH7751_BCR1);
+	bcr1 = __raw_readl(SH7751_BCR1);
 	bcr1 |= 0x40080000;	/* Enable Bit 19 BREQEN, set PCIC to slave */
 	pci_write_reg(chan, bcr1, SH4_PCIBCR1);
 
@@ -54,7 +54,7 @@
 	pci_write_reg(chan, 0xfb900047, SH7751_PCICONF1);
 	pci_write_reg(chan, 0xab000001, SH7751_PCICONF4);
 
-	mcr = ctrl_inl(SH7751_MCR);
+	mcr = __raw_readl(SH7751_MCR);
 	mcr = (mcr & PCIMCR_MRSET_OFF) & PCIMCR_RFSH_OFF;
 	pci_write_reg(chan, mcr, SH4_PCIMCR);
 
diff --git a/arch/sh/drivers/pci/fixups-sdk7780.c b/arch/sh/drivers/pci/fixups-sdk7780.c
index 250b0ed..0930f98 100644
--- a/arch/sh/drivers/pci/fixups-sdk7780.c
+++ b/arch/sh/drivers/pci/fixups-sdk7780.c
@@ -31,22 +31,3 @@
 {
        return sdk7780_irq_tab[pin-1][slot];
 }
-int pci_fixup_pcic(struct pci_channel *chan)
-{
-	/* Enable all interrupts, so we know what to fix */
-	pci_write_reg(chan, 0x0000C3FF, SH7780_PCIIMR);
-
-	/* Set up standard PCI config registers */
-	pci_write_reg(chan, 0x08000000, SH7780_PCIMBAR0);	/* PCI */
-	pci_write_reg(chan, 0x08000000, SH4_PCILAR0);	/* SHwy */
-	pci_write_reg(chan, 0x07F00001, SH4_PCILSR0);	/* size 128M w/ MBAR */
-
-	pci_write_reg(chan, 0x00000000, SH7780_PCIMBAR1);
-	pci_write_reg(chan, 0x00000000, SH4_PCILAR1);
-	pci_write_reg(chan, 0x00000000, SH4_PCILSR1);
-
-	pci_write_reg(chan, 0xAB000801, SH7780_PCIIBAR);
-	pci_write_reg(chan, 0xA5000C01, SH4_PCICR);
-
-	return 0;
-}
diff --git a/arch/sh/drivers/pci/fixups-se7751.c b/arch/sh/drivers/pci/fixups-se7751.c
index 475fa9f..a4c7d3a 100644
--- a/arch/sh/drivers/pci/fixups-se7751.c
+++ b/arch/sh/drivers/pci/fixups-se7751.c
@@ -97,12 +97,12 @@
 	* meaning all calls go straight through... use BUG_ON to
 	* catch erroneous assumption.
 	*/
-	BUG_ON(chan->mem_resource->start != SH7751_PCI_MEMORY_BASE);
+	BUG_ON(chan->resources[1].start != SH7751_PCI_MEMORY_BASE);
 
-	PCIC_WRITE(SH7751_PCIMBR, chan->mem_resource->start);
+	PCIC_WRITE(SH7751_PCIMBR, chan->resources[1].start);
 
 	/* Set IOBR for window containing area specified in pci.h */
-	PCIC_WRITE(SH7751_PCIIOBR, (chan->io_resource->start & SH7751_PCIIOBR_MASK));
+	PCIC_WRITE(SH7751_PCIIOBR, (chan->resources[0].start & SH7751_PCIIOBR_MASK));
 
 	/* All done, may as well say so... */
 	printk("SH7751 PCI: Finished initialization of the PCI controller\n");
diff --git a/arch/sh/drivers/pci/ops-sh4.c b/arch/sh/drivers/pci/ops-sh4.c
index 78bebeb..0b81999 100644
--- a/arch/sh/drivers/pci/ops-sh4.c
+++ b/arch/sh/drivers/pci/ops-sh4.c
@@ -16,7 +16,7 @@
  * Direct access to PCI hardware...
  */
 #define CONFIG_CMD(bus, devfn, where) \
-	(P1SEG | (bus->number << 16) | (devfn << 8) | (where & ~3))
+	(0x80000000 | (bus->number << 16) | (devfn << 8) | (where & ~3))
 
 static DEFINE_SPINLOCK(sh4_pci_lock);
 
@@ -102,34 +102,6 @@
 	.write		= sh4_pci_write,
 };
 
-/*
- * Not really related to pci_ops, but it's common and not worth shoving
- * somewhere else for now..
- */
-int __init sh4_pci_check_direct(struct pci_channel *chan)
-{
-	/*
-	 * Check if configuration works.
-	 */
-	unsigned int tmp = pci_read_reg(chan, SH4_PCIPAR);
-
-	pci_write_reg(chan, P1SEG, SH4_PCIPAR);
-
-	if (pci_read_reg(chan, SH4_PCIPAR) == P1SEG) {
-		pci_write_reg(chan, tmp, SH4_PCIPAR);
-		printk(KERN_INFO "PCI: Using configuration type 1\n");
-		request_region(chan->reg_base + SH4_PCIPAR, 8,
-			       "PCI conf1");
-		return 0;
-	}
-
-	pci_write_reg(chan, tmp, SH4_PCIPAR);
-
-	printk(KERN_ERR "PCI: %s failed\n", __func__);
-
-	return -EINVAL;
-}
-
 int __attribute__((weak)) pci_fixup_pcic(struct pci_channel *chan)
 {
 	/* Nothing to do. */
diff --git a/arch/sh/drivers/pci/pci-dreamcast.c b/arch/sh/drivers/pci/pci-dreamcast.c
index 210f9d4..6336941 100644
--- a/arch/sh/drivers/pci/pci-dreamcast.c
+++ b/arch/sh/drivers/pci/pci-dreamcast.c
@@ -25,25 +25,25 @@
 #include <asm/irq.h>
 #include <mach/pci.h>
 
-static struct resource gapspci_io_resource = {
-	.name	= "GAPSPCI IO",
-	.start	= GAPSPCI_BBA_CONFIG,
-	.end	= GAPSPCI_BBA_CONFIG + GAPSPCI_BBA_CONFIG_SIZE - 1,
-	.flags	= IORESOURCE_IO,
-};
-
-static struct resource gapspci_mem_resource = {
-	.name	= "GAPSPCI mem",
-	.start	= GAPSPCI_DMA_BASE,
-	.end	= GAPSPCI_DMA_BASE + GAPSPCI_DMA_SIZE - 1,
-	.flags	= IORESOURCE_MEM,
+static struct resource gapspci_resources[] = {
+	{
+		.name	= "GAPSPCI IO",
+		.start	= GAPSPCI_BBA_CONFIG,
+		.end	= GAPSPCI_BBA_CONFIG + GAPSPCI_BBA_CONFIG_SIZE - 1,
+		.flags	= IORESOURCE_IO,
+	},  {
+		.name	= "GAPSPCI mem",
+		.start	= GAPSPCI_DMA_BASE,
+		.end	= GAPSPCI_DMA_BASE + GAPSPCI_DMA_SIZE - 1,
+		.flags	= IORESOURCE_MEM,
+	},
 };
 
 static struct pci_channel dreamcast_pci_controller = {
 	.pci_ops	= &gapspci_pci_ops,
-	.io_resource	= &gapspci_io_resource,
+	.resources	= gapspci_resources,
+	.nr_resources	= ARRAY_SIZE(gapspci_resources),
 	.io_offset	= 0x00000000,
-	.mem_resource	= &gapspci_mem_resource,
 	.mem_offset	= 0x00000000,
 };
 
@@ -95,8 +95,6 @@
 	outl(0x00002001, GAPSPCI_BBA_CONFIG+0x10);
 	outl(0x01000000, GAPSPCI_BBA_CONFIG+0x14);
 
-	register_pci_controller(&dreamcast_pci_controller);
-
-	return 0;
+	return register_pci_controller(&dreamcast_pci_controller);
 }
 arch_initcall(gapspci_init);
diff --git a/arch/sh/drivers/pci/pci-sh4.h b/arch/sh/drivers/pci/pci-sh4.h
index 3d5296c..cbf763b 100644
--- a/arch/sh/drivers/pci/pci-sh4.h
+++ b/arch/sh/drivers/pci/pci-sh4.h
@@ -49,6 +49,17 @@
   #define SH4_PCIINT_MWPD	  0x00000002	/* Master Write PERR Detect */
   #define SH4_PCIINT_MRPD	  0x00000001	/* Master Read PERR Detect */
 #define SH4_PCIINTM		0x118		/* PCI Interrupt Mask */
+  #define SH4_PCIINTM_TTADIM	  BIT(14)	/* Target-target abort interrupt */
+  #define SH4_PCIINTM_TMTOIM	  BIT(9)	/* Target retry timeout */
+  #define SH4_PCIINTM_MDEIM	  BIT(8)	/* Master function disable error */
+  #define SH4_PCIINTM_APEDIM	  BIT(7)	/* Address parity error detection */
+  #define SH4_PCIINTM_SDIM	  BIT(6)	/* SERR detection */
+  #define SH4_PCIINTM_DPEITWM	  BIT(5)	/* Data parity error for target write */
+  #define SH4_PCIINTM_PEDITRM	  BIT(4)	/* PERR detection for target read */
+  #define SH4_PCIINTM_TADIMM	  BIT(3)	/* Target abort for master */
+  #define SH4_PCIINTM_MADIMM	  BIT(2)	/* Master abort for master */
+  #define SH4_PCIINTM_MWPDIM	  BIT(1)	/* Master write data parity error */
+  #define SH4_PCIINTM_MRDPEIM	  BIT(0)	/* Master read data parity error */
 #define SH4_PCIALR		0x11C		/* Error Address Register */
 #define SH4_PCICLR		0x120		/* Error Command/Data */
   #define SH4_PCICLR_MPIO	  0x80000000
@@ -61,7 +72,7 @@
 #define SH4_PCIAINT		0x130		/* Arbiter Interrupt Register */
   #define SH4_PCIAINT_MBKN	  0x00002000	/* Master Broken Interrupt */
   #define SH4_PCIAINT_TBTO	  0x00001000	/* Target Bus Time Out */
-  #define SH4_PCIAINT_MBTO	  0x00001000	/* Master Bus Time Out */
+  #define SH4_PCIAINT_MBTO	  0x00000800	/* Master Bus Time Out */
   #define SH4_PCIAINT_TABT	  0x00000008	/* Target Abort */
   #define SH4_PCIAINT_MABT	  0x00000004	/* Master Abort */
   #define SH4_PCIAINT_RDPE	  0x00000002	/* Read Data Parity Error */
@@ -151,7 +162,6 @@
 
 /* arch/sh/kernel/drivers/pci/ops-sh4.c */
 extern struct pci_ops sh4_pci_ops;
-int sh4_pci_check_direct(struct pci_channel *chan);
 int pci_fixup_pcic(struct pci_channel *chan);
 
 struct sh4_pci_address_space {
@@ -167,13 +177,13 @@
 static inline void pci_write_reg(struct pci_channel *chan,
 				 unsigned long val, unsigned long reg)
 {
-	ctrl_outl(val, chan->reg_base + reg);
+	__raw_writel(val, chan->reg_base + reg);
 }
 
 static inline unsigned long pci_read_reg(struct pci_channel *chan,
 					 unsigned long reg)
 {
-	return ctrl_inl(chan->reg_base + reg);
+	return __raw_readl(chan->reg_base + reg);
 }
 
 #endif /* __PCI_SH4_H */
diff --git a/arch/sh/drivers/pci/pci-sh5.c b/arch/sh/drivers/pci/pci-sh5.c
index 873ed2b..0bf296c 100644
--- a/arch/sh/drivers/pci/pci-sh5.c
+++ b/arch/sh/drivers/pci/pci-sh5.c
@@ -89,14 +89,13 @@
 	return IRQ_NONE;
 }
 
-static struct resource sh5_io_resource = { /* place holder */ };
-static struct resource sh5_mem_resource = { /* place holder */ };
+static struct resource sh5_pci_resources[2];
 
 static struct pci_channel sh5pci_controller = {
 	.pci_ops		= &sh5_pci_ops,
-	.mem_resource		= &sh5_mem_resource,
+	.resources		= sh5_pci_resources,
+	.nr_resources		= ARRAY_SIZE(sh5_pci_resources),
 	.mem_offset		= 0x00000000,
-	.io_resource		= &sh5_io_resource,
 	.io_offset		= 0x00000000,
 };
 
@@ -210,14 +209,12 @@
         SH5PCI_WRITE(AINTM, ~0);
         SH5PCI_WRITE(PINTM, ~0);
 
-	sh5_io_resource.start = PCI_IO_AREA;
-	sh5_io_resource.end = PCI_IO_AREA + 0x10000;
+	sh5_pci_resources[0].start = PCI_IO_AREA;
+	sh5_pci_resources[0].end = PCI_IO_AREA + 0x10000;
 
-	sh5_mem_resource.start = memStart;
-	sh5_mem_resource.end = memStart + memSize;
+	sh5_pci_resources[1].start = memStart;
+	sh5_pci_resources[1].end = memStart + memSize;
 
-	register_pci_controller(&sh5pci_controller);
-
-	return 0;
+	return register_pci_controller(&sh5pci_controller);
 }
 arch_initcall(sh5pci_init);
diff --git a/arch/sh/drivers/pci/pci-sh5.h b/arch/sh/drivers/pci/pci-sh5.h
index f277628..3f01dec 100644
--- a/arch/sh/drivers/pci/pci-sh5.h
+++ b/arch/sh/drivers/pci/pci-sh5.h
@@ -86,14 +86,14 @@
 /* #define PCISH5_VCR_REG(x)                ( SH5PCI_VCR_BASE (PCISH5_VCR_##x)) */
 
 /* Write I/O functions */
-#define SH5PCI_WRITE(reg,val)        ctrl_outl((u32)(val),PCISH5_ICR_REG(reg))
-#define SH5PCI_WRITE_SHORT(reg,val)  ctrl_outw((u16)(val),PCISH5_ICR_REG(reg))
-#define SH5PCI_WRITE_BYTE(reg,val)   ctrl_outb((u8)(val),PCISH5_ICR_REG(reg))
+#define SH5PCI_WRITE(reg,val)        __raw_writel((u32)(val),PCISH5_ICR_REG(reg))
+#define SH5PCI_WRITE_SHORT(reg,val)  __raw_writew((u16)(val),PCISH5_ICR_REG(reg))
+#define SH5PCI_WRITE_BYTE(reg,val)   __raw_writeb((u8)(val),PCISH5_ICR_REG(reg))
 
 /* Read I/O functions */
-#define SH5PCI_READ(reg)             ctrl_inl(PCISH5_ICR_REG(reg))
-#define SH5PCI_READ_SHORT(reg)       ctrl_inw(PCISH5_ICR_REG(reg))
-#define SH5PCI_READ_BYTE(reg)        ctrl_inb(PCISH5_ICR_REG(reg))
+#define SH5PCI_READ(reg)             __raw_readl(PCISH5_ICR_REG(reg))
+#define SH5PCI_READ_SHORT(reg)       __raw_readw(PCISH5_ICR_REG(reg))
+#define SH5PCI_READ_BYTE(reg)        __raw_readb(PCISH5_ICR_REG(reg))
 
 /* Set PCI config bits */
 #define SET_CONFIG_BITS(bus,devfn,where)  ((((bus) << 16) | ((devfn) << 8) | ((where) & ~3)) | 0x80000000)
diff --git a/arch/sh/drivers/pci/pci-sh7751.c b/arch/sh/drivers/pci/pci-sh7751.c
index 70c1999..17811e5 100644
--- a/arch/sh/drivers/pci/pci-sh7751.c
+++ b/arch/sh/drivers/pci/pci-sh7751.c
@@ -44,25 +44,25 @@
 	return 1;
 }
 
-static struct resource sh7751_io_resource = {
-	.name	= "SH7751_IO",
-	.start	= SH7751_PCI_IO_BASE,
-	.end	= SH7751_PCI_IO_BASE + SH7751_PCI_IO_SIZE - 1,
-	.flags	= IORESOURCE_IO
-};
-
-static struct resource sh7751_mem_resource = {
-	.name	= "SH7751_mem",
-	.start	= SH7751_PCI_MEMORY_BASE,
-	.end	= SH7751_PCI_MEMORY_BASE + SH7751_PCI_MEM_SIZE - 1,
-	.flags	= IORESOURCE_MEM
+static struct resource sh7751_pci_resources[] = {
+	{
+		.name	= "SH7751_IO",
+		.start	= SH7751_PCI_IO_BASE,
+		.end	= SH7751_PCI_IO_BASE + SH7751_PCI_IO_SIZE - 1,
+		.flags	= IORESOURCE_IO
+	}, {
+		.name	= "SH7751_mem",
+		.start	= SH7751_PCI_MEMORY_BASE,
+		.end	= SH7751_PCI_MEMORY_BASE + SH7751_PCI_MEM_SIZE - 1,
+		.flags	= IORESOURCE_MEM
+	},
 };
 
 static struct pci_channel sh7751_pci_controller = {
 	.pci_ops	= &sh4_pci_ops,
-	.mem_resource	= &sh7751_mem_resource,
+	.resources	= sh7751_pci_resources,
+	.nr_resources	= ARRAY_SIZE(sh7751_pci_resources),
 	.mem_offset	= 0x00000000,
-	.io_resource	= &sh7751_io_resource,
 	.io_offset	= 0x00000000,
 	.io_map_base	= SH7751_PCI_IO_BASE,
 };
@@ -79,7 +79,6 @@
 	struct pci_channel *chan = &sh7751_pci_controller;
 	unsigned int id;
 	u32 word, reg;
-	int ret;
 
 	printk(KERN_NOTICE "PCI: Starting intialization.\n");
 
@@ -93,13 +92,10 @@
 		return -ENODEV;
 	}
 
-	if ((ret = sh4_pci_check_direct(chan)) != 0)
-		return ret;
-
 	/* Set the BCR's to enable PCI access */
-	reg = ctrl_inl(SH7751_BCR1);
+	reg = __raw_readl(SH7751_BCR1);
 	reg |= 0x80000;
-	ctrl_outl(reg, SH7751_BCR1);
+	__raw_writel(reg, SH7751_BCR1);
 
 	/* Turn the clocks back on (not done in reset)*/
 	pci_write_reg(chan, 0, SH4_PCICLKR);
@@ -132,13 +128,13 @@
 	/* Set the local 16MB PCI memory space window to
 	 * the lowest PCI mapped address
 	 */
-	word = chan->mem_resource->start & SH4_PCIMBR_MASK;
+	word = chan->resources[1].start & SH4_PCIMBR_MASK;
 	pr_debug("PCI: Setting upper bits of Memory window to 0x%x\n", word);
 	pci_write_reg(chan, word , SH4_PCIMBR);
 
 	/* Make sure the MSB's of IO window are set to access PCI space
 	 * correctly */
-	word = chan->io_resource->start & SH4_PCIIOBR_MASK;
+	word = chan->resources[0].start & SH4_PCIIOBR_MASK;
 	pr_debug("PCI: Setting upper bits of IO window to 0x%x\n", word);
 	pci_write_reg(chan, word, SH4_PCIIOBR);
 
@@ -159,13 +155,13 @@
 		return -1;
 
 	/* configure the wait control registers */
-	word = ctrl_inl(SH7751_WCR1);
+	word = __raw_readl(SH7751_WCR1);
 	pci_write_reg(chan, word, SH4_PCIWCR1);
-	word = ctrl_inl(SH7751_WCR2);
+	word = __raw_readl(SH7751_WCR2);
 	pci_write_reg(chan, word, SH4_PCIWCR2);
-	word = ctrl_inl(SH7751_WCR3);
+	word = __raw_readl(SH7751_WCR3);
 	pci_write_reg(chan, word, SH4_PCIWCR3);
-	word = ctrl_inl(SH7751_MCR);
+	word = __raw_readl(SH7751_MCR);
 	pci_write_reg(chan, word, SH4_PCIMCR);
 
 	/* NOTE: I'm ignoring the PCI error IRQs for now..
@@ -180,8 +176,6 @@
 	word = SH4_PCICR_PREFIX | SH4_PCICR_CFIN | SH4_PCICR_ARBM;
 	pci_write_reg(chan, word, SH4_PCICR);
 
-	register_pci_controller(chan);
-
-	return 0;
+	return register_pci_controller(chan);
 }
 arch_initcall(sh7751_pci_init);
diff --git a/arch/sh/drivers/pci/pci-sh7780.c b/arch/sh/drivers/pci/pci-sh7780.c
index 323b92d..ffdcbf1 100644
--- a/arch/sh/drivers/pci/pci-sh7780.c
+++ b/arch/sh/drivers/pci/pci-sh7780.c
@@ -1,7 +1,7 @@
 /*
  * Low-Level PCI Support for the SH7780
  *
- *  Copyright (C) 2005 - 2009  Paul Mundt
+ *  Copyright (C) 2005 - 2010  Paul Mundt
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
@@ -11,52 +11,240 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/timer.h>
+#include <linux/irq.h>
 #include <linux/errno.h>
 #include <linux/delay.h>
+#include <linux/log2.h>
 #include "pci-sh4.h"
+#include <asm/mmu.h>
+#include <asm/sizes.h>
 
-static struct resource sh7785_io_resource = {
-	.name	= "SH7785_IO",
-	.start	= SH7780_PCI_IO_BASE,
-	.end	= SH7780_PCI_IO_BASE + SH7780_PCI_IO_SIZE - 1,
-	.flags	= IORESOURCE_IO
-};
-
-static struct resource sh7785_mem_resource = {
-	.name	= "SH7785_mem",
-	.start	= SH7780_PCI_MEMORY_BASE,
-	.end	= SH7780_PCI_MEMORY_BASE + SH7780_PCI_MEM_SIZE - 1,
-	.flags	= IORESOURCE_MEM
+static struct resource sh7785_pci_resources[] = {
+	{
+		.name	= "PCI IO",
+		.start	= 0x1000,
+		.end	= SZ_4M - 1,
+		.flags	= IORESOURCE_IO,
+	}, {
+		.name	= "PCI MEM 0",
+		.start	= 0xfd000000,
+		.end	= 0xfd000000 + SZ_16M - 1,
+		.flags	= IORESOURCE_MEM,
+	}, {
+		.name	= "PCI MEM 1",
+		.start	= 0x10000000,
+		.end	= 0x10000000 + SZ_64M - 1,
+		.flags	= IORESOURCE_MEM,
+	}, {
+		/*
+		 * 32-bit only resources must be last.
+		 */
+		.name	= "PCI MEM 2",
+		.start	= 0xc0000000,
+		.end	= 0xc0000000 + SZ_512M - 1,
+		.flags	= IORESOURCE_MEM | IORESOURCE_MEM_32BIT,
+	},
 };
 
 static struct pci_channel sh7780_pci_controller = {
 	.pci_ops	= &sh4_pci_ops,
-	.mem_resource	= &sh7785_mem_resource,
-	.mem_offset	= 0x00000000,
-	.io_resource	= &sh7785_io_resource,
-	.io_offset	= 0x00000000,
-	.io_map_base	= SH7780_PCI_IO_BASE,
+	.resources	= sh7785_pci_resources,
+	.nr_resources	= ARRAY_SIZE(sh7785_pci_resources),
+	.io_offset	= 0,
+	.mem_offset	= 0,
+	.io_map_base	= 0xfe200000,
+	.serr_irq	= evt2irq(0xa00),
+	.err_irq	= evt2irq(0xaa0),
 };
 
-static struct sh4_pci_address_map sh7780_pci_map = {
-	.window0	= {
-#if defined(CONFIG_32BIT)
-		.base	= SH7780_32BIT_DDR_BASE_ADDR,
-		.size	= 0x40000000,
-#else
-		.base	= SH7780_CS0_BASE_ADDR,
-		.size	= 0x20000000,
-#endif
-	},
+struct pci_errors {
+	unsigned int	mask;
+	const char	*str;
+} pci_arbiter_errors[] = {
+	{ SH4_PCIAINT_MBKN,	"master broken" },
+	{ SH4_PCIAINT_TBTO,	"target bus time out" },
+	{ SH4_PCIAINT_MBTO,	"master bus time out" },
+	{ SH4_PCIAINT_TABT,	"target abort" },
+	{ SH4_PCIAINT_MABT,	"master abort" },
+	{ SH4_PCIAINT_RDPE,	"read data parity error" },
+	{ SH4_PCIAINT_WDPE,	"write data parity error" },
+}, pci_interrupt_errors[] = {
+	{ SH4_PCIINT_MLCK,	"master lock error" },
+	{ SH4_PCIINT_TABT,	"target-target abort" },
+	{ SH4_PCIINT_TRET,	"target retry time out" },
+	{ SH4_PCIINT_MFDE,	"master function disable error" },
+	{ SH4_PCIINT_PRTY,	"address parity error" },
+	{ SH4_PCIINT_SERR,	"SERR" },
+	{ SH4_PCIINT_TWDP,	"data parity error for target write" },
+	{ SH4_PCIINT_TRDP,	"PERR detected for target read" },
+	{ SH4_PCIINT_MTABT,	"target abort for master" },
+	{ SH4_PCIINT_MMABT,	"master abort for master" },
+	{ SH4_PCIINT_MWPD,	"master write data parity error" },
+	{ SH4_PCIINT_MRPD,	"master read data parity error" },
 };
 
+static irqreturn_t sh7780_pci_err_irq(int irq, void *dev_id)
+{
+	struct pci_channel *hose = dev_id;
+	unsigned long addr;
+	unsigned int status;
+	unsigned int cmd;
+	int i;
+
+	addr = __raw_readl(hose->reg_base + SH4_PCIALR);
+
+	/*
+	 * Handle status errors.
+	 */
+	status = __raw_readw(hose->reg_base + PCI_STATUS);
+	if (status & (PCI_STATUS_PARITY |
+		      PCI_STATUS_DETECTED_PARITY |
+		      PCI_STATUS_SIG_TARGET_ABORT |
+		      PCI_STATUS_REC_TARGET_ABORT |
+		      PCI_STATUS_REC_MASTER_ABORT)) {
+		cmd = pcibios_handle_status_errors(addr, status, hose);
+		if (likely(cmd))
+			__raw_writew(cmd, hose->reg_base + PCI_STATUS);
+	}
+
+	/*
+	 * Handle arbiter errors.
+	 */
+	status = __raw_readl(hose->reg_base + SH4_PCIAINT);
+	for (i = cmd = 0; i < ARRAY_SIZE(pci_arbiter_errors); i++) {
+		if (status & pci_arbiter_errors[i].mask) {
+			printk(KERN_DEBUG "PCI: %s, addr=%08lx\n",
+			       pci_arbiter_errors[i].str, addr);
+			cmd |= pci_arbiter_errors[i].mask;
+		}
+	}
+	__raw_writel(cmd, hose->reg_base + SH4_PCIAINT);
+
+	/*
+	 * Handle the remaining PCI errors.
+	 */
+	status = __raw_readl(hose->reg_base + SH4_PCIINT);
+	for (i = cmd = 0; i < ARRAY_SIZE(pci_interrupt_errors); i++) {
+		if (status & pci_interrupt_errors[i].mask) {
+			printk(KERN_DEBUG "PCI: %s, addr=%08lx\n",
+			       pci_interrupt_errors[i].str, addr);
+			cmd |= pci_interrupt_errors[i].mask;
+		}
+	}
+	__raw_writel(cmd, hose->reg_base + SH4_PCIINT);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t sh7780_pci_serr_irq(int irq, void *dev_id)
+{
+	struct pci_channel *hose = dev_id;
+
+	printk(KERN_DEBUG "PCI: system error received: ");
+	pcibios_report_status(PCI_STATUS_SIG_SYSTEM_ERROR, 1);
+	printk("\n");
+
+	/* Deassert SERR */
+	__raw_writel(SH4_PCIINTM_SDIM, hose->reg_base + SH4_PCIINTM);
+
+	/* Back off the IRQ for a while */
+	disable_irq_nosync(irq);
+	hose->serr_timer.expires = jiffies + HZ;
+	add_timer(&hose->serr_timer);
+
+	return IRQ_HANDLED;
+}
+
+static int __init sh7780_pci_setup_irqs(struct pci_channel *hose)
+{
+	int ret;
+
+	/* Clear out PCI arbiter IRQs */
+	__raw_writel(0, hose->reg_base + SH4_PCIAINT);
+
+	/* Clear all error conditions */
+	__raw_writew(PCI_STATUS_DETECTED_PARITY  | \
+		     PCI_STATUS_SIG_SYSTEM_ERROR | \
+		     PCI_STATUS_REC_MASTER_ABORT | \
+		     PCI_STATUS_REC_TARGET_ABORT | \
+		     PCI_STATUS_SIG_TARGET_ABORT | \
+		     PCI_STATUS_PARITY, hose->reg_base + PCI_STATUS);
+
+	ret = request_irq(hose->serr_irq, sh7780_pci_serr_irq, IRQF_DISABLED,
+			  "PCI SERR interrupt", hose);
+	if (unlikely(ret)) {
+		printk(KERN_ERR "PCI: Failed hooking SERR IRQ\n");
+		return ret;
+	}
+
+	/*
+	 * The PCI ERR IRQ needs to be IRQF_SHARED since all of the power
+	 * down IRQ vectors are routed through the ERR IRQ vector. We
+	 * only request_irq() once as there is only a single masking
+	 * source for multiple events.
+	 */
+	ret = request_irq(hose->err_irq, sh7780_pci_err_irq, IRQF_SHARED,
+			  "PCI ERR interrupt", hose);
+	if (unlikely(ret)) {
+		free_irq(hose->serr_irq, hose);
+		return ret;
+	}
+
+	/* Unmask all of the arbiter IRQs. */
+	__raw_writel(SH4_PCIAINT_MBKN | SH4_PCIAINT_TBTO | SH4_PCIAINT_MBTO | \
+		     SH4_PCIAINT_TABT | SH4_PCIAINT_MABT | SH4_PCIAINT_RDPE | \
+		     SH4_PCIAINT_WDPE, hose->reg_base + SH4_PCIAINTM);
+
+	/* Unmask all of the PCI IRQs */
+	__raw_writel(SH4_PCIINTM_TTADIM  | SH4_PCIINTM_TMTOIM  | \
+		     SH4_PCIINTM_MDEIM   | SH4_PCIINTM_APEDIM  | \
+		     SH4_PCIINTM_SDIM    | SH4_PCIINTM_DPEITWM | \
+		     SH4_PCIINTM_PEDITRM | SH4_PCIINTM_TADIMM  | \
+		     SH4_PCIINTM_MADIMM  | SH4_PCIINTM_MWPDIM  | \
+		     SH4_PCIINTM_MRDPEIM, hose->reg_base + SH4_PCIINTM);
+
+	return ret;
+}
+
+static inline void __init sh7780_pci_teardown_irqs(struct pci_channel *hose)
+{
+	free_irq(hose->err_irq, hose);
+	free_irq(hose->serr_irq, hose);
+}
+
+static void __init sh7780_pci66_init(struct pci_channel *hose)
+{
+	unsigned int tmp;
+
+	if (!pci_is_66mhz_capable(hose, 0, 0))
+		return;
+
+	/* Enable register access */
+	tmp = __raw_readl(hose->reg_base + SH4_PCICR);
+	tmp |= SH4_PCICR_PREFIX;
+	__raw_writel(tmp, hose->reg_base + SH4_PCICR);
+
+	/* Enable 66MHz operation */
+	tmp = __raw_readw(hose->reg_base + PCI_STATUS);
+	tmp |= PCI_STATUS_66MHZ;
+	__raw_writew(tmp, hose->reg_base + PCI_STATUS);
+
+	/* Done */
+	tmp = __raw_readl(hose->reg_base + SH4_PCICR);
+	tmp |= SH4_PCICR_PREFIX | SH4_PCICR_CFIN;
+	__raw_writel(tmp, hose->reg_base + SH4_PCICR);
+}
+
 static int __init sh7780_pci_init(void)
 {
 	struct pci_channel *chan = &sh7780_pci_controller;
+	phys_addr_t memphys;
+	size_t memsize;
 	unsigned int id;
-	const char *type = NULL;
-	int ret;
-	u32 word;
+	const char *type;
+	int ret, i;
 
 	printk(KERN_NOTICE "PCI: Starting initialization.\n");
 
@@ -65,17 +253,28 @@
 	/* Enable CPU access to the PCIC registers. */
 	__raw_writel(PCIECR_ENBL, PCIECR);
 
-	id = __raw_readw(chan->reg_base + SH7780_PCIVID);
-	if (id != SH7780_VENDOR_ID) {
+	/* Reset */
+	__raw_writel(SH4_PCICR_PREFIX | SH4_PCICR_PRST,
+		     chan->reg_base + SH4_PCICR);
+
+	/*
+	 * Wait for it to come back up. The spec says to allow for up to
+	 * 1 second after toggling the reset pin, but in practice 100ms
+	 * is more than enough.
+	 */
+	mdelay(100);
+
+	id = __raw_readw(chan->reg_base + PCI_VENDOR_ID);
+	if (id != PCI_VENDOR_ID_RENESAS) {
 		printk(KERN_ERR "PCI: Unknown vendor ID 0x%04x.\n", id);
 		return -ENODEV;
 	}
 
-	id = __raw_readw(chan->reg_base + SH7780_PCIDID);
-	type = (id == SH7763_DEVICE_ID)	? "SH7763" :
-	       (id == SH7780_DEVICE_ID) ? "SH7780" :
-	       (id == SH7781_DEVICE_ID) ? "SH7781" :
-	       (id == SH7785_DEVICE_ID) ? "SH7785" :
+	id = __raw_readw(chan->reg_base + PCI_DEVICE_ID);
+	type = (id == PCI_DEVICE_ID_RENESAS_SH7763) ? "SH7763" :
+	       (id == PCI_DEVICE_ID_RENESAS_SH7780) ? "SH7780" :
+	       (id == PCI_DEVICE_ID_RENESAS_SH7781) ? "SH7781" :
+	       (id == PCI_DEVICE_ID_RENESAS_SH7785) ? "SH7785" :
 					  NULL;
 	if (unlikely(!type)) {
 		printk(KERN_ERR "PCI: Found an unsupported Renesas host "
@@ -85,62 +284,119 @@
 
 	printk(KERN_NOTICE "PCI: Found a Renesas %s host "
 	       "controller, revision %d.\n", type,
-	       __raw_readb(chan->reg_base + SH7780_PCIRID));
+	       __raw_readb(chan->reg_base + PCI_REVISION_ID));
 
-	if ((ret = sh4_pci_check_direct(chan)) != 0)
+	/*
+	 * Now throw it in to register initialization mode and
+	 * start the real work.
+	 */
+	__raw_writel(SH4_PCICR_PREFIX, chan->reg_base + SH4_PCICR);
+
+	memphys = __pa(memory_start);
+	memsize = roundup_pow_of_two(memory_end - memory_start);
+
+	/*
+	 * If there's more than 512MB of memory, we need to roll over to
+	 * LAR1/LSR1.
+	 */
+	if (memsize > SZ_512M) {
+		__raw_writel(memphys + SZ_512M, chan->reg_base + SH4_PCILAR1);
+		__raw_writel((((memsize - SZ_512M) - SZ_1M) & 0x1ff00000) | 1,
+			     chan->reg_base + SH4_PCILSR1);
+		memsize = SZ_512M;
+	} else {
+		/*
+		 * Otherwise just zero it out and disable it.
+		 */
+		__raw_writel(0, chan->reg_base + SH4_PCILAR1);
+		__raw_writel(0, chan->reg_base + SH4_PCILSR1);
+	}
+
+	/*
+	 * LAR0/LSR0 covers up to the first 512MB, which is enough to
+	 * cover all of lowmem on most platforms.
+	 */
+	__raw_writel(memphys, chan->reg_base + SH4_PCILAR0);
+	__raw_writel(((memsize - SZ_1M) & 0x1ff00000) | 1,
+		     chan->reg_base + SH4_PCILSR0);
+
+	/*
+	 * Hook up the ERR and SERR IRQs.
+	 */
+	ret = sh7780_pci_setup_irqs(chan);
+	if (unlikely(ret))
 		return ret;
 
 	/*
-	 * Set the class and sub-class codes.
+	 * Disable the cache snoop controller for non-coherent DMA.
 	 */
-	__raw_writeb(PCI_CLASS_BRIDGE_HOST >> 8,
-		     chan->reg_base + SH7780_PCIBCC);
-	__raw_writeb(PCI_CLASS_BRIDGE_HOST & 0xff,
-		     chan->reg_base + SH7780_PCISUB);
+	__raw_writel(0, chan->reg_base + SH7780_PCICSCR0);
+	__raw_writel(0, chan->reg_base + SH7780_PCICSAR0);
+	__raw_writel(0, chan->reg_base + SH7780_PCICSCR1);
+	__raw_writel(0, chan->reg_base + SH7780_PCICSAR1);
 
 	/*
-	 * Set IO and Mem windows to local address
-	 * Make PCI and local address the same for easy 1 to 1 mapping
+	 * Setup the memory BARs
 	 */
-	pci_write_reg(chan, sh7780_pci_map.window0.size - 0xfffff, SH4_PCILSR0);
-	/* Set the values on window 0 PCI config registers */
-	pci_write_reg(chan, sh7780_pci_map.window0.base, SH4_PCILAR0);
-	pci_write_reg(chan, sh7780_pci_map.window0.base, SH7780_PCIMBAR0);
+	for (i = 1; i < chan->nr_resources; i++) {
+		struct resource *res = chan->resources + i;
+		resource_size_t size;
 
-	pci_write_reg(chan, 0x0000380f, SH4_PCIAINTM);
+		if (unlikely(res->flags & IORESOURCE_IO))
+			continue;
 
-	/* Set up standard PCI config registers */
-	__raw_writew(0xFB00, chan->reg_base + SH7780_PCISTATUS);
-	__raw_writew(0x0047, chan->reg_base + SH7780_PCICMD);
-	__raw_writew(0x1912, chan->reg_base + SH7780_PCISVID);
-	__raw_writew(0x0001, chan->reg_base + SH7780_PCISID);
+		/*
+		 * Make sure we're in the right physical addressing mode
+		 * for dealing with the resource.
+		 */
+		if ((res->flags & IORESOURCE_MEM_32BIT) && __in_29bit_mode()) {
+			chan->nr_resources--;
+			continue;
+		}
 
-	__raw_writeb(0x00, chan->reg_base + SH7780_PCIPIF);
+		size = resource_size(res);
 
-	/* Apply any last-minute PCIC fixups */
-	pci_fixup_pcic(chan);
+		/*
+		 * The MBMR mask is calculated in units of 256kB, which
+		 * keeps things pretty simple.
+		 */
+		__raw_writel(((roundup_pow_of_two(size) / SZ_256K) - 1) << 18,
+			     chan->reg_base + SH7780_PCIMBMR(i - 1));
+		__raw_writel(res->start, chan->reg_base + SH7780_PCIMBR(i - 1));
+	}
 
-	pci_write_reg(chan, 0xfd000000, SH7780_PCIMBR0);
-	pci_write_reg(chan, 0x00fc0000, SH7780_PCIMBMR0);
+	/*
+	 * And I/O.
+	 */
+	__raw_writel(0, chan->reg_base + PCI_BASE_ADDRESS_0);
+	__raw_writel(0, chan->reg_base + SH7780_PCIIOBR);
+	__raw_writel(0, chan->reg_base + SH7780_PCIIOBMR);
 
-#ifdef CONFIG_32BIT
-	pci_write_reg(chan, 0xc0000000, SH7780_PCIMBR2);
-	pci_write_reg(chan, 0x20000000 - SH7780_PCI_IO_SIZE, SH7780_PCIMBMR2);
-#endif
+	__raw_writew(PCI_COMMAND_SERR   | PCI_COMMAND_WAIT   | \
+		     PCI_COMMAND_PARITY | PCI_COMMAND_MASTER | \
+		     PCI_COMMAND_MEMORY, chan->reg_base + PCI_COMMAND);
 
-	/* Set IOBR for windows containing area specified in pci.h */
-	pci_write_reg(chan, chan->io_resource->start & ~(SH7780_PCI_IO_SIZE-1),
-		      SH7780_PCIIOBR);
-	pci_write_reg(chan, ((SH7780_PCI_IO_SIZE-1) & (7<<18)),
-		      SH7780_PCIIOBMR);
+	/*
+	 * Initialization mode complete, release the control register and
+	 * enable round robin mode to stop device overruns/starvation.
+	 */
+	__raw_writel(SH4_PCICR_PREFIX | SH4_PCICR_CFIN | SH4_PCICR_FTO,
+		     chan->reg_base + SH4_PCICR);
 
-	/* SH7780 init done, set central function init complete */
-	/* use round robin mode to stop a device starving/overruning */
-	word = SH4_PCICR_PREFIX | SH4_PCICR_CFIN | SH4_PCICR_FTO;
-	pci_write_reg(chan, word, SH4_PCICR);
+	ret = register_pci_controller(chan);
+	if (unlikely(ret))
+		goto err;
 
-	register_pci_controller(chan);
+	sh7780_pci66_init(chan);
+
+	printk(KERN_NOTICE "PCI: Running at %dMHz.\n",
+	       (__raw_readw(chan->reg_base + PCI_STATUS) & PCI_STATUS_66MHZ) ?
+	       66 : 33);
 
 	return 0;
+
+err:
+	sh7780_pci_teardown_irqs(chan);
+	return ret;
 }
 arch_initcall(sh7780_pci_init);
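The local-address window (LSR) and memory-window mask (MBMR) values programmed in the hunk above are plain bit arithmetic; two worked examples with illustrative sizes, not taken from any particular board:

	/*
	 * LSR0 for 128MB of lowmem:
	 *	memsize = SZ_128M = 0x08000000
	 *	((memsize - SZ_1M) & 0x1ff00000) | 1 = 0x07f00001
	 * The masked bits encode the window size; the low bit is the enable
	 * (the disabled LAR1/LSR1 case writes 0).
	 *
	 * MBMR for the 64MB "PCI MEM 1" window:
	 *	size = SZ_64M = 0x04000000
	 *	((roundup_pow_of_two(size) / SZ_256K) - 1) << 18
	 *		= (0x100 - 1) << 18 = 0x03fc0000
	 */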
diff --git a/arch/sh/drivers/pci/pci-sh7780.h b/arch/sh/drivers/pci/pci-sh7780.h
index 4a52478..205dcbe 100644
--- a/arch/sh/drivers/pci/pci-sh7780.h
+++ b/arch/sh/drivers/pci/pci-sh7780.h
@@ -12,12 +12,11 @@
 #ifndef _PCI_SH7780_H_
 #define _PCI_SH7780_H_
 
-/* Platform Specific Values */
-#define SH7780_VENDOR_ID	0x1912
-#define SH7781_DEVICE_ID	0x0001
-#define SH7780_DEVICE_ID	0x0002
-#define SH7763_DEVICE_ID	0x0004
-#define SH7785_DEVICE_ID	0x0007
+#define PCI_VENDOR_ID_RENESAS		0x1912
+#define PCI_DEVICE_ID_RENESAS_SH7781	0x0001
+#define PCI_DEVICE_ID_RENESAS_SH7780	0x0002
+#define PCI_DEVICE_ID_RENESAS_SH7763	0x0004
+#define PCI_DEVICE_ID_RENESAS_SH7785	0x0007
 
 /* SH7780 Control Registers */
 #define	PCIECR			0xFE000008
@@ -27,44 +26,9 @@
 #define SH7780_PCI_CONFIG_BASE	0xFD000000	/* Config space base addr */
 #define SH7780_PCI_CONFIG_SIZE	0x01000000	/* Config space size */
 
-#define SH7780_PCI_MEMORY_BASE	0xFD000000	/* Memory space base addr */
-#define SH7780_PCI_MEM_SIZE	0x01000000	/* Size of Memory window */
-
-#define SH7780_PCI_IO_BASE	0xFE200000	/* IO space base address */
-#define SH7780_PCI_IO_SIZE	0x00400000	/* Size of IO window */
-
 #define SH7780_PCIREG_BASE	0xFE040000	/* PCI regs base address */
 
 /* SH7780 PCI Config Registers */
-#define SH7780_PCIVID		0x000		/* Vendor ID */
-#define SH7780_PCIDID		0x002		/* Device ID */
-#define SH7780_PCICMD		0x004		/* Command */
-#define SH7780_PCISTATUS	0x006		/* Status */
-#define SH7780_PCIRID		0x008		/* Revision ID */
-#define SH7780_PCIPIF		0x009		/* Program Interface */
-#define SH7780_PCISUB		0x00a		/* Sub class code */
-#define SH7780_PCIBCC		0x00b		/* Base class code */
-#define SH7780_PCICLS		0x00c		/* Cache line size */
-#define SH7780_PCILTM		0x00d		/* latency timer */
-#define SH7780_PCIHDR		0x00e		/* Header type */
-#define SH7780_PCIBIST		0x00f		/* BIST */
-#define SH7780_PCIIBAR		0x010		/* IO Base address */
-#define SH7780_PCIMBAR0		0x014		/* Memory base address0 */
-#define SH7780_PCIMBAR1		0x018		/* Memory base address1 */
-#define SH7780_PCISVID		0x02c		/* Sub system vendor ID */
-#define SH7780_PCISID		0x02e		/* Sub system ID */
-#define SH7780_PCICP		0x034
-#define SH7780_PCIINTLINE	0x03c		/* Interrupt line */
-#define SH7780_PCIINTPIN	0x03d		/* Interrupt pin */
-#define SH7780_PCIMINGNT	0x03e		/* Minumum grand */
-#define SH7780_PCIMAXLAT	0x03f		/* Maxmum latency */
-#define SH7780_PCICID		0x040
-#define SH7780_PCINIP		0x041
-#define SH7780_PCIPMC		0x042
-#define SH7780_PCIPMCSR		0x044
-#define SH7780_PCIPMCSR_BSE	0x046
-#define SH7780_PCICDD		0x047
-
 #define SH7780_PCIIR		0x114		/* PCI Interrupt Register */
 #define SH7780_PCIIMR		0x118		/* PCI Interrupt Mask Register */
 #define SH7780_PCIAIR		0x11C		/* Error Address Register */
@@ -76,10 +40,8 @@
 #define SH7780_PCIPINT		0x1CC		/* Power Mgmnt Int. Register */
 #define SH7780_PCIPINTM		0x1D0		/* Power Mgmnt Mask Register */
 
-#define SH7780_PCIMBR0		0x1E0
-#define SH7780_PCIMBMR0		0x1E4
-#define SH7780_PCIMBR2		0x1F0
-#define SH7780_PCIMBMR2		0x1F4
+#define SH7780_PCIMBR(x)	(0x1E0 + ((x) * 8))
+#define SH7780_PCIMBMR(x)	(0x1E4 + ((x) * 8))
 #define SH7780_PCIIOBR		0x1F8
 #define SH7780_PCIIOBMR		0x1FC
 #define SH7780_PCICSCR0		0x210		/* Cache Snoop1 Cnt. Register */
@@ -87,16 +49,4 @@
 #define SH7780_PCICSAR0		0x218	/* Cache Snoop1 Addr. Register */
 #define SH7780_PCICSAR1		0x21C	/* Cache Snoop2 Addr. Register */
 
-/* General Memory Config Addresses */
-#define SH7780_CS0_BASE_ADDR	0x0
-#define SH7780_MEM_REGION_SIZE	0x04000000
-#define SH7780_CS1_BASE_ADDR	(SH7780_CS0_BASE_ADDR + SH7780_MEM_REGION_SIZE)
-#define SH7780_CS2_BASE_ADDR	(SH7780_CS1_BASE_ADDR + SH7780_MEM_REGION_SIZE)
-#define SH7780_CS3_BASE_ADDR	(SH7780_CS2_BASE_ADDR + SH7780_MEM_REGION_SIZE)
-#define SH7780_CS4_BASE_ADDR	(SH7780_CS3_BASE_ADDR + SH7780_MEM_REGION_SIZE)
-#define SH7780_CS5_BASE_ADDR	(SH7780_CS4_BASE_ADDR + SH7780_MEM_REGION_SIZE)
-#define SH7780_CS6_BASE_ADDR	(SH7780_CS5_BASE_ADDR + SH7780_MEM_REGION_SIZE)
-
-#define SH7780_32BIT_DDR_BASE_ADDR	0x40000000
-
 #endif /* _PCI_SH7780_H_ */
diff --git a/arch/sh/drivers/pci/pci.c b/arch/sh/drivers/pci/pci.c
index c481df6..953af13 100644
--- a/arch/sh/drivers/pci/pci.c
+++ b/arch/sh/drivers/pci/pci.c
@@ -33,15 +33,22 @@
 static void __devinit pcibios_scanbus(struct pci_channel *hose)
 {
 	static int next_busno;
+	static int need_domain_info;
 	struct pci_bus *bus;
 
 	bus = pci_scan_bus(next_busno, hose->pci_ops, hose);
+	hose->bus = bus;
+
+	need_domain_info = need_domain_info || hose->index;
+	hose->need_domain_info = need_domain_info;
 	if (bus) {
 		next_busno = bus->subordinate + 1;
 		/* Don't allow 8-bit bus number overflow inside the hose -
 		   reserve some space for bridges. */
-		if (next_busno > 224)
+		if (next_busno > 224) {
 			next_busno = 0;
+			need_domain_info = 1;
+		}
 
 		pci_bus_size_bridges(bus);
 		pci_bus_assign_resources(bus);
@@ -51,10 +58,21 @@
 
 static DEFINE_MUTEX(pci_scan_mutex);
 
-void __devinit register_pci_controller(struct pci_channel *hose)
+int __devinit register_pci_controller(struct pci_channel *hose)
 {
-	request_resource(&iomem_resource, hose->mem_resource);
-	request_resource(&ioport_resource, hose->io_resource);
+	int i;
+
+	for (i = 0; i < hose->nr_resources; i++) {
+		struct resource *res = hose->resources + i;
+
+		if (res->flags & IORESOURCE_IO) {
+			if (request_resource(&ioport_resource, res) < 0)
+				goto out;
+		} else {
+			if (request_resource(&iomem_resource, res) < 0)
+				goto out;
+		}
+	}
 
 	*hose_tail = hose;
 	hose_tail = &hose->next;
@@ -68,6 +86,11 @@
 	}
 
 	/*
+	 * Setup the ERR/PERR and SERR timers, if available.
+	 */
+	pcibios_enable_timers(hose);
+
+	/*
 	 * Scan the bus if it is registered after the PCI subsystem
 	 * initialization.
 	 */
@@ -76,6 +99,15 @@
 		pcibios_scanbus(hose);
 		mutex_unlock(&pci_scan_mutex);
 	}
+
+	return 0;
+
+out:
+	for (--i; i >= 0; i--)
+		release_resource(&hose->resources[i]);
+
+	printk(KERN_WARNING "Skipping PCI bus scan due to resource conflict\n");
+	return -1;
 }
 
 static int __init pcibios_init(void)
@@ -127,11 +159,13 @@
 {
 	struct pci_dev *dev = bus->self;
 	struct list_head *ln;
-	struct pci_channel *chan = bus->sysdata;
+	struct pci_channel *hose = bus->sysdata;
 
 	if (!dev) {
-		bus->resource[0] = chan->io_resource;
-		bus->resource[1] = chan->mem_resource;
+		int i;
+
+		for (i = 0; i < hose->nr_resources; i++)
+			bus->resource[i] = hose->resources + i;
 	}
 
 	for (ln = bus->devices.next; ln != &bus->devices; ln = ln->next) {
@@ -148,34 +182,29 @@
  * addresses to be allocated in the 0x000-0x0ff region
  * modulo 0x400.
  */
-void pcibios_align_resource(void *data, struct resource *res,
-			    resource_size_t size, resource_size_t align)
+resource_size_t pcibios_align_resource(void *data, const struct resource *res,
+				resource_size_t size, resource_size_t align)
 {
 	struct pci_dev *dev = data;
-	struct pci_channel *chan = dev->sysdata;
+	struct pci_channel *hose = dev->sysdata;
 	resource_size_t start = res->start;
 
 	if (res->flags & IORESOURCE_IO) {
-		if (start < PCIBIOS_MIN_IO + chan->io_resource->start)
-			start = PCIBIOS_MIN_IO + chan->io_resource->start;
+		if (start < PCIBIOS_MIN_IO + hose->resources[0].start)
+			start = PCIBIOS_MIN_IO + hose->resources[0].start;
 
 		/*
                  * Put everything into 0x00-0xff region modulo 0x400.
 		 */
-		if (start & 0x300) {
+		if (start & 0x300)
 			start = (start + 0x3ff) & ~0x3ff;
-			res->start = start;
-		}
-	} else if (res->flags & IORESOURCE_MEM) {
-		if (start < PCIBIOS_MIN_MEM + chan->mem_resource->start)
-			start = PCIBIOS_MIN_MEM + chan->mem_resource->start;
 	}
 
-	res->start = start;
+	return start;
 }
 
 void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
-			 struct resource *res)
+			     struct resource *res)
 {
 	struct pci_channel *hose = dev->sysdata;
 	unsigned long offset = 0;
@@ -189,9 +218,8 @@
 	region->end = res->end - offset;
 }
 
-void __devinit
-pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
-			struct pci_bus_region *region)
+void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
+			     struct pci_bus_region *region)
 {
 	struct pci_channel *hose = dev->sysdata;
 	unsigned long offset = 0;
@@ -274,6 +302,86 @@
 	return str;
 }
 
+static void __init
+pcibios_bus_report_status_early(struct pci_channel *hose,
+				int top_bus, int current_bus,
+				unsigned int status_mask, int warn)
+{
+	unsigned int pci_devfn;
+	u16 status;
+	int ret;
+
+	for (pci_devfn = 0; pci_devfn < 0xff; pci_devfn++) {
+		if (PCI_FUNC(pci_devfn))
+			continue;
+		ret = early_read_config_word(hose, top_bus, current_bus,
+					     pci_devfn, PCI_STATUS, &status);
+		if (ret != PCIBIOS_SUCCESSFUL)
+			continue;
+		if (status == 0xffff)
+			continue;
+
+		early_write_config_word(hose, top_bus, current_bus,
+					pci_devfn, PCI_STATUS,
+					status & status_mask);
+		if (warn)
+			printk("(%02x:%02x: %04X) ", current_bus,
+			       pci_devfn, status);
+	}
+}
+
+/*
+ * We can't use pci_find_device() here since we are
+ * called from interrupt context.
+ */
+static void __init_refok
+pcibios_bus_report_status(struct pci_bus *bus, unsigned int status_mask,
+			  int warn)
+{
+	struct pci_dev *dev;
+
+	list_for_each_entry(dev, &bus->devices, bus_list) {
+		u16 status;
+
+		/*
+		 * ignore host bridge - we handle
+		 * that separately
+		 */
+		if (dev->bus->number == 0 && dev->devfn == 0)
+			continue;
+
+		pci_read_config_word(dev, PCI_STATUS, &status);
+		if (status == 0xffff)
+			continue;
+
+		if ((status & status_mask) == 0)
+			continue;
+
+		/* clear the status errors */
+		pci_write_config_word(dev, PCI_STATUS, status & status_mask);
+
+		if (warn)
+			printk("(%s: %04X) ", pci_name(dev), status);
+	}
+
+	list_for_each_entry(dev, &bus->devices, bus_list)
+		if (dev->subordinate)
+			pcibios_bus_report_status(dev->subordinate, status_mask, warn);
+}
+
+void __init_refok pcibios_report_status(unsigned int status_mask, int warn)
+{
+	struct pci_channel *hose;
+
+	for (hose = hose_head; hose; hose = hose->next) {
+		if (unlikely(!hose->bus))
+			pcibios_bus_report_status_early(hose, hose_head->index,
+					hose->index, status_mask, warn);
+		else
+			pcibios_bus_report_status(hose->bus, status_mask, warn);
+	}
+}
+
 int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
 			enum pci_mmap_state mmap_state, int write_combine)
 {
@@ -302,9 +410,15 @@
 {
 	struct pci_channel *chan = dev->sysdata;
 
-	if (!chan->io_map_base)
+	if (unlikely(!chan->io_map_base)) {
 		chan->io_map_base = generic_io_base;
 
+		if (pci_domains_supported)
+			panic("To avoid data corruption io_map_base MUST be "
+			      "set with multiple PCI domains.");
+	}
+
 	return (void __iomem *)(chan->io_map_base + port);
 }
 
@@ -321,20 +435,9 @@
 
 	if (flags & IORESOURCE_IO)
 		return ioport_map_pci(dev, start, len);
-
-	/*
-	 * Presently the IORESOURCE_MEM case is a bit special, most
-	 * SH7751 style PCI controllers have PCI memory at a fixed
-	 * location in the address space where no remapping is desired.
-	 * With the IORESOURCE_MEM case more care has to be taken
-	 * to inhibit page table mapping for legacy cores, but this is
-	 * punted off to __ioremap().
-	 *					-- PFM.
-	 */
 	if (flags & IORESOURCE_MEM) {
 		if (flags & IORESOURCE_CACHEABLE)
 			return ioremap(start, len);
-
 		return ioremap_nocache(start, len);
 	}
 
diff --git a/arch/sh/drivers/pci/pcie-sh7786.c b/arch/sh/drivers/pci/pcie-sh7786.c
index ac37ee8..ae91a2d 100644
--- a/arch/sh/drivers/pci/pcie-sh7786.c
+++ b/arch/sh/drivers/pci/pcie-sh7786.c
@@ -1,7 +1,7 @@
 /*
  * Low-Level PCI Express Support for the SH7786
  *
- *  Copyright (C) 2009  Paul Mundt
+ *  Copyright (C) 2009 - 2010  Paul Mundt
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
@@ -30,60 +30,84 @@
 	int (*port_init_hw)(struct sh7786_pcie_port *port);
 } *sh7786_pcie_hwops;
 
-static struct resource sh7786_pci_32bit_mem_resources[] = {
+static struct resource sh7786_pci0_resources[] = {
 	{
-		.name	= "pci0_mem",
-		.start	= SH4A_PCIMEM_BASEA,
-		.end	= SH4A_PCIMEM_BASEA + SZ_64M - 1,
+		.name	= "PCIe0 IO",
+		.start	= 0xfd000000,
+		.end	= 0xfd000000 + SZ_8M - 1,
+		.flags	= IORESOURCE_IO,
+	}, {
+		.name	= "PCIe0 MEM 0",
+		.start	= 0xc0000000,
+		.end	= 0xc0000000 + SZ_512M - 1,
+		.flags	= IORESOURCE_MEM | IORESOURCE_MEM_32BIT,
+	}, {
+		.name	= "PCIe0 MEM 1",
+		.start	= 0x10000000,
+		.end	= 0x10000000 + SZ_64M - 1,
 		.flags	= IORESOURCE_MEM,
 	}, {
-		.name	= "pci1_mem",
-		.start	= SH4A_PCIMEM_BASEA1,
-		.end	= SH4A_PCIMEM_BASEA1 + SZ_64M - 1,
-		.flags	= IORESOURCE_MEM,
-	}, {
-		.name	= "pci2_mem",
-		.start	= SH4A_PCIMEM_BASEA2,
-		.end	= SH4A_PCIMEM_BASEA2 + SZ_64M - 1,
-		.flags	= IORESOURCE_MEM,
+		.name	= "PCIe0 MEM 2",
+		.start	= 0xfe100000,
+		.end	= 0xfe100000 + SZ_1M - 1,
 	},
 };
 
-static struct resource sh7786_pci_29bit_mem_resource = {
-	.start	= SH4A_PCIMEM_BASE,
-	.end	= SH4A_PCIMEM_BASE + SZ_64M - 1,
-	.flags	= IORESOURCE_MEM,
+static struct resource sh7786_pci1_resources[] = {
+	{
+		.name	= "PCIe1 IO",
+		.start	= 0xfd800000,
+		.end	= 0xfd800000 + SZ_8M - 1,
+		.flags	= IORESOURCE_IO,
+	}, {
+		.name	= "PCIe1 MEM 0",
+		.start	= 0xa0000000,
+		.end	= 0xa0000000 + SZ_512M - 1,
+		.flags	= IORESOURCE_MEM | IORESOURCE_MEM_32BIT,
+	}, {
+		.name	= "PCIe1 MEM 1",
+		.start	= 0x30000000,
+		.end	= 0x30000000 + SZ_256M - 1,
+		.flags	= IORESOURCE_MEM | IORESOURCE_MEM_32BIT,
+	}, {
+		.name	= "PCIe1 MEM 2",
+		.start	= 0xfe300000,
+		.end	= 0xfe300000 + SZ_1M - 1,
+	},
 };
 
-static struct resource sh7786_pci_io_resources[] = {
+static struct resource sh7786_pci2_resources[] = {
 	{
-		.name	= "pci0_io",
-		.start	= SH4A_PCIIO_BASE,
-		.end	= SH4A_PCIIO_BASE + SZ_8M - 1,
-		.flags	= IORESOURCE_IO,
+		.name	= "PCIe2 IO",
+		.start	= 0xfc800000,
+		.end	= 0xfc800000 + SZ_4M - 1,
 	}, {
-		.name	= "pci1_io",
-		.start	= SH4A_PCIIO_BASE1,
-		.end	= SH4A_PCIIO_BASE1 + SZ_8M - 1,
-		.flags	= IORESOURCE_IO,
+		.name	= "PCIe2 MEM 0",
+		.start	= 0x80000000,
+		.end	= 0x80000000 + SZ_512M - 1,
+		.flags	= IORESOURCE_MEM | IORESOURCE_MEM_32BIT,
 	}, {
-		.name	= "pci2_io",
-		.start	= SH4A_PCIIO_BASE2,
-		.end	= SH4A_PCIIO_BASE2 + SZ_4M - 1,
-		.flags	= IORESOURCE_IO,
+		.name	= "PCIe2 MEM 1",
+		.start	= 0x20000000,
+		.end	= 0x20000000 + SZ_256M - 1,
+		.flags	= IORESOURCE_MEM | IORESOURCE_MEM_32BIT,
+	}, {
+		.name	= "PCIe2 MEM 2",
+		.start	= 0xfcd00000,
+		.end	= 0xfcd00000 + SZ_1M - 1,
 	},
 };
 
 extern struct pci_ops sh7786_pci_ops;
 
-#define DEFINE_CONTROLLER(start, idx)				\
-{								\
-	.pci_ops	= &sh7786_pci_ops,			\
-	.reg_base	= start,				\
-	/* mem_resource filled in at probe time */		\
-	.mem_offset	= 0,					\
-	.io_resource	= &sh7786_pci_io_resources[idx],	\
-	.io_offset	= 0,					\
+#define DEFINE_CONTROLLER(start, idx)					\
+{									\
+	.pci_ops	= &sh7786_pci_ops,				\
+	.resources	= sh7786_pci##idx##_resources,			\
+	.nr_resources	= ARRAY_SIZE(sh7786_pci##idx##_resources),	\
+	.reg_base	= start,					\
+	.mem_offset	= 0,						\
+	.io_offset	= 0,						\
 }
 
 static struct pci_channel sh7786_pci_channels[] = {
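DEFINE_CONTROLLER() pastes the index into the matching resource array; for idx = 1 it expands to the following (REG_BASE stands in for the real register base, which lives in sh7786_pci_channels[] outside this hunk):

	{
		.pci_ops	= &sh7786_pci_ops,
		.resources	= sh7786_pci1_resources,
		.nr_resources	= ARRAY_SIZE(sh7786_pci1_resources),
		.reg_base	= REG_BASE,		/* placeholder */
		.mem_offset	= 0,
		.io_offset	= 0,
	}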
@@ -180,7 +204,9 @@
 {
 	struct pci_channel *chan = port->hose;
 	unsigned int data;
-	int ret;
+	phys_addr_t memphys;
+	size_t memsize;
+	int ret, i;
 
 	/* Begin initialization */
 	pci_write_reg(chan, 0, SH4A_PCIETCTLR);
@@ -203,15 +229,24 @@
 	data |= PCI_CAP_ID_EXP;
 	pci_write_reg(chan, data, SH4A_PCIEEXPCAP0);
 
-	/* Enable x4 link width and extended sync. */
+	/* Enable data link layer active state reporting */
+	pci_write_reg(chan, PCI_EXP_LNKCAP_DLLLARC, SH4A_PCIEEXPCAP3);
+
+	/* Enable extended sync and ASPM L0s support */
 	data = pci_read_reg(chan, SH4A_PCIEEXPCAP4);
-	data &= ~(PCI_EXP_LNKSTA_NLW << 16);
-	data |= (1 << 22) | PCI_EXP_LNKCTL_ES;
+	data &= ~PCI_EXP_LNKCTL_ASPMC;
+	data |= PCI_EXP_LNKCTL_ES | 1;
 	pci_write_reg(chan, data, SH4A_PCIEEXPCAP4);
 
+	/* Write out the physical slot number */
+	data = pci_read_reg(chan, SH4A_PCIEEXPCAP5);
+	data &= ~PCI_EXP_SLTCAP_PSN;
+	data |= (port->index + 1) << 19;
+	pci_write_reg(chan, data, SH4A_PCIEEXPCAP5);
+
 	/* Set the completion timer timeout to the maximum 32ms. */
 	data = pci_read_reg(chan, SH4A_PCIETLCTLR);
-	data &= ~0xffff;
+	data &= ~0x3f00;
 	data |= 0x32 << 8;
 	pci_write_reg(chan, data, SH4A_PCIETLCTLR);
 
@@ -224,6 +259,33 @@
 	data |= (0xff << 16);
 	pci_write_reg(chan, data, SH4A_PCIEMACCTLR);
 
+	memphys = __pa(memory_start);
+	memsize = roundup_pow_of_two(memory_end - memory_start);
+
+	/*
+	 * If there's more than 512MB of memory, we need to roll over to
+	 * LAR1/LAMR1.
+	 */
+	if (memsize > SZ_512M) {
+		__raw_writel(memphys + SZ_512M, chan->reg_base + SH4A_PCIELAR1);
+		__raw_writel(((memsize - SZ_512M) - SZ_256) | 1,
+			     chan->reg_base + SH4A_PCIELAMR1);
+		memsize = SZ_512M;
+	} else {
+		/*
+		 * Otherwise just zero it out and disable it.
+		 */
+		__raw_writel(0, chan->reg_base + SH4A_PCIELAR1);
+		__raw_writel(0, chan->reg_base + SH4A_PCIELAMR1);
+	}
+
+	/*
+	 * LAR0/LAMR0 covers up to the first 512MB, which is enough to
+	 * cover all of lowmem on most platforms.
+	 */
+	__raw_writel(memphys, chan->reg_base + SH4A_PCIELAR0);
+	__raw_writel((memsize - SZ_256) | 1, chan->reg_base + SH4A_PCIELAMR0);
+
 	/* Finish initialization */
 	data = pci_read_reg(chan, SH4A_PCIETCTLR);
 	data |= 0x1;
@@ -243,10 +305,14 @@
 	if (unlikely(ret != 0))
 		return -ENODEV;
 
-	pci_write_reg(chan, 0x00100007, SH4A_PCIEPCICONF1);
+	data = pci_read_reg(chan, SH4A_PCIEPCICONF1);
+	data &= ~(PCI_STATUS_DEVSEL_MASK << 16);
+	data |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
+		(PCI_STATUS_CAP_LIST | PCI_STATUS_DEVSEL_FAST) << 16;
+	pci_write_reg(chan, data, SH4A_PCIEPCICONF1);
+
 	pci_write_reg(chan, 0x80888000, SH4A_PCIETXVC0DCTLR);
 	pci_write_reg(chan, 0x00222000, SH4A_PCIERXVC0DCTLR);
-	pci_write_reg(chan, 0x000050A0, SH4A_PCIEEXPCAP2);
 
 	wmb();
 
@@ -254,15 +320,32 @@
 	printk(KERN_NOTICE "PCI: PCIe#%d link width %d\n",
 	       port->index, (data >> 20) & 0x3f);
 
-	pci_write_reg(chan, 0x007c0000, SH4A_PCIEPAMR0);
-	pci_write_reg(chan, 0x00000000, SH4A_PCIEPARH0);
-	pci_write_reg(chan, 0x00000000, SH4A_PCIEPARL0);
-	pci_write_reg(chan, 0x80000100, SH4A_PCIEPTCTLR0);
 
-	pci_write_reg(chan, 0x03fc0000, SH4A_PCIEPAMR2);
-	pci_write_reg(chan, 0x00000000, SH4A_PCIEPARH2);
-	pci_write_reg(chan, 0x00000000, SH4A_PCIEPARL2);
-	pci_write_reg(chan, 0x80000000, SH4A_PCIEPTCTLR2);
+	for (i = 0; i < chan->nr_resources; i++) {
+		struct resource *res = chan->resources + i;
+		resource_size_t size;
+		u32 enable_mask;
+
+		pci_write_reg(chan, 0x00000000, SH4A_PCIEPTCTLR(i));
+
+		size = resource_size(res);
+
+		/*
+		 * The PAMR mask is calculated in units of 256kB, which
+		 * keeps things pretty simple.
+		 */
+		__raw_writel(((roundup_pow_of_two(size) / SZ_256K) - 1) << 18,
+			     chan->reg_base + SH4A_PCIEPAMR(i));
+
+		pci_write_reg(chan, 0x00000000, SH4A_PCIEPARH(i));
+		pci_write_reg(chan, 0x00000000, SH4A_PCIEPARL(i));
+
+		enable_mask = MASK_PARE;
+		if (res->flags & IORESOURCE_IO)
+			enable_mask |= MASK_SPC;
+
+		pci_write_reg(chan, enable_mask, SH4A_PCIEPTCTLR(i));
+	}
 
 	return 0;
 }
@@ -296,9 +379,7 @@
 	if (unlikely(ret < 0))
 		return ret;
 
-	register_pci_controller(port->hose);
-
-	return 0;
+	return register_pci_controller(port->hose);
 }
 
 static struct sh7786_pcie_hwops sh7786_65nm_pcie_hwops __initdata = {
@@ -332,17 +413,7 @@
 
 		port->index		= i;
 		port->hose		= sh7786_pci_channels + i;
-		port->hose->io_map_base	= port->hose->io_resource->start;
-
-		/*
-		 * Check if we are booting in 29 or 32-bit mode
-		 *
-		 * 32-bit mode provides each controller with its own
-		 * memory window, while 29-bit mode uses a shared one.
-		 */
-		port->hose->mem_resource = test_mode_pin(MODE_PIN10) ?
-			&sh7786_pci_32bit_mem_resources[i] :
-			&sh7786_pci_29bit_mem_resource;
+		port->hose->io_map_base	= port->hose->resources[0].start;
 
 		ret |= sh7786_pcie_hwops->port_init_hw(port);
 	}
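The PCIELAMR programming above follows the same size-mask-plus-enable-bit pattern as the SH7780 LSR registers, just with SZ_256 granularity; one worked value for illustration:

	/*
	 * memsize = SZ_512M = 0x20000000
	 * (memsize - SZ_256) | 1 = 0x1fffff01
	 * The upper bits mask the 512MB local window and the low bit enables
	 * it (the disabled LAMR1 case writes 0).
	 */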
diff --git a/arch/sh/drivers/pci/pcie-sh7786.h b/arch/sh/drivers/pci/pcie-sh7786.h
index c655290..90a6992 100644
--- a/arch/sh/drivers/pci/pcie-sh7786.h
+++ b/arch/sh/drivers/pci/pcie-sh7786.h
@@ -30,47 +30,9 @@
  * for other(Max Payload Size=4096B,PCIIO_SIZE=8M)
  */
 
-/* PCI0-0: PCI I/O space */
-#define SH4A_PCIIO_BASE		0xFD000000	/* PCI I/O for controller 0 */
-#define SH4A_PCIIO_BASE1	0xFD800000	/* PCI I/O for controller 1 (Rev1.14)*/
-#define SH4A_PCIIO_BASE2	0xFC800000	/* PCI I/O for controller 2 (Rev1.171)*/
-
-#define SH4A_PCIIO_SIZE64	0x00010000	/* PLX allows only 64K */
-#define SH4A_PCIIO_SIZE		0x00800000	/* 8M */
-#define SH4A_PCIIO_SIZE2	0x00400000	/* 4M (Rev1.171)*/
-
-/* PCI0-1: PCI memory space 29-bit address */
-#define SH4A_PCIMEM_BASE	0x10000000
-#define SH4A_PCIMEM_SIZE	0x04000000	/* 64M */
-
-/* PCI0-2: PCI memory space 32-bit address */
-#define SH4A_PCIMEM_BASEA	0xC0000000	/*  for controller 0 */
-#define SH4A_PCIMEM_BASEA1	0xA0000000	/*  for controller 1 (Rev1.14)*/
-#define SH4A_PCIMEM_BASEA2	0x80000000	/*  for controller 2 (Rev1.171)*/
-#define SH4A_PCIMEM_SIZEA	0x20000000	/* 512M */
-
 /* PCI0: PCI memory target transfer 32-bit address translation value(Rev1.11T)*/
 #define SH4A_PCIBMSTR_TRANSLATION	0x20000000
 
-#define SH4A_PCI_DEVICE_ID		0x0002
-#define SH4A_PCI_VENDOR_ID		0x1912
-
-// PCI compatible 000-03f
-#define PCI_CMD		0x004
-#define PCI_RID		0x008
-#define PCI_IBAR	0x010
-#define PCI_MBAR0	0x014
-#define PCI_MBAR1	0x018
-
-/* PCI power management/MSI/capablity 040-0ff */
-/* PCIE extended 100-fff */
-
-/* SH7786 device identification */	// Rev1.171
-#define SH4A_PVR		(0xFF000030)
-#define SH4A_PVR_SHX3		(0x10400000)
-#define SH4A_PRR		(0xFF000044)
-#define SH4A_PRR_SH7786		(0x00000400)	// Rev1.171
-
 /*	SPVCR0		*/
 #define	SH4A_PCIEVCR0		(0x000000)	/* R - 0x0000 0000 32 */
 #define		BITS_TOP_MB	(24)
@@ -350,23 +312,23 @@
 #define	SH4A_PCIECSAR5		(0x0202B4)	/* R/W R/W 0x0000 0000 32 */
 #define	SH4A_PCIESTCTLR5	(0x0202B8)	/* R/W R/W 0x0000 0000 32 */
 
-/*	PCIEPARL0	*/
-#define	SH4A_PCIEPARL0		(0x020400)	/* R/W R/W 0x0000 0000 32 */
+/*	PCIEPARL	*/
+#define	SH4A_PCIEPARL(x)	(0x020400 + ((x) * 0x20)) /* R/W R/W 0x0000 0000 32 */
 #define		BITS_PAL	(18)
 #define		MASK_PAL	(0x3fff<<BITS_PAL)
 
-/*	PCIEPARH0	*/
-#define	SH4A_PCIEPARH0		(0x020404)	/* R/W R/W 0x0000 0000 32 */
+/*	PCIEPARH	*/
+#define	SH4A_PCIEPARH(x)	(0x020404 + ((x) * 0x20)) /* R/W R/W 0x0000 0000 32 */
 #define		BITS_PAH	(0)
 #define		MASK_PAH	(0xffffffff<<BITS_PAH)
 
-/*	PCIEPAMR0	 */
-#define	SH4A_PCIEPAMR0		(0x020408)	/* R/W R/W 0x0000 0000 32 */
+/*	PCIEPAMR	 */
+#define	SH4A_PCIEPAMR(x)	(0x020408 + ((x) * 0x20)) /* R/W R/W 0x0000 0000 32 */
 #define		BITS_PAM	(18)
 #define		MASK_PAM	(0x3fff<<BITS_PAM)
 
-/*	PCIEPTCTLR0	*/
-#define	SH4A_PCIEPTCTLR0	(0x02040C)	/* R/W R/W 0x0000 0000 32 */
+/*	PCIEPTCTLR	*/
+#define SH4A_PCIEPTCTLR(x)	(0x02040C + ((x) * 0x20))
 #define		BITS_PARE	(31)
 #define		MASK_PARE	(0x1<<BITS_PARE)
 #define		BITS_TC		(20)
@@ -378,26 +340,6 @@
 #define		BITS_SPC	(8)
 #define		MASK_SPC	(0x1<<BITS_SPC)
 
-#define	SH4A_PCIEPARL1		(0x020420)	/* R/W R/W 0x0000 0000 32 */
-#define	SH4A_PCIEPARH1		(0x020424)	/* R/W R/W 0x0000 0000 32 */
-#define	SH4A_PCIEPAMR1		(0x020428)	/* R/W R/W 0x0000 0000 32 */
-#define	SH4A_PCIEPTCTLR1	(0x02042C)	/* R/W R/W 0x0000 0000 32 */
-#define	SH4A_PCIEPARL2		(0x020440)	/* R/W R/W 0x0000 0000 32 */
-#define	SH4A_PCIEPARH2		(0x020444)	/* R/W R/W 0x0000 0000 32 */
-#define	SH4A_PCIEPAMR2		(0x020448)	/* R/W R/W 0x0000 0000 32 */
-#define	SH4A_PCIEPTCTLR2	(0x02044C)	/* R/W R/W 0x0000 0000 32 */
-#define	SH4A_PCIEPARL3		(0x020460)	/* R/W R/W 0x0000 0000 32 */
-#define	SH4A_PCIEPARH3		(0x020464)	/* R/W R/W 0x0000 0000 32 */
-#define	SH4A_PCIEPAMR3		(0x020468)	/* R/W R/W 0x0000 0000 32 */
-#define	SH4A_PCIEPTCTLR3	(0x02046C)	/* R/W R/W 0x0000 0000 32 */
-#define	SH4A_PCIEPARL4		(0x020480)	/* R/W R/W 0x0000 0000 32 */
-#define	SH4A_PCIEPARH4		(0x020484)	/* R/W R/W 0x0000 0000 32 */
-#define	SH4A_PCIEPAMR4		(0x020488)	/* R/W R/W 0x0000 0000 32 */
-#define	SH4A_PCIEPTCTLR4	(0x02048C)	/* R/W R/W 0x0000 0000 32 */
-#define	SH4A_PCIEPARL5		(0x0204A0)	/* R/W R/W 0x0000 0000 32 */
-#define	SH4A_PCIEPARH5		(0x0204A4)	/* R/W R/W 0x0000 0000 32 */
-#define	SH4A_PCIEPAMR5		(0x0204A8)	/* R/W R/W 0x0000 0000 32 */
-#define	SH4A_PCIEPTCTLR5	(0x0204AC)	/* R/W R/W 0x0000 0000 32 */
 #define	SH4A_PCIEDMAOR		(0x021000)	/* R/W R/W 0x0000 0000 32 */
 #define	SH4A_PCIEDMSAR0		(0x021100)	/* R/W R/W 0x0000 0000 32 */
 #define	SH4A_PCIEDMSAHR0	(0x021104)	/* R/W R/W 0x0000 0000 32 */
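The new indexed register macros reproduce the removed per-window constants with a 0x20 stride; a quick spot-check against the old defines:

	/*
	 * SH4A_PCIEPARL(2)   = 0x020400 + 2 * 0x20 = 0x020440  (was SH4A_PCIEPARL2)
	 * SH4A_PCIEPAMR(3)   = 0x020408 + 3 * 0x20 = 0x020468  (was SH4A_PCIEPAMR3)
	 * SH4A_PCIEPTCTLR(5) = 0x02040C + 5 * 0x20 = 0x0204AC  (was SH4A_PCIEPTCTLR5)
	 */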
diff --git a/arch/sh/drivers/superhyway/ops-sh4-202.c b/arch/sh/drivers/superhyway/ops-sh4-202.c
index 3b14bf8..6da62e9 100644
--- a/arch/sh/drivers/superhyway/ops-sh4-202.c
+++ b/arch/sh/drivers/superhyway/ops-sh4-202.c
@@ -134,8 +134,8 @@
 	 *
 	 * Do not trust the documentation, for it is evil.
 	 */
-	vcrh = ctrl_inl(base);
-	vcrl = ctrl_inl(base + sizeof(u32));
+	vcrh = __raw_readl(base);
+	vcrl = __raw_readl(base + sizeof(u32));
 
 	tmp = ((u64)vcrh << 32) | vcrl;
 	memcpy(vcr, &tmp, sizeof(u64));
@@ -147,8 +147,8 @@
 {
 	u64 tmp = *(u64 *)&vcr;
 
-	ctrl_outl((tmp >> 32) & 0xffffffff, base);
-	ctrl_outl(tmp & 0xffffffff, base + sizeof(u32));
+	__raw_writel((tmp >> 32) & 0xffffffff, base);
+	__raw_writel(tmp & 0xffffffff, base + sizeof(u32));
 
 	return 0;
 }
diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild
index e121c30..46cb934 100644
--- a/arch/sh/include/asm/Kbuild
+++ b/arch/sh/include/asm/Kbuild
@@ -1,6 +1,8 @@
 include include/asm-generic/Kbuild.asm
 
-header-y += cachectl.h cpu-features.h
+header-y += cachectl.h
+header-y += cpu-features.h
+header-y += hw_breakpoint.h
 
 unifdef-y += unistd_32.h
 unifdef-y += unistd_64.h
diff --git a/arch/sh/include/asm/addrspace.h b/arch/sh/include/asm/addrspace.h
index 99d6b3e..446b383 100644
--- a/arch/sh/include/asm/addrspace.h
+++ b/arch/sh/include/asm/addrspace.h
@@ -28,7 +28,7 @@
 /* Returns the privileged segment base of a given address  */
 #define PXSEG(a)	(((unsigned long)(a)) & 0xe0000000)
 
-#if defined(CONFIG_29BIT) || defined(CONFIG_PMB_FIXED)
+#ifdef CONFIG_29BIT
 /*
  * Map an address to a certain privileged segment
  */
@@ -40,7 +40,15 @@
 	((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P3SEG))
 #define P4SEGADDR(a)	\
 	((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P4SEG))
-#endif /* 29BIT || PMB_FIXED */
+#else
+/*
+ * These will never work in 32-bit, don't even bother.
+ */
+#define P1SEGADDR(a)	__futile_remapping_attempt
+#define P2SEGADDR(a)	__futile_remapping_attempt
+#define P3SEGADDR(a)	__futile_remapping_attempt
+#define P4SEGADDR(a)	__futile_remapping_attempt
+#endif
 #endif /* P1SEG */
 
 /* Check if an address can be reached in 29 bits */
@@ -57,11 +65,5 @@
 #define P3_ADDR_MAX		P4SEG
 #endif
 
-#ifndef __ASSEMBLY__
-#ifdef CONFIG_PMB
-extern int __in_29bit_mode(void);
-#endif /* CONFIG_PMB */
-#endif /* __ASSEMBLY__ */
-
 #endif /* __KERNEL__ */
 #endif /* __ASM_SH_ADDRSPACE_H */
diff --git a/arch/sh/include/asm/alignment.h b/arch/sh/include/asm/alignment.h
new file mode 100644
index 0000000..b12efec
--- /dev/null
+++ b/arch/sh/include/asm/alignment.h
@@ -0,0 +1,21 @@
+#ifndef __ASM_SH_ALIGNMENT_H
+#define __ASM_SH_ALIGNMENT_H
+
+#include <linux/types.h>
+
+extern void inc_unaligned_byte_access(void);
+extern void inc_unaligned_word_access(void);
+extern void inc_unaligned_dword_access(void);
+extern void inc_unaligned_multi_access(void);
+extern void inc_unaligned_user_access(void);
+extern void inc_unaligned_kernel_access(void);
+
+#define UM_WARN		(1 << 0)
+#define UM_FIXUP	(1 << 1)
+#define UM_SIGNAL	(1 << 2)
+
+extern unsigned int unaligned_user_action(void);
+
+extern void unaligned_fixups_notify(struct task_struct *, insn_size_t, struct pt_regs *);
+
+#endif /* __ASM_SH_ALIGNMENT_H */
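The UM_* bits returned by unaligned_user_action() are meant to be tested individually. A hypothetical consumer, sketched only to show the intended flow; the handler name and its return convention are made up:

	static int handle_user_unaligned(struct task_struct *tsk, insn_size_t insn,
					 struct pt_regs *regs)
	{
		unsigned int action = unaligned_user_action();

		inc_unaligned_user_access();

		if (action & UM_WARN)
			unaligned_fixups_notify(tsk, insn, regs);
		if (action & UM_SIGNAL)
			return -1;	/* caller raises SIGBUS */
		if (action & UM_FIXUP)
			return 0;	/* caller emulates the access */

		return -1;
	}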
diff --git a/arch/sh/include/asm/atomic-grb.h b/arch/sh/include/asm/atomic-grb.h
index 4c5b7db..a273c88 100644
--- a/arch/sh/include/asm/atomic-grb.h
+++ b/arch/sh/include/asm/atomic-grb.h
@@ -120,50 +120,4 @@
 		: "memory" , "r0", "r1");
 }
 
-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
-{
-	int ret;
-
-	__asm__ __volatile__ (
-		"   .align 2		\n\t"
-		"   mova     1f,  r0	\n\t"
-		"   nop			\n\t"
-		"   mov     r15,  r1	\n\t"
-		"   mov    #-8,  r15	\n\t"
-		"   mov.l   @%1,  %0	\n\t"
-		"   cmp/eq   %2,  %0	\n\t"
-		"   bf	     1f		\n\t"
-		"   mov.l    %3, @%1	\n\t"
-		"1: mov      r1,  r15	\n\t"
-		: "=&r" (ret)
-		: "r" (v), "r" (old), "r" (new)
-		: "memory" , "r0", "r1" , "t");
-
-	return ret;
-}
-
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
-{
-	int ret;
-	unsigned long tmp;
-
-	__asm__ __volatile__ (
-		"   .align 2		\n\t"
-		"   mova    1f,   r0	\n\t"
-		"   nop			\n\t"
-		"   mov    r15,   r1	\n\t"
-		"   mov    #-12,  r15	\n\t"
-		"   mov.l  @%2,   %1	\n\t"
-		"   mov	    %1,   %0    \n\t"
-		"   cmp/eq  %4,   %0	\n\t"
-		"   bt/s    1f		\n\t"
-		"    add    %3,   %1	\n\t"
-		"   mov.l   %1,  @%2	\n\t"
-		"1: mov     r1,   r15	\n\t"
-		: "=&r" (ret), "=&r" (tmp)
-		: "r" (v), "r" (a), "r" (u)
-		: "memory" , "r0", "r1" , "t");
-
-	return ret != u;
-}
 #endif /* __ASM_SH_ATOMIC_GRB_H */
diff --git a/arch/sh/include/asm/atomic-llsc.h b/arch/sh/include/asm/atomic-llsc.h
index b040e1e..4b00b78 100644
--- a/arch/sh/include/asm/atomic-llsc.h
+++ b/arch/sh/include/asm/atomic-llsc.h
@@ -104,31 +104,4 @@
 	: "t");
 }
 
-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
-
-/**
- * atomic_add_unless - add unless the number is a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
- */
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
-{
-	int c, old;
-	c = atomic_read(v);
-	for (;;) {
-		if (unlikely(c == (u)))
-			break;
-		old = atomic_cmpxchg((v), c, c + (a));
-		if (likely(old == c))
-			break;
-		c = old;
-	}
-
-	return c != (u);
-}
-
 #endif /* __ASM_SH_ATOMIC_LLSC_H */
diff --git a/arch/sh/include/asm/atomic.h b/arch/sh/include/asm/atomic.h
index b16388d..275a448 100644
--- a/arch/sh/include/asm/atomic.h
+++ b/arch/sh/include/asm/atomic.h
@@ -25,58 +25,43 @@
 #endif
 
 #define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
+#define atomic_dec_return(v)		atomic_sub_return(1, (v))
+#define atomic_inc_return(v)		atomic_add_return(1, (v))
+#define atomic_inc_and_test(v)		(atomic_inc_return(v) == 0)
+#define atomic_sub_and_test(i,v)	(atomic_sub_return((i), (v)) == 0)
+#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
+#define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)
 
-#define atomic_dec_return(v) atomic_sub_return(1,(v))
-#define atomic_inc_return(v) atomic_add_return(1,(v))
+#define atomic_inc(v)			atomic_add(1, (v))
+#define atomic_dec(v)			atomic_sub(1, (v))
 
-/*
- * atomic_inc_and_test - increment and test
+#define atomic_xchg(v, new)		(xchg(&((v)->counter), new))
+#define atomic_cmpxchg(v, o, n)		(cmpxchg(&((v)->counter), (o), (n)))
+
+/**
+ * atomic_add_unless - add unless the number is a given value
  * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
  *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
  */
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-
-#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
-#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
-
-#define atomic_inc(v) atomic_add(1,(v))
-#define atomic_dec(v) atomic_sub(1,(v))
-
-#if !defined(CONFIG_GUSA_RB) && !defined(CONFIG_CPU_SH4A)
-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
-{
-	int ret;
-	unsigned long flags;
-
-	local_irq_save(flags);
-	ret = v->counter;
-	if (likely(ret == old))
-		v->counter = new;
-	local_irq_restore(flags);
-
-	return ret;
-}
-
 static inline int atomic_add_unless(atomic_t *v, int a, int u)
 {
-	int ret;
-	unsigned long flags;
+	int c, old;
+	c = atomic_read(v);
+	for (;;) {
+		if (unlikely(c == (u)))
+			break;
+		old = atomic_cmpxchg((v), c, c + (a));
+		if (likely(old == c))
+			break;
+		c = old;
+	}
 
-	local_irq_save(flags);
-	ret = v->counter;
-	if (ret != u)
-		v->counter += a;
-	local_irq_restore(flags);
-
-	return ret != u;
+	return c != (u);
 }
-#endif /* !CONFIG_GUSA_RB && !CONFIG_CPU_SH4A */
-
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
 #define smp_mb__before_atomic_dec()	smp_mb()
 #define smp_mb__after_atomic_dec()	smp_mb()
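With the cmpxchg()-based fallback now shared in one place, the usual consumer of atomic_add_unless()/atomic_inc_not_zero() is the "take a reference only if the object is still live" pattern; the object type here is made up for illustration:

	struct obj {
		atomic_t refcount;	/* 0 means the object is being torn down */
	};

	static int obj_tryget(struct obj *o)
	{
		/* increments only if the count is not already zero */
		return atomic_inc_not_zero(&o->refcount);
	}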
diff --git a/arch/sh/include/asm/cacheflush.h b/arch/sh/include/asm/cacheflush.h
index dda96eb..da3ebec 100644
--- a/arch/sh/include/asm/cacheflush.h
+++ b/arch/sh/include/asm/cacheflush.h
@@ -63,6 +63,14 @@
 	if (boot_cpu_data.dcache.n_aliases && PageAnon(page))
 		__flush_anon_page(page, vmaddr);
 }
+static inline void flush_kernel_vmap_range(void *addr, int size)
+{
+	__flush_wback_region(addr, size);
+}
+static inline void invalidate_kernel_vmap_range(void *addr, int size)
+{
+	__flush_invalidate_region(addr, size);
+}
 
 #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
 static inline void flush_kernel_dcache_page(struct page *page)
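On SH the two vmap helpers map directly onto the wback/invalidate region primitives. A sketch of how a driver might bracket device I/O on a vmalloc()ed buffer; submit_to_device()/wait_for_device() are placeholders, not real APIs:

	static void vmap_buffer_io(void *buf, int len, int is_read)
	{
		if (!is_read)
			/* write back dirty lines before the device reads the buffer */
			flush_kernel_vmap_range(buf, len);

		submit_to_device(buf, len, is_read);
		wait_for_device();

		if (is_read)
			/* toss cached lines now that the device has written the buffer */
			invalidate_kernel_vmap_range(buf, len);
	}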
diff --git a/arch/sh/include/asm/clock.h b/arch/sh/include/asm/clock.h
index 9fe7d7f..11da4c5 100644
--- a/arch/sh/include/asm/clock.h
+++ b/arch/sh/include/asm/clock.h
@@ -146,8 +146,17 @@
 	.flags = _flags,						\
 }
 
+struct clk_div4_table {
+	struct clk_div_mult_table *div_mult_table;
+	void (*kick)(struct clk *clk);
+};
+
 int sh_clk_div4_register(struct clk *clks, int nr,
-			 struct clk_div_mult_table *table);
+			 struct clk_div4_table *table);
+int sh_clk_div4_enable_register(struct clk *clks, int nr,
+			 struct clk_div4_table *table);
+int sh_clk_div4_reparent_register(struct clk *clks, int nr,
+			 struct clk_div4_table *table);
 
 #define SH_CLK_DIV6(_name, _parent, _reg, _flags)	\
 {							\
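sh_clk_div4_register() and friends now take a clk_div4_table wrapper so a platform can attach a "kick" callback that latches newly written divider values. A sketch with made-up names; the register poke and the divisor table contents are platform specific:

	static void my_div4_kick(struct clk *clk)
	{
		/* e.g. set a "load new dividers" bit in the frequency control register */
	}

	static struct clk_div_mult_table my_div4_div_mult_table = {
		/* .divisors/.multipliers filled in by the platform */
	};

	static struct clk_div4_table my_div4_table = {
		.div_mult_table	= &my_div4_div_mult_table,
		.kick		= my_div4_kick,
	};

	/* later: sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks), &my_div4_table); */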
diff --git a/arch/sh/include/asm/cmpxchg-grb.h b/arch/sh/include/asm/cmpxchg-grb.h
index e2681ab..4676bf5 100644
--- a/arch/sh/include/asm/cmpxchg-grb.h
+++ b/arch/sh/include/asm/cmpxchg-grb.h
@@ -57,11 +57,10 @@
 		"   mov.l  @%1,   %0      \n\t" /* load  old value */
 		"   cmp/eq  %0,   %2      \n\t"
 		"   bf            1f      \n\t" /* if not equal */
-		"   mov.l   %2,   @%1     \n\t" /* store new value */
+		"   mov.l   %3,   @%1     \n\t" /* store new value */
 		"1: mov     r1,   r15     \n\t" /* LOGOUT */
-		: "=&r" (retval),
-		  "+r"  (m)
-		: "r"   (new)
+		: "=&r" (retval)
+		:  "r"  (m), "r"  (old), "r"  (new)
 		: "memory" , "r0", "r1", "t");
 
 	return retval;
diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h
index 87ced13..bea3337 100644
--- a/arch/sh/include/asm/dma-mapping.h
+++ b/arch/sh/include/asm/dma-mapping.h
@@ -89,8 +89,6 @@
 {
 	struct dma_map_ops *ops = get_dma_ops(dev);
 
-	WARN_ON(irqs_disabled());	/* for portability */
-
 	if (dma_release_from_coherent(dev, get_order(size), vaddr))
 		return;
 
diff --git a/arch/sh/include/asm/dma-sh.h b/arch/sh/include/asm/dma-sh.h
index 78eed3e0..e934a2e 100644
--- a/arch/sh/include/asm/dma-sh.h
+++ b/arch/sh/include/asm/dma-sh.h
@@ -20,14 +20,14 @@
     defined(CONFIG_CPU_SUBTYPE_SH7780)	|| \
     defined(CONFIG_CPU_SUBTYPE_SH7785)
 #define dmaor_read_reg(n) \
-    (n ? ctrl_inw(SH_DMAC_BASE1 + DMAOR) \
-	: ctrl_inw(SH_DMAC_BASE0 + DMAOR))
+    (n ? __raw_readw(SH_DMAC_BASE1 + DMAOR) \
+	: __raw_readw(SH_DMAC_BASE0 + DMAOR))
 #define dmaor_write_reg(n, data) \
-    (n ? ctrl_outw(data, SH_DMAC_BASE1 + DMAOR) \
-    : ctrl_outw(data, SH_DMAC_BASE0 + DMAOR))
+    (n ? __raw_writew(data, SH_DMAC_BASE1 + DMAOR) \
+    : __raw_writew(data, SH_DMAC_BASE0 + DMAOR))
 #else /* Other CPU */
-#define dmaor_read_reg(n) ctrl_inw(SH_DMAC_BASE0 + DMAOR)
-#define dmaor_write_reg(n, data) ctrl_outw(data, SH_DMAC_BASE0 + DMAOR)
+#define dmaor_read_reg(n) __raw_readw(SH_DMAC_BASE0 + DMAOR)
+#define dmaor_write_reg(n, data) __raw_writew(data, SH_DMAC_BASE0 + DMAOR)
 #endif
 
 static int dmte_irq_map[] __maybe_unused = {
@@ -64,8 +64,10 @@
 #define ACK_L	0x00010000
 #define DM_INC	0x00004000
 #define DM_DEC	0x00008000
+#define DM_FIX	0x0000c000
 #define SM_INC	0x00001000
 #define SM_DEC	0x00002000
+#define SM_FIX	0x00003000
 #define RS_IN	0x00000200
 #define RS_OUT	0x00000300
 #define TS_BLK	0x00000040
@@ -83,7 +85,7 @@
  * Define the default configuration for dual address memory-memory transfer.
  * The 0x400 value represents auto-request, external->external.
  */
-#define RS_DUAL	(DM_INC | SM_INC | 0x400 | TS_32)
+#define RS_DUAL	(DM_INC | SM_INC | 0x400 | TS_INDEX2VAL(XMIT_SZ_32BIT))
 
 /* DMA base address */
 static u32 dma_base_addr[] __maybe_unused = {
@@ -123,10 +125,47 @@
  */
 #define SHDMA_MIX_IRQ	(1 << 1)
 #define SHDMA_DMAOR1	(1 << 2)
-#define SHDMA_DMAE1		(1 << 3)
+#define SHDMA_DMAE1	(1 << 3)
+
+enum sh_dmae_slave_chan_id {
+	SHDMA_SLAVE_SCIF0_TX,
+	SHDMA_SLAVE_SCIF0_RX,
+	SHDMA_SLAVE_SCIF1_TX,
+	SHDMA_SLAVE_SCIF1_RX,
+	SHDMA_SLAVE_SCIF2_TX,
+	SHDMA_SLAVE_SCIF2_RX,
+	SHDMA_SLAVE_SCIF3_TX,
+	SHDMA_SLAVE_SCIF3_RX,
+	SHDMA_SLAVE_SCIF4_TX,
+	SHDMA_SLAVE_SCIF4_RX,
+	SHDMA_SLAVE_SCIF5_TX,
+	SHDMA_SLAVE_SCIF5_RX,
+	SHDMA_SLAVE_SIUA_TX,
+	SHDMA_SLAVE_SIUA_RX,
+	SHDMA_SLAVE_SIUB_TX,
+	SHDMA_SLAVE_SIUB_RX,
+	SHDMA_SLAVE_NUMBER,	/* Must stay last */
+};
+
+struct sh_dmae_slave_config {
+	enum sh_dmae_slave_chan_id	slave_id;
+	dma_addr_t			addr;
+	u32				chcr;
+	char				mid_rid;
+};
 
 struct sh_dmae_pdata {
 	unsigned int mode;
+	struct sh_dmae_slave_config *config;
+	int config_num;
+};
+
+struct device;
+
+struct sh_dmae_slave {
+	enum sh_dmae_slave_chan_id	slave_id; /* Set by the platform */
+	struct device			*dma_dev; /* Set by the platform */
+	struct sh_dmae_slave_config	*config;  /* Set by the driver */
 };
 
 #endif /* __DMA_SH_H */
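The new slave description is handed to the DMA driver through sh_dmae_pdata. A sketch of how a platform might describe a single SCIF TX channel; the FIFO address, CHCR bits and MID/RID value are placeholders, not taken from any real SoC:

	static struct sh_dmae_slave_config my_dmae_slaves[] = {
		{
			.slave_id	= SHDMA_SLAVE_SCIF0_TX,
			.addr		= 0xffe0000c,	/* placeholder FIFO address */
			.chcr		= DM_FIX | SM_INC | RS_OUT, /* plus a TS_INDEX2VAL() size */
			.mid_rid	= 0x21,		/* placeholder */
		},
	};

	static struct sh_dmae_pdata my_dmae_pdata = {
		.config		= my_dmae_slaves,
		.config_num	= ARRAY_SIZE(my_dmae_slaves),
	};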
diff --git a/arch/sh/include/asm/dwarf.h b/arch/sh/include/asm/dwarf.h
index bdccbbf..d62abd1 100644
--- a/arch/sh/include/asm/dwarf.h
+++ b/arch/sh/include/asm/dwarf.h
@@ -243,16 +243,13 @@
 
 	unsigned long cie_pointer;
 
-	struct list_head link;
-
 	unsigned long flags;
 #define DWARF_CIE_Z_AUGMENTATION	(1 << 0)
 
-	/*
-	 * 'mod' will be non-NULL if this CIE came from a module's
-	 * .eh_frame section.
-	 */
-	struct module *mod;
+	/* linked-list entry if this CIE is from a module */
+	struct list_head link;
+
+	struct rb_node node;
 };
 
 /**
@@ -266,13 +263,11 @@
 	unsigned long address_range;
 	unsigned char *instructions;
 	unsigned char *end;
+
+	/* linked-list entry if this FDE is from a module */
 	struct list_head link;
 
-	/*
-	 * 'mod' will be non-NULL if this FDE came from a module's
-	 * .eh_frame section.
-	 */
-	struct module *mod;
+	struct rb_node node;
 };
 
 /**
diff --git a/arch/sh/include/asm/fixmap.h b/arch/sh/include/asm/fixmap.h
index 5ac1e40..6e7cea4 100644
--- a/arch/sh/include/asm/fixmap.h
+++ b/arch/sh/include/asm/fixmap.h
@@ -55,16 +55,29 @@
 #define FIX_N_COLOURS 8
 	FIX_CMAP_BEGIN,
 	FIX_CMAP_END = FIX_CMAP_BEGIN + (FIX_N_COLOURS * NR_CPUS) - 1,
-	FIX_UNCACHED,
+
 #ifdef CONFIG_HIGHMEM
 	FIX_KMAP_BEGIN,	/* reserved pte's for temporary kernel mappings */
 	FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
 #endif
+
+#ifdef CONFIG_IOREMAP_FIXED
+	/*
+	 * FIX_IOREMAP entries are useful for mapping physical address
+	 * space before ioremap() is useable, e.g. really early in boot
+	 * before kmalloc() is working.
+	 */
+#define FIX_N_IOREMAPS	32
+	FIX_IOREMAP_BEGIN,
+	FIX_IOREMAP_END = FIX_IOREMAP_BEGIN + FIX_N_IOREMAPS,
+#endif
+
 	__end_of_fixed_addresses
 };
 
 extern void __set_fixmap(enum fixed_addresses idx,
 			 unsigned long phys, pgprot_t flags);
+extern void __clear_fixmap(enum fixed_addresses idx, pgprot_t flags);
 
 #define set_fixmap(idx, phys) \
 		__set_fixmap(idx, phys, PAGE_KERNEL)
diff --git a/arch/sh/include/asm/fpu.h b/arch/sh/include/asm/fpu.h
index fb6bbb9..06c4281 100644
--- a/arch/sh/include/asm/fpu.h
+++ b/arch/sh/include/asm/fpu.h
@@ -2,8 +2,8 @@
 #define __ASM_SH_FPU_H
 
 #ifndef __ASSEMBLY__
-#include <linux/preempt.h>
-#include <asm/ptrace.h>
+
+struct task_struct;
 
 #ifdef CONFIG_SH_FPU
 static inline void release_fpu(struct pt_regs *regs)
@@ -16,22 +16,23 @@
 	regs->sr &= ~SR_FD;
 }
 
-struct task_struct;
-
 extern void save_fpu(struct task_struct *__tsk);
-void fpu_state_restore(struct pt_regs *regs);
+extern void restore_fpu(struct task_struct *__tsk);
+extern void fpu_state_restore(struct pt_regs *regs);
+extern void __fpu_state_restore(void);
 #else
-
-#define save_fpu(tsk)		do { } while (0)
-#define release_fpu(regs)	do { } while (0)
-#define grab_fpu(regs)		do { } while (0)
-#define fpu_state_restore(regs)	do { } while (0)
-
+#define save_fpu(tsk)			do { } while (0)
+#define restore_fpu(tsk)		do { } while (0)
+#define release_fpu(regs)		do { } while (0)
+#define grab_fpu(regs)			do { } while (0)
+#define fpu_state_restore(regs)		do { } while (0)
+#define __fpu_state_restore(regs)	do { } while (0)
 #endif
 
 struct user_regset;
 
 extern int do_fpu_inst(unsigned short, struct pt_regs *);
+extern int init_fpu(struct task_struct *);
 
 extern int fpregs_get(struct task_struct *target,
 		      const struct user_regset *regset,
@@ -65,18 +66,6 @@
 	preempt_enable();
 }
 
-static inline int init_fpu(struct task_struct *tsk)
-{
-	if (tsk_used_math(tsk)) {
-		if ((boot_cpu_data.flags & CPU_HAS_FPU) && tsk == current)
-			unlazy_fpu(tsk, task_pt_regs(tsk));
-		return 0;
-	}
-
-	set_stopped_child_used_math(tsk);
-	return 0;
-}
-
 #endif /* __ASSEMBLY__ */
 
 #endif /* __ASM_SH_FPU_H */
diff --git a/arch/sh/include/asm/hw_breakpoint.h b/arch/sh/include/asm/hw_breakpoint.h
new file mode 100644
index 0000000..965dd78
--- /dev/null
+++ b/arch/sh/include/asm/hw_breakpoint.h
@@ -0,0 +1,67 @@
+#ifndef __ASM_SH_HW_BREAKPOINT_H
+#define __ASM_SH_HW_BREAKPOINT_H
+
+#ifdef __KERNEL__
+#define __ARCH_HW_BREAKPOINT_H
+
+#include <linux/kdebug.h>
+#include <linux/types.h>
+
+struct arch_hw_breakpoint {
+	char		*name; /* Contains name of the symbol to set bkpt */
+	unsigned long	address;
+	u16		len;
+	u16		type;
+};
+
+enum {
+	SH_BREAKPOINT_READ	= (1 << 1),
+	SH_BREAKPOINT_WRITE	= (1 << 2),
+	SH_BREAKPOINT_RW	= SH_BREAKPOINT_READ | SH_BREAKPOINT_WRITE,
+
+	SH_BREAKPOINT_LEN_1	= (1 << 12),
+	SH_BREAKPOINT_LEN_2	= (1 << 13),
+	SH_BREAKPOINT_LEN_4	= SH_BREAKPOINT_LEN_1 | SH_BREAKPOINT_LEN_2,
+	SH_BREAKPOINT_LEN_8	= (1 << 14),
+};
+
+struct sh_ubc {
+	const char	*name;
+	unsigned int	num_events;
+	unsigned int	trap_nr;
+	void		(*enable)(struct arch_hw_breakpoint *, int);
+	void		(*disable)(struct arch_hw_breakpoint *, int);
+	void		(*enable_all)(unsigned long);
+	void		(*disable_all)(void);
+	unsigned long	(*active_mask)(void);
+	unsigned long	(*triggered_mask)(void);
+	void		(*clear_triggered_mask)(unsigned long);
+	struct clk	*clk;	/* optional interface clock / MSTP bit */
+};
+
+struct perf_event;
+struct task_struct;
+struct pmu;
+
+/* Maximum number of UBC channels */
+#define HBP_NUM		2
+
+/* arch/sh/kernel/hw_breakpoint.c */
+extern int arch_check_va_in_userspace(unsigned long va, u16 hbp_len);
+extern int arch_validate_hwbkpt_settings(struct perf_event *bp,
+					 struct task_struct *tsk);
+extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
+					   unsigned long val, void *data);
+
+int arch_install_hw_breakpoint(struct perf_event *bp);
+void arch_uninstall_hw_breakpoint(struct perf_event *bp);
+void hw_breakpoint_pmu_read(struct perf_event *bp);
+void hw_breakpoint_pmu_unthrottle(struct perf_event *bp);
+
+extern void arch_fill_perf_breakpoint(struct perf_event *bp);
+extern int register_sh_ubc(struct sh_ubc *);
+
+extern struct pmu perf_ops_bp;
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_SH_HW_BREAKPOINT_H */
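[Editor's note, not part of the patch] The new header encodes a breakpoint's access type and length as the SH_BREAKPOINT_* bit flags shown above. A standalone sketch of composing and decoding such a flag pair; only the bit layout is taken from the header, the describe() helper is invented for illustration:

/* Sketch: building and inspecting SH_BREAKPOINT_*-style flag words. */
#include <stdio.h>

enum {
	BP_READ   = (1 << 1),
	BP_WRITE  = (1 << 2),
	BP_RW     = BP_READ | BP_WRITE,

	BP_LEN_1  = (1 << 12),
	BP_LEN_2  = (1 << 13),
	BP_LEN_4  = BP_LEN_1 | BP_LEN_2,
	BP_LEN_8  = (1 << 14),
};

static void describe(unsigned int type, unsigned int len)
{
	printf("access:%s%s len:%s\n",
	       (type & BP_READ)  ? " read"  : "",
	       (type & BP_WRITE) ? " write" : "",
	       len == BP_LEN_1 ? "1" :
	       len == BP_LEN_2 ? "2" :
	       len == BP_LEN_4 ? "4" : "8");
}

int main(void)
{
	describe(BP_RW, BP_LEN_4);	/* read/write watchpoint, 4 bytes */
	describe(BP_WRITE, BP_LEN_1);	/* write-only, 1 byte */
	return 0;
}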
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
index 026dd65..7dab7b23 100644
--- a/arch/sh/include/asm/io.h
+++ b/arch/sh/include/asm/io.h
@@ -22,6 +22,7 @@
  * for old compat code for I/O offseting to SuperIOs, all of which are
  * better handled through the machvec ioport mapping routines these days.
  */
+#include <linux/errno.h>
 #include <asm/cache.h>
 #include <asm/system.h>
 #include <asm/addrspace.h>
@@ -79,16 +80,51 @@
 #define writel(v,a)		({ __raw_writel((v),(a)); mb(); })
 #define writeq(v,a)		({ __raw_writeq((v),(a)); mb(); })
 
-/* SuperH on-chip I/O functions */
-#define ctrl_inb		__raw_readb
-#define ctrl_inw		__raw_readw
-#define ctrl_inl		__raw_readl
-#define ctrl_inq		__raw_readq
+/*
+ * Legacy SuperH on-chip I/O functions
+ *
+ * These are all deprecated; all new (and especially cross-platform) code
+ * should be using the __raw_xxx() routines directly.
+ */
+static inline u8 __deprecated ctrl_inb(unsigned long addr)
+{
+	return __raw_readb(addr);
+}
 
-#define ctrl_outb		__raw_writeb
-#define ctrl_outw		__raw_writew
-#define ctrl_outl		__raw_writel
-#define ctrl_outq		__raw_writeq
+static inline u16 __deprecated ctrl_inw(unsigned long addr)
+{
+	return __raw_readw(addr);
+}
+
+static inline u32 __deprecated ctrl_inl(unsigned long addr)
+{
+	return __raw_readl(addr);
+}
+
+static inline u64 __deprecated ctrl_inq(unsigned long addr)
+{
+	return __raw_readq(addr);
+}
+
+static inline void __deprecated ctrl_outb(u8 v, unsigned long addr)
+{
+	__raw_writeb(v, addr);
+}
+
+static inline void __deprecated ctrl_outw(u16 v, unsigned long addr)
+{
+	__raw_writew(v, addr);
+}
+
+static inline void __deprecated ctrl_outl(u32 v, unsigned long addr)
+{
+	__raw_writel(v, addr);
+}
+
+static inline void __deprecated ctrl_outq(u64 v, unsigned long addr)
+{
+	__raw_writeq(v, addr);
+}
 
 extern unsigned long generic_io_base;
 
@@ -97,6 +133,28 @@
 	__raw_readw(generic_io_base);
 }
 
+#define __BUILD_UNCACHED_IO(bwlq, type)					\
+static inline type read##bwlq##_uncached(unsigned long addr)		\
+{									\
+	type ret;							\
+	jump_to_uncached();						\
+	ret = __raw_read##bwlq(addr);					\
+	back_to_cached();						\
+	return ret;							\
+}									\
+									\
+static inline void write##bwlq##_uncached(type v, unsigned long addr)	\
+{									\
+	jump_to_uncached();						\
+	__raw_write##bwlq(v, addr);					\
+	back_to_cached();						\
+}
+
+__BUILD_UNCACHED_IO(b, u8)
+__BUILD_UNCACHED_IO(w, u16)
+__BUILD_UNCACHED_IO(l, u32)
+__BUILD_UNCACHED_IO(q, u64)
+
 #define __BUILD_MEMORY_STRING(bwlq, type)				\
 									\
 static inline void __raw_writes##bwlq(volatile void __iomem *mem,	\
@@ -234,28 +292,21 @@
  */
 #ifdef CONFIG_MMU
 void __iomem *__ioremap_caller(unsigned long offset, unsigned long size,
-			       unsigned long flags, void *caller);
+			       pgprot_t prot, void *caller);
 void __iounmap(void __iomem *addr);
 
 static inline void __iomem *
-__ioremap(unsigned long offset, unsigned long size, unsigned long flags)
+__ioremap(unsigned long offset, unsigned long size, pgprot_t prot)
 {
-	return __ioremap_caller(offset, size, flags, __builtin_return_address(0));
+	return __ioremap_caller(offset, size, prot, __builtin_return_address(0));
 }
 
 static inline void __iomem *
-__ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags)
+__ioremap_29bit(unsigned long offset, unsigned long size, pgprot_t prot)
 {
-#if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED) && !defined(CONFIG_PMB)
+#ifdef CONFIG_29BIT
 	unsigned long last_addr = offset + size - 1;
-#endif
-	void __iomem *ret;
 
-	ret = __ioremap_trapped(offset, size);
-	if (ret)
-		return ret;
-
-#if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED) && !defined(CONFIG_PMB)
 	/*
 	 * For P1 and P2 space this is trivial, as everything is already
 	 * mapped. Uncached access for P1 addresses are done through P2.
@@ -263,7 +314,7 @@
 	 * mapping must be done by the PMB or by using page tables.
 	 */
 	if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) {
-		if (unlikely(flags & _PAGE_CACHABLE))
+		if (unlikely(pgprot_val(prot) & _PAGE_CACHABLE))
 			return (void __iomem *)P1SEGADDR(offset);
 
 		return (void __iomem *)P2SEGADDR(offset);
@@ -274,26 +325,70 @@
 		return (void __iomem *)P4SEGADDR(offset);
 #endif
 
-	return __ioremap(offset, size, flags);
+	return NULL;
+}
+
+static inline void __iomem *
+__ioremap_mode(unsigned long offset, unsigned long size, pgprot_t prot)
+{
+	void __iomem *ret;
+
+	ret = __ioremap_trapped(offset, size);
+	if (ret)
+		return ret;
+
+	ret = __ioremap_29bit(offset, size, prot);
+	if (ret)
+		return ret;
+
+	return __ioremap(offset, size, prot);
 }
 #else
-#define __ioremap(offset, size, flags)		((void __iomem *)(offset))
-#define __ioremap_mode(offset, size, flags)	((void __iomem *)(offset))
+#define __ioremap(offset, size, prot)		((void __iomem *)(offset))
+#define __ioremap_mode(offset, size, prot)	((void __iomem *)(offset))
 #define __iounmap(addr)				do { } while (0)
 #endif /* CONFIG_MMU */
 
-#define ioremap(offset, size)				\
-	__ioremap_mode((offset), (size), 0)
-#define ioremap_nocache(offset, size)			\
-	__ioremap_mode((offset), (size), 0)
-#define ioremap_cache(offset, size)			\
-	__ioremap_mode((offset), (size), _PAGE_CACHABLE)
-#define p3_ioremap(offset, size, flags)			\
-	__ioremap((offset), (size), (flags))
-#define ioremap_prot(offset, size, flags)		\
-	__ioremap_mode((offset), (size), (flags))
-#define iounmap(addr)					\
-	__iounmap((addr))
+static inline void __iomem *
+ioremap(unsigned long offset, unsigned long size)
+{
+	return __ioremap_mode(offset, size, PAGE_KERNEL_NOCACHE);
+}
+
+static inline void __iomem *
+ioremap_cache(unsigned long offset, unsigned long size)
+{
+	return __ioremap_mode(offset, size, PAGE_KERNEL);
+}
+
+#ifdef CONFIG_HAVE_IOREMAP_PROT
+static inline void __iomem *
+ioremap_prot(resource_size_t offset, unsigned long size, unsigned long flags)
+{
+	return __ioremap_mode(offset, size, __pgprot(flags));
+}
+#endif
+
+#ifdef CONFIG_IOREMAP_FIXED
+extern void __iomem *ioremap_fixed(resource_size_t, unsigned long,
+				   unsigned long, pgprot_t);
+extern int iounmap_fixed(void __iomem *);
+extern void ioremap_fixed_init(void);
+#else
+static inline void __iomem *
+ioremap_fixed(resource_size_t phys_addr, unsigned long offset,
+	      unsigned long size, pgprot_t prot)
+{
+	BUG();
+	return NULL;
+}
+
+static inline void ioremap_fixed_init(void) { }
+static inline int iounmap_fixed(void __iomem *addr) { return -EINVAL; }
+#endif
+
+#define ioremap_nocache	ioremap
+#define iounmap		__iounmap
 
 #define maybebadio(port) \
 	printk(KERN_ERR "bad PC-like io %s:%u for port 0x%lx at 0x%08x\n", \
diff --git a/arch/sh/include/asm/kdebug.h b/arch/sh/include/asm/kdebug.h
index 985219f..5f6d2e9 100644
--- a/arch/sh/include/asm/kdebug.h
+++ b/arch/sh/include/asm/kdebug.h
@@ -6,6 +6,8 @@
 	DIE_TRAP,
 	DIE_NMI,
 	DIE_OOPS,
+	DIE_BREAKPOINT,
+	DIE_SSTEP,
 };
 
 #endif /* __ASM_SH_KDEBUG_H */
diff --git a/arch/sh/include/asm/mmu.h b/arch/sh/include/asm/mmu.h
index c7426ad..15a05b6 100644
--- a/arch/sh/include/asm/mmu.h
+++ b/arch/sh/include/asm/mmu.h
@@ -11,7 +11,9 @@
 
 #define PMB_ADDR		0xf6100000
 #define PMB_DATA		0xf7100000
-#define PMB_ENTRY_MAX		16
+
+#define NR_PMB_ENTRIES		16
+
 #define PMB_E_MASK		0x0000000f
 #define PMB_E_SHIFT		8
 
@@ -25,11 +27,15 @@
 #define PMB_C			0x00000008
 #define PMB_WT			0x00000001
 #define PMB_UB			0x00000200
+#define PMB_CACHE_MASK		(PMB_C | PMB_WT | PMB_UB)
 #define PMB_V			0x00000100
 
 #define PMB_NO_ENTRY		(-1)
 
 #ifndef __ASSEMBLY__
+#include <linux/errno.h>
+#include <linux/threads.h>
+#include <asm/page.h>
 
 /* Default "unsigned long" context */
 typedef unsigned long mm_context_id_t[NR_CPUS];
@@ -47,29 +53,30 @@
 #endif
 } mm_context_t;
 
-struct pmb_entry;
-
-struct pmb_entry {
-	unsigned long vpn;
-	unsigned long ppn;
-	unsigned long flags;
-
-	/*
-	 * 0 .. NR_PMB_ENTRIES for specific entry selection, or
-	 * PMB_NO_ENTRY to search for a free one
-	 */
-	int entry;
-
-	struct pmb_entry *next;
-	/* Adjacent entry link for contiguous multi-entry mappings */
-	struct pmb_entry *link;
-};
-
+#ifdef CONFIG_PMB
 /* arch/sh/mm/pmb.c */
 long pmb_remap(unsigned long virt, unsigned long phys,
-	       unsigned long size, unsigned long flags);
+	       unsigned long size, pgprot_t prot);
 void pmb_unmap(unsigned long addr);
-int pmb_init(void);
+void pmb_init(void);
+bool __in_29bit_mode(void);
+#else
+static inline long pmb_remap(unsigned long virt, unsigned long phys,
+			     unsigned long size, pgprot_t prot)
+{
+	return -EINVAL;
+}
+
+#define pmb_unmap(addr)		do { } while (0)
+#define pmb_init(addr)		do { } while (0)
+
+#ifdef CONFIG_29BIT
+#define __in_29bit_mode()	(1)
+#else
+#define __in_29bit_mode()	(0)
+#endif
+
+#endif /* CONFIG_PMB */
 #endif /* __ASSEMBLY__ */
 
 #endif /* __MMU_H */
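[Editor's note, not part of the patch] With CONFIG_PMB off, the hunk above turns pmb_remap() into a stub returning -EINVAL and makes __in_29bit_mode() a compile-time constant, so callers can branch on it and let the compiler drop the dead path. A hedged standalone sketch of that constant-predicate pattern, with IN_29BIT_MODE standing in for the config option:

/* Sketch: a predicate that folds to a constant selects one code path. */
#include <stdio.h>

#define IN_29BIT_MODE	0	/* toggle to 1 to model CONFIG_29BIT */

static inline int in_29bit_mode(void)
{
	return IN_29BIT_MODE;
}

static const char *describe_address_space(void)
{
	if (in_29bit_mode())
		return "29-bit physical, fixed P1/P2 segments";

	return "32-bit physical, PMB or page-table mappings";
}

int main(void)
{
	printf("%s\n", describe_address_space());
	return 0;
}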
diff --git a/arch/sh/include/asm/mmu_context.h b/arch/sh/include/asm/mmu_context.h
index 41080b1..384c747 100644
--- a/arch/sh/include/asm/mmu_context.h
+++ b/arch/sh/include/asm/mmu_context.h
@@ -158,7 +158,7 @@
 	unsigned int cpu = smp_processor_id();
 
 	/* Enable MMU */
-	ctrl_outl(MMU_CONTROL_INIT, MMUCR);
+	__raw_writel(MMU_CONTROL_INIT, MMUCR);
 	ctrl_barrier();
 
 	if (asid_cache(cpu) == NO_CONTEXT)
@@ -171,9 +171,9 @@
 {
 	unsigned long cr;
 
-	cr = ctrl_inl(MMUCR);
+	cr = __raw_readl(MMUCR);
 	cr &= ~MMU_CONTROL_INIT;
-	ctrl_outl(cr, MMUCR);
+	__raw_writel(cr, MMUCR);
 
 	ctrl_barrier();
 }
diff --git a/arch/sh/include/asm/mmu_context_32.h b/arch/sh/include/asm/mmu_context_32.h
index 8ef800c..10e2e17 100644
--- a/arch/sh/include/asm/mmu_context_32.h
+++ b/arch/sh/include/asm/mmu_context_32.h
@@ -49,11 +49,11 @@
 /* MMU_TTB is used for optimizing the fault handling. */
 static inline void set_TTB(pgd_t *pgd)
 {
-	ctrl_outl((unsigned long)pgd, MMU_TTB);
+	__raw_writel((unsigned long)pgd, MMU_TTB);
 }
 
 static inline pgd_t *get_TTB(void)
 {
-	return (pgd_t *)ctrl_inl(MMU_TTB);
+	return (pgd_t *)__raw_readl(MMU_TTB);
 }
 #endif /* __ASM_SH_MMU_CONTEXT_32_H */
diff --git a/arch/sh/include/asm/module.h b/arch/sh/include/asm/module.h
index 068bf16..b7927de 100644
--- a/arch/sh/include/asm/module.h
+++ b/arch/sh/include/asm/module.h
@@ -1,7 +1,22 @@
 #ifndef _ASM_SH_MODULE_H
 #define _ASM_SH_MODULE_H
 
-#include <asm-generic/module.h>
+struct mod_arch_specific {
+#ifdef CONFIG_DWARF_UNWINDER
+	struct list_head fde_list;
+	struct list_head cie_list;
+#endif
+};
+
+#ifdef CONFIG_64BIT
+#define Elf_Shdr Elf64_Shdr
+#define Elf_Sym Elf64_Sym
+#define Elf_Ehdr Elf64_Ehdr
+#else
+#define Elf_Shdr Elf32_Shdr
+#define Elf_Sym Elf32_Sym
+#define Elf_Ehdr Elf32_Ehdr
+#endif
 
 #ifdef CONFIG_CPU_LITTLE_ENDIAN
 # ifdef CONFIG_CPU_SH2
diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h
index 81bffc0..d71feb3 100644
--- a/arch/sh/include/asm/page.h
+++ b/arch/sh/include/asm/page.h
@@ -45,6 +45,7 @@
 #endif
 
 #ifndef __ASSEMBLY__
+#include <asm/uncached.h>
 
 extern unsigned long shm_align_mask;
 extern unsigned long max_low_pfn, min_low_pfn;
@@ -56,7 +57,6 @@
 	return (addr1 ^ addr2) & shm_align_mask;
 }
 
-
 #define clear_page(page)	memset((void *)(page), 0, PAGE_SIZE)
 extern void copy_page(void *to, void *from);
 
@@ -88,7 +88,7 @@
 #define __pte(x)	((pte_t) { (x) } )
 #else
 typedef struct { unsigned long long pte_low; } pte_t;
-typedef struct { unsigned long pgprot; } pgprot_t;
+typedef struct { unsigned long long pgprot; } pgprot_t;
 typedef struct { unsigned long pgd; } pgd_t;
 #define pte_val(x)	((x).pte_low)
 #define __pte(x)	((pte_t) { (x) } )
@@ -127,12 +127,7 @@
  * is not visible (it is part of the PMB mapping) and so needs to be
  * added or subtracted as required.
  */
-#if defined(CONFIG_PMB_FIXED)
-/* phys = virt - PAGE_OFFSET - (__MEMORY_START & 0xe0000000) */
-#define PMB_OFFSET	(PAGE_OFFSET - PXSEG(__MEMORY_START))
-#define __pa(x)	((unsigned long)(x) - PMB_OFFSET)
-#define __va(x)	((void *)((unsigned long)(x) + PMB_OFFSET))
-#elif defined(CONFIG_32BIT)
+#ifdef CONFIG_PMB
 #define __pa(x)	((unsigned long)(x)-PAGE_OFFSET+__MEMORY_START)
 #define __va(x)	((void *)((unsigned long)(x)+PAGE_OFFSET-__MEMORY_START))
 #else
@@ -140,6 +135,14 @@
 #define __va(x)	((void *)((unsigned long)(x)+PAGE_OFFSET))
 #endif
 
+#ifdef CONFIG_UNCACHED_MAPPING
+#define UNCAC_ADDR(addr)	((addr) - PAGE_OFFSET + uncached_start)
+#define CAC_ADDR(addr)		((addr) - uncached_start + PAGE_OFFSET)
+#else
+#define UNCAC_ADDR(addr)	((addr))
+#define CAC_ADDR(addr)		((addr))
+#endif
+
 #define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
 #define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
 
diff --git a/arch/sh/include/asm/pci.h b/arch/sh/include/asm/pci.h
index 67f3999..1042f7f 100644
--- a/arch/sh/include/asm/pci.h
+++ b/arch/sh/include/asm/pci.h
@@ -15,20 +15,49 @@
  */
 struct pci_channel {
 	struct pci_channel	*next;
+	struct pci_bus		*bus;
 
 	struct pci_ops		*pci_ops;
-	struct resource		*io_resource;
-	struct resource		*mem_resource;
+
+	struct resource		*resources;
+	unsigned int		nr_resources;
 
 	unsigned long		io_offset;
 	unsigned long		mem_offset;
 
 	unsigned long		reg_base;
-
 	unsigned long		io_map_base;
+
+	unsigned int		index;
+	unsigned int		need_domain_info;
+
+	/* Optional error handling */
+	struct timer_list	err_timer, serr_timer;
+	unsigned int		err_irq, serr_irq;
 };
 
-extern void register_pci_controller(struct pci_channel *hose);
+/* arch/sh/drivers/pci/pci.c */
+extern int register_pci_controller(struct pci_channel *hose);
+extern void pcibios_report_status(unsigned int status_mask, int warn);
+
+/* arch/sh/drivers/pci/common.c */
+extern int early_read_config_byte(struct pci_channel *hose, int top_bus,
+				  int bus, int devfn, int offset, u8 *value);
+extern int early_read_config_word(struct pci_channel *hose, int top_bus,
+				  int bus, int devfn, int offset, u16 *value);
+extern int early_read_config_dword(struct pci_channel *hose, int top_bus,
+				   int bus, int devfn, int offset, u32 *value);
+extern int early_write_config_byte(struct pci_channel *hose, int top_bus,
+				   int bus, int devfn, int offset, u8 value);
+extern int early_write_config_word(struct pci_channel *hose, int top_bus,
+				   int bus, int devfn, int offset, u16 value);
+extern int early_write_config_dword(struct pci_channel *hose, int top_bus,
+				    int bus, int devfn, int offset, u32 value);
+extern void pcibios_enable_timers(struct pci_channel *hose);
+extern unsigned int pcibios_handle_status_errors(unsigned long addr,
+				 unsigned int status, struct pci_channel *hose);
+extern int pci_is_66mhz_capable(struct pci_channel *hose,
+				int top_bus, int current_bus);
 
 extern unsigned long PCIBIOS_MIN_IO, PCIBIOS_MIN_MEM;
 
@@ -99,20 +128,6 @@
 }
 #endif
 
-#ifdef CONFIG_SUPERH32
-/*
- * If we're on an SH7751 or SH7780 PCI controller, PCI memory is mapped
- * at the end of the address space in a special non-translatable area.
- */
-#define PCI_MEM_FIXED_START	0xfd000000
-#define PCI_MEM_FIXED_END	(PCI_MEM_FIXED_START + 0x01000000)
-
-#define is_pci_memory_fixed_range(s, e)	\
-	((s) >= PCI_MEM_FIXED_START && (e) < PCI_MEM_FIXED_END)
-#else
-#define is_pci_memory_fixed_range(s, e)	(0)
-#endif
-
 /* Board-specific fixup routines. */
 int pcibios_map_platform_irq(struct pci_dev *dev, u8 slot, u8 pin);
 
@@ -122,6 +137,14 @@
 extern void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
 				    struct pci_bus_region *region);
 
+#define pci_domain_nr(bus) ((struct pci_channel *)(bus)->sysdata)->index
+
+static inline int pci_proc_domain(struct pci_bus *bus)
+{
+	struct pci_channel *hose = bus->sysdata;
+	return hose->need_domain_info;
+}
+
 /* Chances are this interrupt is wired PC-style ...  */
 static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
 {
diff --git a/arch/sh/include/asm/pgalloc.h b/arch/sh/include/asm/pgalloc.h
index 63ca37b..8c00785 100644
--- a/arch/sh/include/asm/pgalloc.h
+++ b/arch/sh/include/asm/pgalloc.h
@@ -4,8 +4,16 @@
 #include <linux/quicklist.h>
 #include <asm/page.h>
 
-#define QUICK_PGD 0	/* We preserve special mappings over free */
-#define QUICK_PT 1	/* Other page table pages that are zero on free */
+#define QUICK_PT 0	/* Other page table pages that are zero on free */
+
+extern pgd_t *pgd_alloc(struct mm_struct *);
+extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
+
+#if PAGETABLE_LEVELS > 2
+extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
+extern pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address);
+extern void pmd_free(struct mm_struct *mm, pmd_t *pmd);
+#endif
 
 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
 				       pte_t *pte)
@@ -20,28 +28,9 @@
 }
 #define pmd_pgtable(pmd) pmd_page(pmd)
 
-static inline void pgd_ctor(void *x)
-{
-	pgd_t *pgd = x;
-
-	memcpy(pgd + USER_PTRS_PER_PGD,
-	       swapper_pg_dir + USER_PTRS_PER_PGD,
-	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
-}
-
 /*
  * Allocate and free page tables.
  */
-static inline pgd_t *pgd_alloc(struct mm_struct *mm)
-{
-	return quicklist_alloc(QUICK_PGD, GFP_KERNEL | __GFP_REPEAT, pgd_ctor);
-}
-
-static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
-{
-	quicklist_free(QUICK_PGD, NULL, pgd);
-}
-
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 					  unsigned long address)
 {
@@ -81,7 +70,6 @@
 
 static inline void check_pgt_cache(void)
 {
-	quicklist_trim(QUICK_PGD, NULL, 25, 16);
 	quicklist_trim(QUICK_PT, NULL, 25, 16);
 }
 
diff --git a/arch/sh/include/asm/pgtable-2level.h b/arch/sh/include/asm/pgtable-2level.h
new file mode 100644
index 0000000..19bd89d
--- /dev/null
+++ b/arch/sh/include/asm/pgtable-2level.h
@@ -0,0 +1,23 @@
+#ifndef __ASM_SH_PGTABLE_2LEVEL_H
+#define __ASM_SH_PGTABLE_2LEVEL_H
+
+#include <asm-generic/pgtable-nopmd.h>
+
+/*
+ * traditional two-level paging structure
+ */
+#define PAGETABLE_LEVELS	2
+
+/* PTE bits */
+#define PTE_MAGNITUDE		2	/* 32-bit PTEs */
+
+#define PTE_SHIFT		PAGE_SHIFT
+#define PTE_BITS		(PTE_SHIFT - PTE_MAGNITUDE)
+
+/* PGD bits */
+#define PGDIR_SHIFT		(PTE_SHIFT + PTE_BITS)
+
+#define PTRS_PER_PGD		(PAGE_SIZE / (1 << PTE_MAGNITUDE))
+#define USER_PTRS_PER_PGD	(TASK_SIZE/PGDIR_SIZE)
+
+#endif /* __ASM_SH_PGTABLE_2LEVEL_H */
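[Editor's note, not part of the patch] The 2-level header above derives everything from the page size and PTE width: with 4K pages (an assumption here, PAGE_SHIFT = 12) and 32-bit PTEs, PTE_BITS = 12 - 2 = 10, so PGDIR_SHIFT = 22 and a single page holds 1024 PGD entries. A standalone check of that arithmetic plus a sample index split:

/* Sketch: verify the 2-level layout constants for 4K pages. */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

#define PTE_MAGNITUDE	2			/* 32-bit PTEs */
#define PTE_SHIFT	PAGE_SHIFT
#define PTE_BITS	(PTE_SHIFT - PTE_MAGNITUDE)

#define PGDIR_SHIFT	(PTE_SHIFT + PTE_BITS)
#define PTRS_PER_PGD	(PAGE_SIZE / (1UL << PTE_MAGNITUDE))

int main(void)
{
	/* 12 - 2 = 10 bits of PTE index, so the PGD index starts at bit 22
	 * and one page holds 4096 / 4 = 1024 PGD entries. */
	printf("PTE_BITS=%d PGDIR_SHIFT=%d PTRS_PER_PGD=%lu\n",
	       PTE_BITS, PGDIR_SHIFT, PTRS_PER_PGD);

	unsigned long addr = 0xc0801000UL;		/* sample virtual address */
	printf("pgd index=%lu pte index=%lu\n",
	       (addr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1),
	       (addr >> PAGE_SHIFT) & ((1UL << PTE_BITS) - 1));
	return 0;
}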
diff --git a/arch/sh/include/asm/pgtable-3level.h b/arch/sh/include/asm/pgtable-3level.h
new file mode 100644
index 0000000..249a985
--- /dev/null
+++ b/arch/sh/include/asm/pgtable-3level.h
@@ -0,0 +1,56 @@
+#ifndef __ASM_SH_PGTABLE_3LEVEL_H
+#define __ASM_SH_PGTABLE_3LEVEL_H
+
+#include <asm-generic/pgtable-nopud.h>
+
+/*
+ * Some cores need a 3-level page table layout, for example when using
+ * 64-bit PTEs and 4K pages.
+ */
+#define PAGETABLE_LEVELS	3
+
+#define PTE_MAGNITUDE		3	/* 64-bit PTEs on SH-X2 TLB */
+
+/* PGD bits */
+#define PGDIR_SHIFT		30
+
+#define PTRS_PER_PGD		4
+#define USER_PTRS_PER_PGD	2
+
+/* PMD bits */
+#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - PTE_MAGNITUDE))
+#define PMD_SIZE	(1UL << PMD_SHIFT)
+#define PMD_MASK	(~(PMD_SIZE-1))
+
+#define PTRS_PER_PMD	((1 << PGDIR_SHIFT) / PMD_SIZE)
+
+#define pmd_ERROR(e) \
+	printk("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e))
+
+typedef struct { unsigned long long pmd; } pmd_t;
+#define pmd_val(x)	((x).pmd)
+#define __pmd(x)	((pmd_t) { (x) } )
+
+static inline unsigned long pud_page_vaddr(pud_t pud)
+{
+	return pud_val(pud);
+}
+
+#define pmd_index(address)	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
+static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
+{
+	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
+}
+
+#define pud_none(x)	(!pud_val(x))
+#define pud_present(x)	(pud_val(x))
+#define pud_clear(xp)	do { set_pud(xp, __pud(0)); } while (0)
+#define	pud_bad(x)	(pud_val(x) & ~PAGE_MASK)
+
+/*
+ * (puds are folded into pgds so this doesn't actually get called,
+ * but the define is needed for a generic inline function.)
+ */
+#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while(0)
+
+#endif /* __ASM_SH_PGTABLE_3LEVEL_H */
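[Editor's note, not part of the patch] In the 3-level layout above, PGDIR_SHIFT is fixed at 30 with only 4 PGD entries, and the PMD level absorbs the bits in between: with 4K pages and 64-bit PTEs (assumptions here), PMD_SHIFT = 12 + 9 = 21 and PTRS_PER_PMD = 2^30 / 2^21 = 512. A standalone sketch splitting a sample virtual address into its pgd/pmd/pte indices:

/* Sketch: 3-level virtual address decomposition for 4K pages, 64-bit PTEs. */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PTE_MAGNITUDE	3

#define PGDIR_SHIFT	30
#define PTRS_PER_PGD	4

#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - PTE_MAGNITUDE))	/* 21 */
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PTRS_PER_PMD	((1UL << PGDIR_SHIFT) / PMD_SIZE)		/* 512 */

#define PTRS_PER_PTE	(1UL << (PAGE_SHIFT - PTE_MAGNITUDE))		/* 512 */

int main(void)
{
	unsigned long addr = 0x40123456UL;	/* sample virtual address */

	printf("pgd=%lu pmd=%lu pte=%lu offset=%#lx\n",
	       (addr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1),
	       (addr >> PMD_SHIFT) & (PTRS_PER_PMD - 1),
	       (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1),
	       addr & ((1UL << PAGE_SHIFT) - 1));
	return 0;
}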
diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h
index ba3046e..aab7652 100644
--- a/arch/sh/include/asm/pgtable.h
+++ b/arch/sh/include/asm/pgtable.h
@@ -12,7 +12,11 @@
 #ifndef __ASM_SH_PGTABLE_H
 #define __ASM_SH_PGTABLE_H
 
-#include <asm-generic/pgtable-nopmd.h>
+#ifdef CONFIG_X2TLB
+#include <asm/pgtable-3level.h>
+#else
+#include <asm/pgtable-2level.h>
+#endif
 #include <asm/page.h>
 
 #ifndef __ASSEMBLY__
@@ -51,28 +55,12 @@
 #define	NPHYS_SIGN	(1LL << (NPHYS - 1))
 #define	NPHYS_MASK	(-1LL << NPHYS)
 
-/*
- * traditional two-level paging structure
- */
-/* PTE bits */
-#if defined(CONFIG_X2TLB) || defined(CONFIG_SUPERH64)
-# define PTE_MAGNITUDE	3	/* 64-bit PTEs on extended mode SH-X2 TLB */
-#else
-# define PTE_MAGNITUDE	2	/* 32-bit PTEs */
-#endif
-#define PTE_SHIFT	PAGE_SHIFT
-#define PTE_BITS	(PTE_SHIFT - PTE_MAGNITUDE)
-
-/* PGD bits */
-#define PGDIR_SHIFT	(PTE_SHIFT + PTE_BITS)
 #define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
 #define PGDIR_MASK	(~(PGDIR_SIZE-1))
 
 /* Entries per level */
 #define PTRS_PER_PTE	(PAGE_SIZE / (1 << PTE_MAGNITUDE))
-#define PTRS_PER_PGD	(PAGE_SIZE / sizeof(pgd_t))
 
-#define USER_PTRS_PER_PGD	(TASK_SIZE/PGDIR_SIZE)
 #define FIRST_USER_ADDRESS	0
 
 #define PHYS_ADDR_MASK29		0x1fffffff
@@ -153,9 +141,9 @@
 #define pte_pfn(x)		((unsigned long)(((x).pte_low >> PAGE_SHIFT)))
 
 /*
- * No page table caches to initialise
+ * Initialise the page table caches
  */
-#define pgtable_cache_init()	do { } while (0)
+extern void pgtable_cache_init(void);
 
 struct vm_area_struct;
 
diff --git a/arch/sh/include/asm/pgtable_32.h b/arch/sh/include/asm/pgtable_32.h
index 5003ee8..e172d69 100644
--- a/arch/sh/include/asm/pgtable_32.h
+++ b/arch/sh/include/asm/pgtable_32.h
@@ -71,6 +71,8 @@
 #define _PAGE_EXT_KERN_WRITE	0x1000	/* EPR4-bit: Kernel space writable */
 #define _PAGE_EXT_KERN_READ	0x2000	/* EPR5-bit: Kernel space readable */
 
+#define _PAGE_EXT_WIRED		0x4000	/* software: Wire TLB entry */
+
 /* Wrapper for extended mode pgprot twiddling */
 #define _PAGE_EXT(x)		((unsigned long long)(x) << 32)
 
@@ -141,12 +143,14 @@
 # elif defined(CONFIG_HUGETLB_PAGE_SIZE_64MB)
 #  define _PAGE_SZHUGE	(_PAGE_EXT_ESZ2 | _PAGE_EXT_ESZ3)
 # endif
+# define _PAGE_WIRED	(_PAGE_EXT(_PAGE_EXT_WIRED))
 #else
 # if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
 #  define _PAGE_SZHUGE	(_PAGE_SZ1)
 # elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
 #  define _PAGE_SZHUGE	(_PAGE_SZ0 | _PAGE_SZ1)
 # endif
+# define _PAGE_WIRED	(0)
 #endif
 
 /*
diff --git a/arch/sh/include/asm/pgtable_64.h b/arch/sh/include/asm/pgtable_64.h
index 17cdbec..0ee4677 100644
--- a/arch/sh/include/asm/pgtable_64.h
+++ b/arch/sh/include/asm/pgtable_64.h
@@ -43,11 +43,6 @@
 }
 #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
 
-static __inline__ void pmd_set(pmd_t *pmdp,pte_t *ptep)
-{
-	pmd_val(*pmdp) = (unsigned long) ptep;
-}
-
 /*
  * PGD defines. Top level.
  */
@@ -128,8 +123,21 @@
 #define _PAGE_DIRTY	0x400  /* software: page accessed in write */
 #define _PAGE_ACCESSED	0x800  /* software: page referenced */
 
+/* Wrapper for extended mode pgprot twiddling */
+#define _PAGE_EXT(x)		((unsigned long long)(x) << 32)
+
+/*
+ * We can use the sign-extended bits in the PTEL to get 32 bits of
+ * software flags. This works for now because no implementation uses
+ * anything above the PPN field.
+ */
+#define _PAGE_WIRED	_PAGE_EXT(0x001) /* software: wire the tlb entry */
+
+#define _PAGE_CLEAR_FLAGS	(_PAGE_PRESENT | _PAGE_FILE | _PAGE_SHARED | \
+				 _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_WIRED)
+
 /* Mask which drops software flags */
-#define _PAGE_FLAGS_HARDWARE_MASK	0xfffffffffffff3dbLL
+#define _PAGE_FLAGS_HARDWARE_MASK	(NEFF_MASK & ~(_PAGE_CLEAR_FLAGS))
 
 /*
  * HugeTLB support
@@ -203,12 +211,6 @@
 #define pgprot_writecombine(prot) __pgprot(pgprot_val(prot) & ~_PAGE_CACHABLE)
 
 /*
- * Handling allocation failures during page table setup.
- */
-extern void __handle_bad_pmd_kernel(pmd_t * pmd);
-#define __handle_bad_pmd(x)	__handle_bad_pmd_kernel(x)
-
-/*
  * PTE level access routines.
  *
  * Note1:
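[Editor's note, not part of the patch] The pgtable_64.h hunk above replaces a hand-written hardware mask with one derived from the software flag bits, so adding a new software bit (such as _PAGE_WIRED) automatically keeps it out of what the MMU sees. A standalone sketch of that derivation; NEFF_MASK's real value is not shown in this diff, so a full 64-bit mask stands in for it and only a subset of the software flags is modelled:

/* Sketch: build the hardware PTE mask by clearing software-only bits. */
#include <stdio.h>

#define PAGE_PRESENT	0x001ULL
#define PAGE_DIRTY	0x400ULL
#define PAGE_ACCESSED	0x800ULL
#define PAGE_WIRED	(0x001ULL << 32)	/* software, sign-extended area */

#define NEFF_MASK	0xffffffffffffffffULL	/* hypothetical */
#define CLEAR_FLAGS	(PAGE_PRESENT | PAGE_DIRTY | PAGE_ACCESSED | PAGE_WIRED)
#define HARDWARE_MASK	(NEFF_MASK & ~CLEAR_FLAGS)

int main(void)
{
	unsigned long long pte = 0x12345000ULL | PAGE_PRESENT |
				 PAGE_DIRTY | PAGE_WIRED;

	/* Only the bits the MMU understands survive the mask. */
	printf("pte=%#llx -> hw=%#llx\n", pte, pte & HARDWARE_MASK);
	return 0;
}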
diff --git a/arch/sh/include/asm/processor.h b/arch/sh/include/asm/processor.h
index 017e0c1..9605e062 100644
--- a/arch/sh/include/asm/processor.h
+++ b/arch/sh/include/asm/processor.h
@@ -98,13 +98,34 @@
 
 /* Forward decl */
 struct seq_operations;
+struct task_struct;
 
 extern struct pt_regs fake_swapper_regs;
 
+/* arch/sh/kernel/process.c */
+extern unsigned int xstate_size;
+extern void free_thread_xstate(struct task_struct *);
+extern struct kmem_cache *task_xstate_cachep;
+
+/* arch/sh/mm/alignment.c */
+extern int get_unalign_ctl(struct task_struct *, unsigned long addr);
+extern int set_unalign_ctl(struct task_struct *, unsigned int val);
+
+#define GET_UNALIGN_CTL(tsk, addr)	get_unalign_ctl((tsk), (addr))
+#define SET_UNALIGN_CTL(tsk, val)	set_unalign_ctl((tsk), (val))
+
+/* arch/sh/mm/init.c */
+extern unsigned int mem_init_done;
+
 /* arch/sh/kernel/setup.c */
 const char *get_cpu_subtype(struct sh_cpuinfo *c);
 extern const struct seq_operations cpuinfo_op;
 
+/* thread_struct flags */
+#define SH_THREAD_UAC_NOPRINT	(1 << 0)
+#define SH_THREAD_UAC_SIGBUS	(1 << 1)
+#define SH_THREAD_UAC_MASK	(SH_THREAD_UAC_NOPRINT | SH_THREAD_UAC_SIGBUS)
+
 /* processor boot mode configuration */
 #define MODE_PIN0 (1 << 0)
 #define MODE_PIN1 (1 << 1)
diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h
index 1f3d6fa..572b4eb 100644
--- a/arch/sh/include/asm/processor_32.h
+++ b/arch/sh/include/asm/processor_32.h
@@ -14,6 +14,7 @@
 #include <asm/page.h>
 #include <asm/types.h>
 #include <asm/ptrace.h>
+#include <asm/hw_breakpoint.h>
 
 /*
  * Default implementation of macro that returns current
@@ -90,9 +91,9 @@
 	unsigned long entry_pc;
 };
 
-union sh_fpu_union {
-	struct sh_fpu_hard_struct hard;
-	struct sh_fpu_soft_struct soft;
+union thread_xstate {
+	struct sh_fpu_hard_struct hardfpu;
+	struct sh_fpu_soft_struct softfpu;
 };
 
 struct thread_struct {
@@ -100,38 +101,30 @@
 	unsigned long sp;
 	unsigned long pc;
 
-	/* Hardware debugging registers */
-	unsigned long ubc_pc;
+	/* Various thread flags, see SH_THREAD_xxx */
+	unsigned long flags;
 
-	/* floating point info */
-	union sh_fpu_union fpu;
+	/* Save middle states of ptrace breakpoints */
+	struct perf_event *ptrace_bps[HBP_NUM];
 
 #ifdef CONFIG_SH_DSP
 	/* Dsp status information */
 	struct sh_dsp_struct dsp_status;
 #endif
-};
 
-/* Count of active tasks with UBC settings */
-extern int ubc_usercnt;
+	/* Extended processor state */
+	union thread_xstate *xstate;
+};
 
 #define INIT_THREAD  {						\
 	.sp = sizeof(init_stack) + (long) &init_stack,		\
+	.flags = 0,						\
 }
 
-/*
- * Do necessary setup to start up a newly executed thread.
- */
-#define start_thread(_regs, new_pc, new_sp)	 \
-	set_fs(USER_DS);			 \
-	_regs->pr = 0;				 \
-	_regs->sr = SR_FD;	/* User mode. */ \
-	_regs->pc = new_pc;			 \
-	_regs->regs[15] = new_sp
-
 /* Forward declaration, a strange C thing */
 struct task_struct;
-struct mm_struct;
+
+extern void start_thread(struct pt_regs *regs, unsigned long new_pc, unsigned long new_sp);
 
 /* Free all resources held by a thread. */
 extern void release_thread(struct task_struct *);
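[Editor's note, not part of the patch] The processor_32.h hunk above moves the FPU state out of thread_struct and behind an xstate pointer backed by task_xstate_cachep, so it can be allocated only for tasks that use it. A hedged standalone sketch of that lazy-allocation pattern; the struct fields are simplified and malloc() stands in for the kernel slab cache:

/* Sketch: extended per-thread state allocated on first use. */
#include <stdio.h>
#include <stdlib.h>

union thread_xstate {
	struct { double fp_regs[16]; unsigned int fpscr; } hardfpu;
	struct { double fp_regs[16]; unsigned int fpscr; } softfpu;
};

struct thread {
	unsigned long flags;
	union thread_xstate *xstate;	/* NULL until first FPU use */
};

static union thread_xstate *get_xstate(struct thread *t)
{
	if (!t->xstate) {
		t->xstate = calloc(1, sizeof(*t->xstate));
		if (!t->xstate)
			abort();
	}
	return t->xstate;
}

int main(void)
{
	struct thread t = { 0 };

	get_xstate(&t)->hardfpu.fpscr = 0x00040001;	/* hypothetical value */
	printf("fpscr=%#x\n", t.xstate->hardfpu.fpscr);

	free(t.xstate);
	return 0;
}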
diff --git a/arch/sh/include/asm/processor_64.h b/arch/sh/include/asm/processor_64.h
index 5727d31..621bc46 100644
--- a/arch/sh/include/asm/processor_64.h
+++ b/arch/sh/include/asm/processor_64.h
@@ -87,26 +87,31 @@
 	/* long status; * software status information */
 };
 
-#if 0
 /* Dummy fpu emulator  */
 struct sh_fpu_soft_struct {
-	unsigned long long fp_regs[32];
+	unsigned long fp_regs[64];
 	unsigned int fpscr;
 	unsigned char lookahead;
 	unsigned long entry_pc;
 };
-#endif
 
-union sh_fpu_union {
-	struct sh_fpu_hard_struct hard;
-	/* 'hard' itself only produces 32 bit alignment, yet we need
-	   to access it using 64 bit load/store as well. */
+union thread_xstate {
+	struct sh_fpu_hard_struct hardfpu;
+	struct sh_fpu_soft_struct softfpu;
+	/*
+	 * The structure definitions only produce 32 bit alignment, yet we need
+	 * to access them using 64 bit load/store as well.
+	 */
 	unsigned long long alignment_dummy;
 };
 
 struct thread_struct {
 	unsigned long sp;
 	unsigned long pc;
+
+	/* Various thread flags, see SH_THREAD_xxx */
+	unsigned long flags;
+
 	/* This stores the address of the pt_regs built during a context
 	   switch, or of the register save area built for a kernel mode
 	   exception.  It is used for backtracing the stack of a sleeping task
@@ -122,7 +127,7 @@
 	/* Hardware debugging registers may come here */
 
 	/* floating point info */
-	union sh_fpu_union fpu;
+	union thread_xstate *xstate;
 };
 
 #define INIT_MMAP \
@@ -137,7 +142,7 @@
 	.trap_no	= 0,			\
 	.error_code	= 0,			\
 	.address	= 0,			\
-	.fpu		= { { { 0, } }, }	\
+	.flags		= 0,			\
 }
 
 /*
diff --git a/arch/sh/include/asm/ptrace.h b/arch/sh/include/asm/ptrace.h
index 1dc12cb..e11b14e 100644
--- a/arch/sh/include/asm/ptrace.h
+++ b/arch/sh/include/asm/ptrace.h
@@ -102,13 +102,15 @@
 #define	PTRACE_GETDSPREGS	55	/* DSP registers */
 #define	PTRACE_SETDSPREGS	56
 
-#define PT_TEXT_END_ADDR 	240
-#define PT_TEXT_ADDR 		244	/* &(struct user)->start_code */
-#define PT_DATA_ADDR 		248	/* &(struct user)->start_data */
+#define PT_TEXT_END_ADDR	240
+#define PT_TEXT_ADDR		244	/* &(struct user)->start_code */
+#define PT_DATA_ADDR		248	/* &(struct user)->start_data */
 #define PT_TEXT_LEN		252
 
 #ifdef __KERNEL__
 #include <asm/addrspace.h>
+#include <asm/page.h>
+#include <asm/system.h>
 
 #define user_mode(regs)			(((regs)->sr & 0x40000000)==0)
 #define instruction_pointer(regs)	((unsigned long)(regs)->pc)
@@ -124,6 +126,12 @@
 extern void user_enable_single_step(struct task_struct *);
 extern void user_disable_single_step(struct task_struct *);
 
+struct perf_event;
+struct perf_sample_data;
+
+extern void ptrace_triggered(struct perf_event *bp, int nmi,
+		      struct perf_sample_data *data, struct pt_regs *regs);
+
 #define task_pt_regs(task) \
 	((struct pt_regs *) (task_stack_page(task) + THREAD_SIZE) - 1)
 
@@ -131,10 +139,8 @@
 {
 	unsigned long pc = instruction_pointer(regs);
 
-#ifdef P2SEG
-	if (pc >= P2SEG && pc < P3SEG)
-		pc -= 0x20000000;
-#endif
+	if (virt_addr_uncached(pc))
+		return CAC_ADDR(pc);
 
 	return pc;
 }
diff --git a/arch/sh/include/asm/reboot.h b/arch/sh/include/asm/reboot.h
new file mode 100644
index 0000000..b3da0c6
--- /dev/null
+++ b/arch/sh/include/asm/reboot.h
@@ -0,0 +1,21 @@
+#ifndef __ASM_SH_REBOOT_H
+#define __ASM_SH_REBOOT_H
+
+#include <linux/kdebug.h>
+
+struct pt_regs;
+
+struct machine_ops {
+	void (*restart)(char *cmd);
+	void (*halt)(void);
+	void (*power_off)(void);
+	void (*shutdown)(void);
+	void (*crash_shutdown)(struct pt_regs *);
+};
+
+extern struct machine_ops machine_ops;
+
+/* arch/sh/kernel/machine_kexec.c */
+void native_machine_crash_shutdown(struct pt_regs *regs);
+
+#endif /* __ASM_SH_REBOOT_H */
diff --git a/arch/sh/include/asm/setup.h b/arch/sh/include/asm/setup.h
index ce37435..4758325 100644
--- a/arch/sh/include/asm/setup.h
+++ b/arch/sh/include/asm/setup.h
@@ -18,7 +18,6 @@
 /* ... */
 #define COMMAND_LINE ((char *) (PARAM+0x100))
 
-int setup_early_printk(char *);
 void sh_mv_setup(void);
 
 #endif /* __KERNEL__ */
diff --git a/arch/sh/include/asm/sh_bios.h b/arch/sh/include/asm/sh_bios.h
index d9c96d7..95714c2 100644
--- a/arch/sh/include/asm/sh_bios.h
+++ b/arch/sh/include/asm/sh_bios.h
@@ -1,18 +1,27 @@
 #ifndef __ASM_SH_BIOS_H
 #define __ASM_SH_BIOS_H
 
+#ifdef CONFIG_SH_STANDARD_BIOS
+
 /*
  * Copyright (C) 2000 Greg Banks, Mitch Davis
  * C API to interface to the standard LinuxSH BIOS
  * usually from within the early stages of kernel boot.
  */
-
-
 extern void sh_bios_console_write(const char *buf, unsigned int len);
-extern void sh_bios_char_out(char ch);
 extern void sh_bios_gdb_detach(void);
 
 extern void sh_bios_get_node_addr(unsigned char *node_addr);
 extern void sh_bios_shutdown(unsigned int how);
 
+extern void sh_bios_vbr_init(void);
+extern void sh_bios_vbr_reload(void);
+
+#else
+
+static inline void sh_bios_vbr_init(void) { }
+static inline void sh_bios_vbr_reload(void) { }
+
+#endif /* CONFIG_SH_STANDARD_BIOS */
+
 #endif /* __ASM_SH_BIOS_H */
diff --git a/arch/sh/include/asm/suspend.h b/arch/sh/include/asm/suspend.h
index fe9c2a1..64eb41a 100644
--- a/arch/sh/include/asm/suspend.h
+++ b/arch/sh/include/asm/suspend.h
@@ -92,5 +92,6 @@
 #define SUSP_SH_USTANDBY	(1 << 3) /* SH-Mobile U-standby mode */
 #define SUSP_SH_SF		(1 << 4) /* Enable self-refresh */
 #define SUSP_SH_MMU		(1 << 5) /* Save/restore MMU and cache */
+#define SUSP_SH_REGS		(1 << 6) /* Save/restore registers */
 
 #endif /* _ASM_SH_SUSPEND_H */
diff --git a/arch/sh/include/asm/system.h b/arch/sh/include/asm/system.h
index c15415b..0bd7a17 100644
--- a/arch/sh/include/asm/system.h
+++ b/arch/sh/include/asm/system.h
@@ -10,7 +10,6 @@
 #include <linux/compiler.h>
 #include <linux/linkage.h>
 #include <asm/types.h>
-#include <asm/ptrace.h>
 
 #define AT_VECTOR_SIZE_ARCH 5 /* entries in ARCH_DLINFO */
 
@@ -32,7 +31,7 @@
 #define mb()		__asm__ __volatile__ ("synco": : :"memory")
 #define rmb()		mb()
 #define wmb()		__asm__ __volatile__ ("synco": : :"memory")
-#define ctrl_barrier()	__icbi(0xa8000000)
+#define ctrl_barrier()	__icbi(PAGE_OFFSET)
 #define read_barrier_depends()	do { } while(0)
 #else
 #define mb()		__asm__ __volatile__ ("": : :"memory")
@@ -114,6 +113,8 @@
 				    (unsigned long)_n_, sizeof(*(ptr))); \
   })
 
+struct pt_regs;
+
 extern void die(const char *str, struct pt_regs *regs, long err) __attribute__ ((noreturn));
 void free_initmem(void);
 void free_initrd_mem(unsigned long start, unsigned long end);
@@ -137,14 +138,14 @@
 #endif
 
 extern unsigned long cached_to_uncached;
+extern unsigned long uncached_size;
 
 extern struct dentry *sh_debugfs_root;
 
 void per_cpu_trap_init(void);
 void default_idle(void);
 void cpu_idle_wait(void);
-
-asmlinkage void break_point_trap(void);
+void stop_this_cpu(void *);
 
 #ifdef CONFIG_SUPERH32
 #define BUILD_TRAP_HANDLER(name)					\
diff --git a/arch/sh/include/asm/system_32.h b/arch/sh/include/asm/system_32.h
index 06814f5..51296b3 100644
--- a/arch/sh/include/asm/system_32.h
+++ b/arch/sh/include/asm/system_32.h
@@ -2,6 +2,7 @@
 #define __ASM_SH_SYSTEM_32_H
 
 #include <linux/types.h>
+#include <asm/mmu.h>
 
 #ifdef CONFIG_SH_DSP
 
@@ -144,9 +145,6 @@
 		__restore_dsp(prev);				\
 } while (0)
 
-#define __uses_jump_to_uncached \
-	noinline __attribute__ ((__section__ (".uncached.text")))
-
 /*
  * Jump to uncached area.
  * When handling TLB or caches, we need to do it from an uncached area.
@@ -216,6 +214,17 @@
 int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
 			    struct mem_access *ma, int);
 
+static inline void trigger_address_error(void)
+{
+	if (__in_29bit_mode())
+		__asm__ __volatile__ (
+			"ldc %0, sr\n\t"
+			"mov.l @%1, %0"
+			:
+			: "r" (0x10000000), "r" (0x80000001)
+		);
+}
+
 asmlinkage void do_address_error(struct pt_regs *regs,
 				 unsigned long writeaccess,
 				 unsigned long address);
diff --git a/arch/sh/include/asm/system_64.h b/arch/sh/include/asm/system_64.h
index ab1dd91..3633864 100644
--- a/arch/sh/include/asm/system_64.h
+++ b/arch/sh/include/asm/system_64.h
@@ -18,6 +18,7 @@
 /*
  *	switch_to() should switch tasks to task nr n, first
  */
+struct thread_struct;
 struct task_struct *sh64_switch_to(struct task_struct *prev,
 				   struct thread_struct *prev_thread,
 				   struct task_struct *next,
@@ -33,8 +34,6 @@
 			      &next->thread);			\
 } while (0)
 
-#define __uses_jump_to_uncached
-
 #define jump_to_uncached()	do { } while (0)
 #define back_to_cached()	do { } while (0)
 
@@ -48,6 +47,13 @@
 	return (unsigned long long)(signed long long)(signed long)val;
 }
 
+extern void phys_stext(void);
+
+static inline void trigger_address_error(void)
+{
+	phys_stext();
+}
+
 #define SR_BL_LL	0x0000000010000000LL
 
 static inline void set_bl_bit(void)
diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h
index 1f3d927..55a36fe 100644
--- a/arch/sh/include/asm/thread_info.h
+++ b/arch/sh/include/asm/thread_info.h
@@ -93,14 +93,16 @@
 
 #define THREAD_SIZE_ORDER	(THREAD_SHIFT - PAGE_SHIFT)
 
-#else /* THREAD_SHIFT < PAGE_SHIFT */
-
-#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
+#endif
 
 extern struct thread_info *alloc_thread_info(struct task_struct *tsk);
 extern void free_thread_info(struct thread_info *ti);
+extern void arch_task_cache_init(void);
+#define arch_task_cache_init arch_task_cache_init
+extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
+extern void init_thread_xstate(void);
 
-#endif /* THREAD_SHIFT < PAGE_SHIFT */
+#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
 
 #endif /* __ASSEMBLY__ */
 
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
index da8fe7a..75abb38 100644
--- a/arch/sh/include/asm/tlb.h
+++ b/arch/sh/include/asm/tlb.h
@@ -11,6 +11,7 @@
 #ifdef CONFIG_MMU
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
+#include <asm/mmu_context.h>
 
 /*
  * TLB handling.  This allows us to remove pages from the page
@@ -97,6 +98,22 @@
 
 #define tlb_migrate_finish(mm)		do { } while (0)
 
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SUPERH64)
+extern void tlb_wire_entry(struct vm_area_struct *, unsigned long, pte_t);
+extern void tlb_unwire_entry(void);
+#else
+static inline void tlb_wire_entry(struct vm_area_struct *vma,
+				  unsigned long addr, pte_t pte)
+{
+	BUG();
+}
+
+static inline void tlb_unwire_entry(void)
+{
+	BUG();
+}
+#endif
+
 #else /* CONFIG_MMU */
 
 #define tlb_start_vma(tlb, vma)				do { } while (0)
diff --git a/arch/sh/include/asm/ubc.h b/arch/sh/include/asm/ubc.h
deleted file mode 100644
index 9bf9616..0000000
--- a/arch/sh/include/asm/ubc.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * include/asm-sh/ubc.h
- *
- * Copyright (C) 1999 Niibe Yutaka
- * Copyright (C) 2002, 2003 Paul Mundt
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#ifndef __ASM_SH_UBC_H
-#define __ASM_SH_UBC_H
-#ifdef __KERNEL__
-
-#include <cpu/ubc.h>
-
-/* User Break Controller */
-#if defined(CONFIG_CPU_SUBTYPE_SH7707) || defined(CONFIG_CPU_SUBTYPE_SH7709)
-#define UBC_TYPE_SH7729	(current_cpu_data.type == CPU_SH7729)
-#else
-#define UBC_TYPE_SH7729	0
-#endif
-
-#define BAMR_ASID		(1 << 2)
-#define BAMR_NONE		0
-#define BAMR_10			0x1
-#define BAMR_12			0x2
-#define BAMR_ALL		0x3
-#define BAMR_16			0x8
-#define BAMR_20			0x9
-
-#define BBR_INST		(1 << 4)
-#define BBR_DATA		(2 << 4)
-#define BBR_READ		(1 << 2)
-#define BBR_WRITE		(2 << 2)
-#define BBR_BYTE		0x1
-#define BBR_HALF		0x2
-#define BBR_LONG		0x3
-#define BBR_QUAD		(1 << 6)	/* SH7750 */
-#define BBR_CPU			(1 << 6)	/* SH7709A,SH7729 */
-#define BBR_DMA			(2 << 6)	/* SH7709A,SH7729 */
-
-#define BRCR_CMFA		(1 << 15)
-#define BRCR_CMFB		(1 << 14)
-
-#if defined CONFIG_CPU_SH2A
-#define BRCR_CMFCA		(1 << 15)
-#define BRCR_CMFCB		(1 << 14)
-#define BRCR_CMFDA		(1 << 13)
-#define BRCR_CMFDB		(1 << 12)
-#define BRCR_PCBB		(1 << 6)	/* 1: after execution */
-#define BRCR_PCBA		(1 << 5)	/* 1: after execution */
-#define BRCR_PCTE		0
-#else
-#define BRCR_PCTE		(1 << 11)
-#define BRCR_PCBA		(1 << 10)	/* 1: after execution */
-#define BRCR_DBEB		(1 << 7)
-#define BRCR_PCBB		(1 << 6)
-#define BRCR_SEQ		(1 << 3)
-#define BRCR_UBDE		(1 << 0)
-#endif
-
-#endif /* __KERNEL__ */
-#endif /* __ASM_SH_UBC_H */
diff --git a/arch/sh/include/asm/uncached.h b/arch/sh/include/asm/uncached.h
new file mode 100644
index 0000000..e3419f9
--- /dev/null
+++ b/arch/sh/include/asm/uncached.h
@@ -0,0 +1,18 @@
+#ifndef __ASM_SH_UNCACHED_H
+#define __ASM_SH_UNCACHED_H
+
+#include <linux/bug.h>
+
+#ifdef CONFIG_UNCACHED_MAPPING
+extern unsigned long uncached_start, uncached_end;
+
+extern int virt_addr_uncached(unsigned long kaddr);
+extern void uncached_init(void);
+extern void uncached_resize(unsigned long size);
+#else
+#define virt_addr_uncached(kaddr)	(0)
+#define uncached_init()			do { } while (0)
+#define uncached_resize(size)		BUG()
+#endif
+
+#endif /* __ASM_SH_UNCACHED_H */
diff --git a/arch/sh/include/asm/vmlinux.lds.h b/arch/sh/include/asm/vmlinux.lds.h
index 244ec4a..d58ad49 100644
--- a/arch/sh/include/asm/vmlinux.lds.h
+++ b/arch/sh/include/asm/vmlinux.lds.h
@@ -14,4 +14,12 @@
 #define DWARF_EH_FRAME
 #endif
 
+#ifdef CONFIG_SUPERH64
+#define EXTRA_TEXT		\
+	*(.text64)		\
+	*(.text..SHmedia32)
+#else
+#define EXTRA_TEXT
+#endif
+
 #endif /* __ASM_SH_VMLINUX_LDS_H */
diff --git a/arch/sh/include/asm/watchdog.h b/arch/sh/include/asm/watchdog.h
index 19dfff5..85a7aca 100644
--- a/arch/sh/include/asm/watchdog.h
+++ b/arch/sh/include/asm/watchdog.h
@@ -70,7 +70,7 @@
  */
 static inline __u32 sh_wdt_read_cnt(void)
 {
-	return ctrl_inl(WTCNT_R);
+	return __raw_readl(WTCNT_R);
 }
 
 /**
@@ -82,7 +82,7 @@
  */
 static inline void sh_wdt_write_cnt(__u32 val)
 {
-	ctrl_outl((WTCNT_HIGH << 24) | (__u32)val, WTCNT);
+	__raw_writel((WTCNT_HIGH << 24) | (__u32)val, WTCNT);
 }
 
 /**
@@ -94,7 +94,7 @@
  */
 static inline void sh_wdt_write_bst(__u32 val)
 {
-	ctrl_outl((WTBST_HIGH << 24) | (__u32)val, WTBST);
+	__raw_writel((WTBST_HIGH << 24) | (__u32)val, WTBST);
 }
 /**
  * 	sh_wdt_read_csr - Read from Control/Status Register
@@ -103,7 +103,7 @@
  */
 static inline __u32 sh_wdt_read_csr(void)
 {
-	return ctrl_inl(WTCSR_R);
+	return __raw_readl(WTCSR_R);
 }
 
 /**
@@ -115,7 +115,7 @@
  */
 static inline void sh_wdt_write_csr(__u32 val)
 {
-	ctrl_outl((WTCSR_HIGH << 24) | (__u32)val, WTCSR);
+	__raw_writel((WTCSR_HIGH << 24) | (__u32)val, WTCSR);
 }
 #else
 /**
@@ -124,7 +124,7 @@
  */
 static inline __u8 sh_wdt_read_cnt(void)
 {
-	return ctrl_inb(WTCNT_R);
+	return __raw_readb(WTCNT_R);
 }
 
 /**
@@ -136,7 +136,7 @@
  */
 static inline void sh_wdt_write_cnt(__u8 val)
 {
-	ctrl_outw((WTCNT_HIGH << 8) | (__u16)val, WTCNT);
+	__raw_writew((WTCNT_HIGH << 8) | (__u16)val, WTCNT);
 }
 
 /**
@@ -146,7 +146,7 @@
  */
 static inline __u8 sh_wdt_read_csr(void)
 {
-	return ctrl_inb(WTCSR_R);
+	return __raw_readb(WTCSR_R);
 }
 
 /**
@@ -158,7 +158,7 @@
  */
 static inline void sh_wdt_write_csr(__u8 val)
 {
-	ctrl_outw((WTCSR_HIGH << 8) | (__u16)val, WTCSR);
+	__raw_writew((WTCSR_HIGH << 8) | (__u16)val, WTCSR);
 }
 #endif /* CONFIG_CPU_SUBTYPE_SH7785 || CONFIG_CPU_SUBTYPE_SH7780 */
 #endif /* __KERNEL__ */
diff --git a/arch/sh/include/cpu-sh2/cpu/ubc.h b/arch/sh/include/cpu-sh2/cpu/ubc.h
deleted file mode 100644
index ba0e87f..0000000
--- a/arch/sh/include/cpu-sh2/cpu/ubc.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * include/asm-sh/cpu-sh2/ubc.h
- *
- * Copyright (C) 2003 Paul Mundt
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#ifndef __ASM_CPU_SH2_UBC_H
-#define __ASM_CPU_SH2_UBC_H
-
-#define UBC_BARA                0xffffff40
-#define UBC_BAMRA               0xffffff44
-#define UBC_BBRA                0xffffff48
-#define UBC_BARB                0xffffff60
-#define UBC_BAMRB               0xffffff64
-#define UBC_BBRB                0xffffff68
-#define UBC_BDRB                0xffffff70
-#define UBC_BDMRB               0xffffff74
-#define UBC_BRCR                0xffffff78
-
-/*
- * We don't have any ASID changes to make in the UBC on the SH-2.
- *
- * Make these purposely invalid to track misuse.
- */
-#define UBC_BASRA		0x00000000
-#define UBC_BASRB		0x00000000
-
-#endif /* __ASM_CPU_SH2_UBC_H */
-
diff --git a/arch/sh/include/cpu-sh2/cpu/watchdog.h b/arch/sh/include/cpu-sh2/cpu/watchdog.h
index 393161c..1eab8aa 100644
--- a/arch/sh/include/cpu-sh2/cpu/watchdog.h
+++ b/arch/sh/include/cpu-sh2/cpu/watchdog.h
@@ -44,7 +44,7 @@
 	/*
 	 * Same read/write brain-damage as for WTCNT here..
 	 */
-	return ctrl_inb(RSTCSR_R);
+	return __raw_readb(RSTCSR_R);
 }
 
 /**
@@ -62,7 +62,7 @@
 	 * we can't presently touch the WOVF bit, since the upper byte
 	 * has to be swapped for this. So just leave it alone..
 	 */
-	ctrl_outw((WTCNT_HIGH << 8) | (__u16)val, RSTCSR);
+	__raw_writeb((WTCNT_HIGH << 8) | (__u16)val, RSTCSR);
 }
 
 #endif /* __ASM_CPU_SH2_WATCHDOG_H */
diff --git a/arch/sh/include/cpu-sh3/cpu/dac.h b/arch/sh/include/cpu-sh3/cpu/dac.h
index 05fda83..98f1d15 100644
--- a/arch/sh/include/cpu-sh3/cpu/dac.h
+++ b/arch/sh/include/cpu-sh3/cpu/dac.h
@@ -17,25 +17,25 @@
 static __inline__ void sh_dac_enable(int channel)
 {
 	unsigned char v;
-	v = ctrl_inb(DACR);
+	v = __raw_readb(DACR);
 	if(channel) v |= DACR_DAOE1;
 	else v |= DACR_DAOE0;
-	ctrl_outb(v,DACR);
+	__raw_writeb(v,DACR);
 }
 
 static __inline__ void sh_dac_disable(int channel)
 {
 	unsigned char v;
-	v = ctrl_inb(DACR);
+	v = __raw_readb(DACR);
 	if(channel) v &= ~DACR_DAOE1;
 	else v &= ~DACR_DAOE0;
-	ctrl_outb(v,DACR);
+	__raw_writeb(v,DACR);
 }
 
 static __inline__ void sh_dac_output(u8 value, int channel)
 {
-	if(channel) ctrl_outb(value,DADR1);
-	else ctrl_outb(value,DADR0);
+	if(channel) __raw_writeb(value,DADR1);
+	else __raw_writeb(value,DADR0);
 }
 
 #endif /* __ASM_CPU_SH3_DAC_H */
diff --git a/arch/sh/include/cpu-sh3/cpu/dma.h b/arch/sh/include/cpu-sh3/cpu/dma.h
index 0ea15f3..207811a 100644
--- a/arch/sh/include/cpu-sh3/cpu/dma.h
+++ b/arch/sh/include/cpu-sh3/cpu/dma.h
@@ -20,8 +20,10 @@
 #define TS_32		0x00000010
 #define TS_128		0x00000018
 
-#define CHCR_TS_MASK	0x18
-#define CHCR_TS_SHIFT	3
+#define CHCR_TS_LOW_MASK	0x18
+#define CHCR_TS_LOW_SHIFT	3
+#define CHCR_TS_HIGH_MASK	0
+#define CHCR_TS_HIGH_SHIFT	0
 
 #define DMAOR_INIT	DMAOR_DME
 
@@ -36,11 +38,13 @@
 	XMIT_SZ_128BIT,
 };
 
-static unsigned int ts_shift[] __maybe_unused = {
-	[XMIT_SZ_8BIT]		= 0,
-	[XMIT_SZ_16BIT]		= 1,
-	[XMIT_SZ_32BIT]		= 2,
-	[XMIT_SZ_128BIT]	= 4,
-};
+#define TS_SHIFT {			\
+	[XMIT_SZ_8BIT]		= 0,	\
+	[XMIT_SZ_16BIT]		= 1,	\
+	[XMIT_SZ_32BIT]		= 2,	\
+	[XMIT_SZ_128BIT]	= 4,	\
+}
+
+#define TS_INDEX2VAL(i)	(((i) & 3) << CHCR_TS_LOW_SHIFT)
 
 #endif /* __ASM_CPU_SH3_DMA_H */
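[Editor's note, not part of the patch] The SH-3 DMA hunk above turns the static ts_shift[] array into a TS_SHIFT initializer macro, so each user of the header instantiates the table itself, and adds TS_INDEX2VAL() to place the size index into the CHCR TS field. A standalone sketch using exactly the values shown above:

/* Sketch: instantiate TS_SHIFT and encode a size index into CHCR bits. */
#include <stdio.h>

#define CHCR_TS_LOW_SHIFT	3

enum {
	XMIT_SZ_8BIT,
	XMIT_SZ_16BIT,
	XMIT_SZ_32BIT,
	XMIT_SZ_128BIT,
};

#define TS_SHIFT {			\
	[XMIT_SZ_8BIT]		= 0,	\
	[XMIT_SZ_16BIT]		= 1,	\
	[XMIT_SZ_32BIT]		= 2,	\
	[XMIT_SZ_128BIT]	= 4,	\
}

#define TS_INDEX2VAL(i)	(((i) & 3) << CHCR_TS_LOW_SHIFT)

static const unsigned int ts_shift[] = TS_SHIFT;

int main(void)
{
	int i = XMIT_SZ_32BIT;

	/* A 32-bit unit transfers 1 << 2 = 4 bytes and encodes as 0x10,
	 * matching the old TS_32 value. */
	printf("bytes per unit=%u CHCR TS bits=%#x\n",
	       1u << ts_shift[i], (unsigned)TS_INDEX2VAL(i));
	return 0;
}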
diff --git a/arch/sh/include/cpu-sh3/cpu/ubc.h b/arch/sh/include/cpu-sh3/cpu/ubc.h
deleted file mode 100644
index 4e6381d..0000000
--- a/arch/sh/include/cpu-sh3/cpu/ubc.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * include/asm-sh/cpu-sh3/ubc.h
- *
- * Copyright (C) 1999 Niibe Yutaka
- * Copyright (C) 2003 Paul Mundt
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#ifndef __ASM_CPU_SH3_UBC_H
-#define __ASM_CPU_SH3_UBC_H
-
-#if defined(CONFIG_CPU_SUBTYPE_SH7710) || \
-    defined(CONFIG_CPU_SUBTYPE_SH7720) || \
-    defined(CONFIG_CPU_SUBTYPE_SH7721)
-#define UBC_BARA		0xa4ffffb0
-#define UBC_BAMRA		0xa4ffffb4
-#define UBC_BBRA		0xa4ffffb8
-#define UBC_BASRA		0xffffffe4
-#define UBC_BARB		0xa4ffffa0
-#define UBC_BAMRB		0xa4ffffa4
-#define UBC_BBRB		0xa4ffffa8
-#define UBC_BASRB		0xffffffe8
-#define UBC_BDRB		0xa4ffff90
-#define UBC_BDMRB		0xa4ffff94
-#define UBC_BRCR		0xa4ffff98
-#else
-#define UBC_BARA                0xffffffb0
-#define UBC_BAMRA               0xffffffb4
-#define UBC_BBRA                0xffffffb8
-#define UBC_BASRA               0xffffffe4
-#define UBC_BARB                0xffffffa0
-#define UBC_BAMRB               0xffffffa4
-#define UBC_BBRB                0xffffffa8
-#define UBC_BASRB               0xffffffe8
-#define UBC_BDRB                0xffffff90
-#define UBC_BDMRB               0xffffff94
-#define UBC_BRCR                0xffffff98
-#endif
-
-#endif /* __ASM_CPU_SH3_UBC_H */
diff --git a/arch/sh/include/cpu-sh4/cpu/addrspace.h b/arch/sh/include/cpu-sh4/cpu/addrspace.h
index a3fa733..d51da25 100644
--- a/arch/sh/include/cpu-sh4/cpu/addrspace.h
+++ b/arch/sh/include/cpu-sh4/cpu/addrspace.h
@@ -28,6 +28,15 @@
 #define P4SEG_TLB_DATA	0xf7000000
 #define P4SEG_REG_BASE	0xff000000
 
+#define PA_AREA0	0x00000000
+#define PA_AREA1	0x04000000
+#define PA_AREA2	0x08000000
+#define PA_AREA3	0x0c000000
+#define PA_AREA4	0x10000000
+#define PA_AREA5	0x14000000
+#define PA_AREA6	0x18000000
+#define PA_AREA7	0x1c000000
+
 #define PA_AREA5_IO	0xb4000000	/* Area 5 IO Memory */
 #define PA_AREA6_IO	0xb8000000	/* Area 6 IO Memory */
 
diff --git a/arch/sh/include/cpu-sh4/cpu/dma-sh4a.h b/arch/sh/include/cpu-sh4/cpu/dma-sh4a.h
index c4ed660..e734ea4 100644
--- a/arch/sh/include/cpu-sh4/cpu/dma-sh4a.h
+++ b/arch/sh/include/cpu-sh4/cpu/dma-sh4a.h
@@ -2,22 +2,38 @@
 #define __ASM_SH_CPU_SH4_DMA_SH7780_H
 
 #if defined(CONFIG_CPU_SUBTYPE_SH7343) || \
-	defined(CONFIG_CPU_SUBTYPE_SH7722) || \
 	defined(CONFIG_CPU_SUBTYPE_SH7730)
 #define DMTE0_IRQ	48
 #define DMTE4_IRQ	76
 #define DMAE0_IRQ	78	/* DMA Error IRQ*/
 #define SH_DMAC_BASE0	0xFE008020
-#define SH_DMARS_BASE	0xFE009000
+#define SH_DMARS_BASE0	0xFE009000
+#define CHCR_TS_LOW_MASK	0x00000018
+#define CHCR_TS_LOW_SHIFT	3
+#define CHCR_TS_HIGH_MASK	0
+#define CHCR_TS_HIGH_SHIFT	0
+#elif defined(CONFIG_CPU_SUBTYPE_SH7722)
+#define DMTE0_IRQ	48
+#define DMTE4_IRQ	76
+#define DMAE0_IRQ	78	/* DMA Error IRQ*/
+#define SH_DMAC_BASE0	0xFE008020
+#define SH_DMARS_BASE0	0xFE009000
+#define CHCR_TS_LOW_MASK	0x00000018
+#define CHCR_TS_LOW_SHIFT	3
+#define CHCR_TS_HIGH_MASK	0x00300000
+#define CHCR_TS_HIGH_SHIFT	20
 #elif defined(CONFIG_CPU_SUBTYPE_SH7763) || \
 	defined(CONFIG_CPU_SUBTYPE_SH7764)
 #define DMTE0_IRQ	34
 #define DMTE4_IRQ	44
 #define DMAE0_IRQ	38
 #define SH_DMAC_BASE0	0xFF608020
-#define SH_DMARS_BASE	0xFF609000
-#elif defined(CONFIG_CPU_SUBTYPE_SH7723) || \
-      defined(CONFIG_CPU_SUBTYPE_SH7724)
+#define SH_DMARS_BASE0	0xFF609000
+#define CHCR_TS_LOW_MASK	0x00000018
+#define CHCR_TS_LOW_SHIFT	3
+#define CHCR_TS_HIGH_MASK	0
+#define CHCR_TS_HIGH_SHIFT	0
+#elif defined(CONFIG_CPU_SUBTYPE_SH7723)
 #define DMTE0_IRQ	48	/* DMAC0A*/
 #define DMTE4_IRQ	76	/* DMAC0B */
 #define DMTE6_IRQ	40
@@ -29,7 +45,29 @@
 #define DMAE1_IRQ	74	/* DMA Error IRQ*/
 #define SH_DMAC_BASE0	0xFE008020
 #define SH_DMAC_BASE1	0xFDC08020
-#define SH_DMARS_BASE	0xFDC09000
+#define SH_DMARS_BASE0	0xFDC09000
+#define CHCR_TS_LOW_MASK	0x00000018
+#define CHCR_TS_LOW_SHIFT	3
+#define CHCR_TS_HIGH_MASK	0
+#define CHCR_TS_HIGH_SHIFT	0
+#elif defined(CONFIG_CPU_SUBTYPE_SH7724)
+#define DMTE0_IRQ	48	/* DMAC0A*/
+#define DMTE4_IRQ	76	/* DMAC0B */
+#define DMTE6_IRQ	40
+#define DMTE8_IRQ	42	/* DMAC1A */
+#define DMTE9_IRQ	43
+#define DMTE10_IRQ	72	/* DMAC1B */
+#define DMTE11_IRQ	73
+#define DMAE0_IRQ	78	/* DMA Error IRQ*/
+#define DMAE1_IRQ	74	/* DMA Error IRQ*/
+#define SH_DMAC_BASE0	0xFE008020
+#define SH_DMAC_BASE1	0xFDC08020
+#define SH_DMARS_BASE0	0xFE009000
+#define SH_DMARS_BASE1	0xFDC09000
+#define CHCR_TS_LOW_MASK	0x00000018
+#define CHCR_TS_LOW_SHIFT	3
+#define CHCR_TS_HIGH_MASK	0x00600000
+#define CHCR_TS_HIGH_SHIFT	21
 #elif defined(CONFIG_CPU_SUBTYPE_SH7780)
 #define DMTE0_IRQ	34
 #define DMTE4_IRQ	44
@@ -41,7 +79,11 @@
 #define DMAE0_IRQ	38	/* DMA Error IRQ */
 #define SH_DMAC_BASE0	0xFC808020
 #define SH_DMAC_BASE1	0xFC818020
-#define SH_DMARS_BASE	0xFC809000
+#define SH_DMARS_BASE0	0xFC809000
+#define CHCR_TS_LOW_MASK	0x00000018
+#define CHCR_TS_LOW_SHIFT	3
+#define CHCR_TS_HIGH_MASK	0
+#define CHCR_TS_HIGH_SHIFT	0
 #else /* SH7785 */
 #define DMTE0_IRQ	33
 #define DMTE4_IRQ	37
@@ -54,18 +96,17 @@
 #define DMAE1_IRQ	58	/* DMA Error IRQ1 */
 #define SH_DMAC_BASE0	0xFC808020
 #define SH_DMAC_BASE1	0xFCC08020
-#define SH_DMARS_BASE	0xFC809000
+#define SH_DMARS_BASE0	0xFC809000
+#define CHCR_TS_LOW_MASK	0x00000018
+#define CHCR_TS_LOW_SHIFT	3
+#define CHCR_TS_HIGH_MASK	0
+#define CHCR_TS_HIGH_SHIFT	0
 #endif
 
-#define REQ_HE	0x000000C0
-#define REQ_H	0x00000080
-#define REQ_LE	0x00000040
-#define TM_BURST 0x0000020
-#define TS_8	0x00000000
-#define TS_16	0x00000008
-#define TS_32	0x00000010
-#define TS_16BLK	0x00000018
-#define TS_32BLK	0x00100000
+#define REQ_HE		0x000000C0
+#define REQ_H		0x00000080
+#define REQ_LE		0x00000040
+#define TM_BURST	0x00000020
 
 /*
  * The SuperH DMAC supports a number of transmit sizes, we list them here,
@@ -74,22 +115,31 @@
  * Defaults to a 64-bit transfer size.
  */
 enum {
-	XMIT_SZ_8BIT,
-	XMIT_SZ_16BIT,
-	XMIT_SZ_32BIT,
-	XMIT_SZ_128BIT,
-	XMIT_SZ_256BIT,
+	XMIT_SZ_8BIT		= 0,
+	XMIT_SZ_16BIT		= 1,
+	XMIT_SZ_32BIT		= 2,
+	XMIT_SZ_64BIT		= 7,
+	XMIT_SZ_128BIT		= 3,
+	XMIT_SZ_256BIT		= 4,
+	XMIT_SZ_128BIT_BLK	= 0xb,
+	XMIT_SZ_256BIT_BLK	= 0xc,
 };
 
 /*
  * The DMA count is defined as the number of bytes to transfer.
  */
-static unsigned int ts_shift[] __maybe_unused = {
-	[XMIT_SZ_8BIT]		= 0,
-	[XMIT_SZ_16BIT]		= 1,
-	[XMIT_SZ_32BIT]		= 2,
-	[XMIT_SZ_128BIT]	= 4,
-	[XMIT_SZ_256BIT]	= 5,
-};
+#define TS_SHIFT {			\
+	[XMIT_SZ_8BIT]		= 0,	\
+	[XMIT_SZ_16BIT]		= 1,	\
+	[XMIT_SZ_32BIT]		= 2,	\
+	[XMIT_SZ_64BIT]		= 3,	\
+	[XMIT_SZ_128BIT]	= 4,	\
+	[XMIT_SZ_256BIT]	= 5,	\
+	[XMIT_SZ_128BIT_BLK]	= 4,	\
+	[XMIT_SZ_256BIT_BLK]	= 5,	\
+}
+
+#define TS_INDEX2VAL(i)	((((i) & 3) << CHCR_TS_LOW_SHIFT) | \
+			 ((((i) >> 2) & 3) << CHCR_TS_HIGH_SHIFT))
 
 #endif /* __ASM_SH_CPU_SH4_DMA_SH7780_H */
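[Editor's note, not part of the patch] On the SH-4A parts above that support block transfer sizes, the 4-bit transmit-size index is split across two CHCR fields, which is why each subtype now carries CHCR_TS_LOW_*/CHCR_TS_HIGH_* and why TS_INDEX2VAL() packs bits 0-1 into the low field and bits 2-3 into the high field. A standalone round-trip check using the SH7724 field positions from the hunk:

/* Sketch: encode and decode the split transmit-size index (SH7724 layout). */
#include <stdio.h>

#define CHCR_TS_LOW_MASK	0x00000018
#define CHCR_TS_LOW_SHIFT	3
#define CHCR_TS_HIGH_MASK	0x00600000
#define CHCR_TS_HIGH_SHIFT	21

#define TS_INDEX2VAL(i)	((((i) & 3) << CHCR_TS_LOW_SHIFT) | \
			 ((((i) >> 2) & 3) << CHCR_TS_HIGH_SHIFT))

/* Inverse helper, invented for the sketch. */
static unsigned int ts_index(unsigned int chcr)
{
	return ((chcr & CHCR_TS_LOW_MASK) >> CHCR_TS_LOW_SHIFT) |
	       (((chcr & CHCR_TS_HIGH_MASK) >> CHCR_TS_HIGH_SHIFT) << 2);
}

int main(void)
{
	unsigned int idx = 0xb;			/* XMIT_SZ_128BIT_BLK */
	unsigned int val = TS_INDEX2VAL(idx);

	/* 0xb = 0b1011: low bits 11 land at bit 3, high bits 10 at bit 21. */
	printf("index %#x -> CHCR bits %#x -> index %#x\n",
	       idx, val, ts_index(val));
	return 0;
}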
diff --git a/arch/sh/include/cpu-sh4/cpu/dma.h b/arch/sh/include/cpu-sh4/cpu/dma.h
index bcb3024..114a369 100644
--- a/arch/sh/include/cpu-sh4/cpu/dma.h
+++ b/arch/sh/include/cpu-sh4/cpu/dma.h
@@ -6,8 +6,6 @@
 #ifdef CONFIG_CPU_SH4A
 
 #define DMAOR_INIT	(DMAOR_DME)
-#define CHCR_TS_MASK	0x18
-#define CHCR_TS_SHIFT	3
 
 #include <cpu/dma-sh4a.h>
 #else /* CONFIG_CPU_SH4A */
@@ -29,8 +27,10 @@
 #define TS_32		0x00000030
 #define TS_64		0x00000000
 
-#define CHCR_TS_MASK	0x70
-#define CHCR_TS_SHIFT	4
+#define CHCR_TS_LOW_MASK	0x70
+#define CHCR_TS_LOW_SHIFT	4
+#define CHCR_TS_HIGH_MASK	0
+#define CHCR_TS_HIGH_SHIFT	0
 
 #define DMAOR_COD	0x00000008
 
@@ -41,23 +41,26 @@
  * Defaults to a 64-bit transfer size.
  */
 enum {
-	XMIT_SZ_64BIT,
-	XMIT_SZ_8BIT,
-	XMIT_SZ_16BIT,
-	XMIT_SZ_32BIT,
-	XMIT_SZ_256BIT,
+	XMIT_SZ_8BIT	= 1,
+	XMIT_SZ_16BIT	= 2,
+	XMIT_SZ_32BIT	= 3,
+	XMIT_SZ_64BIT	= 0,
+	XMIT_SZ_256BIT	= 4,
 };
 
 /*
  * The DMA count is defined as the number of bytes to transfer.
  */
-static unsigned int ts_shift[] __maybe_unused = {
-	[XMIT_SZ_64BIT]		= 3,
-	[XMIT_SZ_8BIT]		= 0,
-	[XMIT_SZ_16BIT]		= 1,
-	[XMIT_SZ_32BIT]		= 2,
-	[XMIT_SZ_256BIT]	= 5,
-};
+#define TS_SHIFT {			\
+	[XMIT_SZ_8BIT]		= 0,	\
+	[XMIT_SZ_16BIT]		= 1,	\
+	[XMIT_SZ_32BIT]		= 2,	\
+	[XMIT_SZ_64BIT]		= 3,	\
+	[XMIT_SZ_256BIT]	= 5,	\
+}
+
+#define TS_INDEX2VAL(i)	(((i) & 7) << CHCR_TS_LOW_SHIFT)
+
 #endif
 
 #endif /* __ASM_CPU_SH4_DMA_H */
diff --git a/arch/sh/include/cpu-sh4/cpu/mmu_context.h b/arch/sh/include/cpu-sh4/cpu/mmu_context.h
index 3ce7ef6..03ea75c 100644
--- a/arch/sh/include/cpu-sh4/cpu/mmu_context.h
+++ b/arch/sh/include/cpu-sh4/cpu/mmu_context.h
@@ -25,6 +25,10 @@
 
 #define MMUCR_TI		(1<<2)
 
+#define MMUCR_URB		0x00FC0000
+#define MMUCR_URB_SHIFT		18
+#define MMUCR_URB_NENTRIES	64
+
 #if defined(CONFIG_32BIT) && defined(CONFIG_CPU_SUBTYPE_ST40)
 #define MMUCR_SE		(1 << 4)
 #else
diff --git a/arch/sh/include/cpu-sh4/cpu/sq.h b/arch/sh/include/cpu-sh4/cpu/sq.h
index 586d649..74716ba 100644
--- a/arch/sh/include/cpu-sh4/cpu/sq.h
+++ b/arch/sh/include/cpu-sh4/cpu/sq.h
@@ -12,6 +12,7 @@
 #define __ASM_CPU_SH4_SQ_H
 
 #include <asm/addrspace.h>
+#include <asm/page.h>
 
 /*
  * Store queues range from e0000000-e3fffffc, allowing approx. 64MB to be
@@ -28,7 +29,7 @@
 
 /* arch/sh/kernel/cpu/sh4/sq.c */
 unsigned long sq_remap(unsigned long phys, unsigned int size,
-		       const char *name, unsigned long flags);
+		       const char *name, pgprot_t prot);
 void sq_unmap(unsigned long vaddr);
 void sq_flush_range(unsigned long start, unsigned int len);
 
diff --git a/arch/sh/include/cpu-sh4/cpu/ubc.h b/arch/sh/include/cpu-sh4/cpu/ubc.h
deleted file mode 100644
index c86e170..0000000
--- a/arch/sh/include/cpu-sh4/cpu/ubc.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * include/asm-sh/cpu-sh4/ubc.h
- *
- * Copyright (C) 1999 Niibe Yutaka
- * Copyright (C) 2003 Paul Mundt
- * Copyright (C) 2006 Lineo Solutions Inc. support SH4A UBC
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#ifndef __ASM_CPU_SH4_UBC_H
-#define __ASM_CPU_SH4_UBC_H
-
-#if defined(CONFIG_CPU_SH4A)
-#define UBC_CBR0		0xff200000
-#define UBC_CRR0		0xff200004
-#define UBC_CAR0		0xff200008
-#define UBC_CAMR0		0xff20000c
-#define UBC_CBR1		0xff200020
-#define UBC_CRR1		0xff200024
-#define UBC_CAR1		0xff200028
-#define UBC_CAMR1		0xff20002c
-#define UBC_CDR1		0xff200030
-#define UBC_CDMR1		0xff200034
-#define UBC_CETR1		0xff200038
-#define UBC_CCMFR		0xff200600
-#define UBC_CBCR		0xff200620
-
-/* CBR	*/
-#define UBC_CBR_AIE		(0x01<<30)
-#define UBC_CBR_ID_INST		(0x01<<4)
-#define UBC_CBR_RW_READ		(0x01<<1)
-#define UBC_CBR_CE		(0x01)
-
-#define	UBC_CBR_AIV_MASK	(0x00FF0000)
-#define	UBC_CBR_AIV_SHIFT	(16)
-#define UBC_CBR_AIV_SET(asid)	(((asid)<<UBC_CBR_AIV_SHIFT) & UBC_CBR_AIV_MASK)
-
-#define UBC_CBR_INIT		0x20000000
-
-/* CRR	*/
-#define UBC_CRR_RES		(0x01<<13)
-#define UBC_CRR_PCB		(0x01<<1)
-#define UBC_CRR_BIE		(0x01)
-
-#define UBC_CRR_INIT		0x00002000
-
-#else	/* CONFIG_CPU_SH4 */
-#define UBC_BARA		0xff200000
-#define UBC_BAMRA		0xff200004
-#define UBC_BBRA		0xff200008
-#define UBC_BASRA		0xff000014
-#define UBC_BARB		0xff20000c
-#define UBC_BAMRB		0xff200010
-#define UBC_BBRB		0xff200014
-#define UBC_BASRB		0xff000018
-#define UBC_BDRB		0xff200018
-#define UBC_BDMRB		0xff20001c
-#define UBC_BRCR		0xff200020
-#endif	/* CONFIG_CPU_SH4 */
-
-#endif /* __ASM_CPU_SH4_UBC_H */
-
diff --git a/arch/sh/include/mach-common/mach/magicpanelr2.h b/arch/sh/include/mach-common/mach/magicpanelr2.h
index c644a77..183a2f7 100644
--- a/arch/sh/include/mach-common/mach/magicpanelr2.h
+++ b/arch/sh/include/mach-common/mach/magicpanelr2.h
@@ -19,12 +19,12 @@
 #include <asm/io_generic.h>
 
 
-#define SETBITS_OUTB(mask, reg)   ctrl_outb(ctrl_inb(reg) | mask, reg)
-#define SETBITS_OUTW(mask, reg)   ctrl_outw(ctrl_inw(reg) | mask, reg)
-#define SETBITS_OUTL(mask, reg)   ctrl_outl(ctrl_inl(reg) | mask, reg)
-#define CLRBITS_OUTB(mask, reg)   ctrl_outb(ctrl_inb(reg) & ~mask, reg)
-#define CLRBITS_OUTW(mask, reg)   ctrl_outw(ctrl_inw(reg) & ~mask, reg)
-#define CLRBITS_OUTL(mask, reg)   ctrl_outl(ctrl_inl(reg) & ~mask, reg)
+#define SETBITS_OUTB(mask, reg)   __raw_writeb(__raw_readb(reg) | mask, reg)
+#define SETBITS_OUTW(mask, reg)   __raw_writew(__raw_readw(reg) | mask, reg)
+#define SETBITS_OUTL(mask, reg)   __raw_writel(__raw_readl(reg) | mask, reg)
+#define CLRBITS_OUTB(mask, reg)   __raw_writeb(__raw_readb(reg) & ~mask, reg)
+#define CLRBITS_OUTW(mask, reg)   __raw_writew(__raw_readw(reg) & ~mask, reg)
+#define CLRBITS_OUTL(mask, reg)   __raw_writel(__raw_readl(reg) & ~mask, reg)
 
 
 #define PA_LED          PORT_PADR      /* LED */
diff --git a/arch/sh/include/mach-dreamcast/mach/sysasic.h b/arch/sh/include/mach-dreamcast/mach/sysasic.h
index f334266..58f710e 100644
--- a/arch/sh/include/mach-dreamcast/mach/sysasic.h
+++ b/arch/sh/include/mach-dreamcast/mach/sysasic.h
@@ -39,5 +39,10 @@
 
 #define HW_EVENT_IRQ_MAX (HW_EVENT_IRQ_BASE + 95)
 
+/* arch/sh/boards/mach-dreamcast/irq.c */
+extern int systemasic_irq_demux(int);
+extern void systemasic_irq_init(void);
+extern void aica_time_init(void);
+
 #endif /* __ASM_SH_DREAMCAST_SYSASIC_H */
 
diff --git a/arch/sh/include/mach-sdk7786/mach/fpga.h b/arch/sh/include/mach-sdk7786/mach/fpga.h
new file mode 100644
index 0000000..2120d67
--- /dev/null
+++ b/arch/sh/include/mach-sdk7786/mach/fpga.h
@@ -0,0 +1,114 @@
+#ifndef __MACH_SDK7786_FPGA_H
+#define __MACH_SDK7786_FPGA_H
+
+#include <linux/io.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+
+#define SRSTR		0x000
+#define  SRSTR_MAGIC	0x1971	/* Fixed magical read value */
+
+#define INTASR		0x010
+#define INTAMR		0x020
+#define MODSWR		0x030
+#define INTTESTR	0x040
+#define SYSSR		0x050
+#define NRGPR		0x060
+#define NMISR		0x070
+
+#define NMIMR		0x080
+#define  NMIMR_MAN_NMIM	BIT(0)	/* Manual NMI mask */
+#define  NMIMR_AUX_NMIM	BIT(1)	/* Auxiliary NMI mask */
+
+#define INTBSR		0x090
+#define INTBMR		0x0a0
+#define USRLEDR		0x0b0
+#define MAPSWR		0x0c0
+#define FPGAVR		0x0d0
+#define FPGADR		0x0e0
+#define PCBRR		0x0f0
+#define RSR		0x100
+#define EXTASR		0x110
+#define SPCAR		0x120
+#define INTMSR		0x130
+#define PCIECR		0x140
+#define FAER		0x150
+#define USRGPIR		0x160
+/* 0x170 reserved */
+#define LCLASR		0x180
+
+#define SBCR		0x190
+#define  SCBR_I2CMEN	BIT(0)	/* FPGA I2C master enable */
+#define  SCBR_I2CCEN	BIT(1)	/* CPU I2C master enable */
+
+#define PWRCR		0x1a0
+#define SPCBR		0x1b0
+#define SPICR		0x1c0
+#define SPIDR		0x1d0
+#define I2CCR		0x1e0
+#define I2CDR		0x1f0
+#define FPGACR		0x200
+#define IASELR1		0x210
+#define IASELR2		0x220
+#define IASELR3		0x230
+#define IASELR4		0x240
+#define IASELR5		0x250
+#define IASELR6		0x260
+#define IASELR7		0x270
+#define IASELR8		0x280
+#define IASELR9		0x290
+#define IASELR10	0x2a0
+#define IASELR11	0x2b0
+#define IASELR12	0x2c0
+#define IASELR13	0x2d0
+#define IASELR14	0x2e0
+#define IASELR15	0x2f0
+/* 0x300 reserved */
+#define IBSELR1		0x310
+#define IBSELR2		0x320
+#define IBSELR3		0x330
+#define IBSELR4		0x340
+#define IBSELR5		0x350
+#define IBSELR6		0x360
+#define IBSELR7		0x370
+#define IBSELR8		0x380
+#define IBSELR9		0x390
+#define IBSELR10	0x3a0
+#define IBSELR11	0x3b0
+#define IBSELR12	0x3c0
+#define IBSELR13	0x3d0
+#define IBSELR14	0x3e0
+#define IBSELR15	0x3f0
+#define USRACR		0x400
+#define BEEPR		0x410
+#define USRLCDR		0x420
+#define SMBCR		0x430
+#define SMBDR		0x440
+#define USBCR		0x450
+#define AMSR		0x460
+#define ACCR		0x470
+#define SDIFCR		0x480
+
+/* arch/sh/boards/mach-sdk7786/fpga.c */
+extern void __iomem *sdk7786_fpga_base;
+extern void sdk7786_fpga_init(void);
+
+#define SDK7786_FPGA_REGADDR(reg)	(sdk7786_fpga_base + (reg))
+
+/*
+ * A convenience wrapper from register offset to internal I2C address,
+ * when the FPGA is in I2C slave mode.
+ */
+#define SDK7786_FPGA_I2CADDR(reg)	((reg) >> 3)
+
+static inline u16 fpga_read_reg(unsigned int reg)
+{
+	return ioread16(sdk7786_fpga_base + reg);
+}
+
+static inline void fpga_write_reg(u16 val, unsigned int reg)
+{
+	iowrite16(val, sdk7786_fpga_base + reg);
+}
+
+#endif /* __MACH_SDK7786_FPGA_H */
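
The new SDK7786 FPGA header pairs the register map with ioread16()/iowrite16() accessors built on sdk7786_fpga_base, which sdk7786_fpga_init() is expected to have mapped. A hedged usage sketch (the wrapper function below is made up for illustration):

	#include <mach/fpga.h>

	/* Mask the auxiliary NMI source, then report the FPGA version register. */
	static u16 sdk7786_fpga_sketch(void)
	{
		u16 nmimr = fpga_read_reg(NMIMR);

		fpga_write_reg(nmimr | NMIMR_AUX_NMIM, NMIMR);

		return fpga_read_reg(FPGAVR);
	}
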
diff --git a/arch/sh/include/mach-sdk7786/mach/irq.h b/arch/sh/include/mach-sdk7786/mach/irq.h
new file mode 100644
index 0000000..0f58463
--- /dev/null
+++ b/arch/sh/include/mach-sdk7786/mach/irq.h
@@ -0,0 +1,7 @@
+#ifndef __MACH_SDK7786_IRQ_H
+#define __MACH_SDK7786_IRQ_H
+
+/* arch/sh/boards/mach-sdk7786/irq.c */
+extern void sdk7786_init_irq(void);
+
+#endif /* __MACH_SDK7786_IRQ_H */
diff --git a/arch/sh/include/mach-se/mach/se7343.h b/arch/sh/include/mach-se/mach/se7343.h
index 749914b..8d8170d 100644
--- a/arch/sh/include/mach-se/mach/se7343.h
+++ b/arch/sh/include/mach-se/mach/se7343.h
@@ -94,26 +94,26 @@
 
 #define PORT_DRVCR	0xA4050180
 
-#define PORT_PADR  	0xA4050120
-#define PORT_PBDR  	0xA4050122
-#define PORT_PCDR  	0xA4050124
-#define PORT_PDDR  	0xA4050126
-#define PORT_PEDR  	0xA4050128
-#define PORT_PFDR  	0xA405012A
-#define PORT_PGDR  	0xA405012C
-#define PORT_PHDR  	0xA405012E
-#define PORT_PJDR  	0xA4050130
-#define PORT_PKDR  	0xA4050132
-#define PORT_PLDR  	0xA4050134
-#define PORT_PMDR  	0xA4050136
-#define PORT_PNDR  	0xA4050138
-#define PORT_PQDR  	0xA405013A
-#define PORT_PRDR  	0xA405013C
-#define PORT_PTDR  	0xA4050160
-#define PORT_PUDR  	0xA4050162
-#define PORT_PVDR  	0xA4050164
-#define PORT_PWDR  	0xA4050166
-#define PORT_PYDR  	0xA4050168
+#define PORT_PADR	0xA4050120
+#define PORT_PBDR	0xA4050122
+#define PORT_PCDR	0xA4050124
+#define PORT_PDDR	0xA4050126
+#define PORT_PEDR	0xA4050128
+#define PORT_PFDR	0xA405012A
+#define PORT_PGDR	0xA405012C
+#define PORT_PHDR	0xA405012E
+#define PORT_PJDR	0xA4050130
+#define PORT_PKDR	0xA4050132
+#define PORT_PLDR	0xA4050134
+#define PORT_PMDR	0xA4050136
+#define PORT_PNDR	0xA4050138
+#define PORT_PQDR	0xA405013A
+#define PORT_PRDR	0xA405013C
+#define PORT_PTDR	0xA4050160
+#define PORT_PUDR	0xA4050162
+#define PORT_PVDR	0xA4050164
+#define PORT_PWDR	0xA4050166
+#define PORT_PYDR	0xA4050168
 
 #define FPGA_IN		0xb1400000
 #define FPGA_OUT	0xb1400002
@@ -133,18 +133,10 @@
 #define SE7343_FPGA_IRQ_UARTB	11
 
 #define SE7343_FPGA_IRQ_NR	12
-#define SE7343_FPGA_IRQ_BASE	120
-
-#define MRSHPC_IRQ3    	(SE7343_FPGA_IRQ_BASE + SE7343_FPGA_IRQ_MRSHPC3)
-#define MRSHPC_IRQ2    	(SE7343_FPGA_IRQ_BASE + SE7343_FPGA_IRQ_MRSHPC2)
-#define MRSHPC_IRQ1    	(SE7343_FPGA_IRQ_BASE + SE7343_FPGA_IRQ_MRSHPC1)
-#define MRSHPC_IRQ0    	(SE7343_FPGA_IRQ_BASE + SE7343_FPGA_IRQ_MRSHPC0)
-#define SMC_IRQ		(SE7343_FPGA_IRQ_BASE + SE7343_FPGA_IRQ_SMC)
-#define USB_IRQ		(SE7343_FPGA_IRQ_BASE + SE7343_FPGA_IRQ_USB)
-#define UARTA_IRQ	(SE7343_FPGA_IRQ_BASE + SE7343_FPGA_IRQ_UARTA)
-#define UARTB_IRQ	(SE7343_FPGA_IRQ_BASE + SE7343_FPGA_IRQ_UARTB)
 
 /* arch/sh/boards/se/7343/irq.c */
+extern unsigned int se7343_fpga_irq[];
+
 void init_7343se_IRQ(void);
 
 #endif  /* __ASM_SH_HITACHI_SE7343_H */
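
With the fixed SE7343_FPGA_IRQ_BASE mapping gone, board code is expected to look up its virtual IRQ through the se7343_fpga_irq[] array that the board's irq.c fills in. A sketch under the assumption that the array is indexed by the remaining SE7343_FPGA_IRQ_* constants (include path and wrapper are illustrative):

	#include <mach/se7343.h>

	/* Resolve the SMC ethernet interrupt once the FPGA IRQs are set up
	 * (init_7343se_IRQ() normally runs from the board init code). */
	static unsigned int se7343_smc_irq_sketch(void)
	{
		return se7343_fpga_irq[SE7343_FPGA_IRQ_SMC];
	}
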
diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile
index 0d587da..02fd3ae 100644
--- a/arch/sh/kernel/Makefile
+++ b/arch/sh/kernel/Makefile
@@ -13,8 +13,9 @@
 
 obj-y	:= debugtraps.o dma-nommu.o dumpstack.o 			\
 	   idle.o io.o io_generic.o irq.o				\
-	   irq_$(BITS).o machvec.o nmi_debug.o process_$(BITS).o 	\
-	   ptrace_$(BITS).o return_address.o				\
+	   irq_$(BITS).o machvec.o nmi_debug.o process.o		\
+	   process_$(BITS).o ptrace_$(BITS).o				\
+	   reboot.o return_address.o					\
 	   setup.o signal_$(BITS).o sys_sh.o sys_sh$(BITS).o		\
 	   syscalls_$(BITS).o time.o topology.o traps.o			\
 	   traps_$(BITS).o unwinder.o
@@ -22,7 +23,7 @@
 obj-y				+= cpu/
 obj-$(CONFIG_VSYSCALL)		+= vsyscall/
 obj-$(CONFIG_SMP)		+= smp.o
-obj-$(CONFIG_SH_STANDARD_BIOS)	+= sh_bios.o early_printk.o
+obj-$(CONFIG_SH_STANDARD_BIOS)	+= sh_bios.o
 obj-$(CONFIG_KGDB)		+= kgdb.o
 obj-$(CONFIG_SH_CPU_FREQ)	+= cpufreq.o
 obj-$(CONFIG_MODULES)		+= sh_ksyms_$(BITS).o module.o
@@ -39,6 +40,7 @@
 obj-$(CONFIG_DWARF_UNWINDER)	+= dwarf.o
 obj-$(CONFIG_PERF_EVENTS)	+= perf_event.o perf_callchain.o
 
+obj-$(CONFIG_HAVE_HW_BREAKPOINT)		+= hw_breakpoint.o
 obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)	+= localtimer.o
 
 EXTRA_CFLAGS += -Werror
diff --git a/arch/sh/kernel/cpu/Makefile b/arch/sh/kernel/cpu/Makefile
index d97c803..0e48bc6 100644
--- a/arch/sh/kernel/cpu/Makefile
+++ b/arch/sh/kernel/cpu/Makefile
@@ -17,5 +17,7 @@
 
 obj-$(CONFIG_SH_ADC)		+= adc.o
 obj-$(CONFIG_SH_CLK_CPG)	+= clock-cpg.o
+obj-$(CONFIG_SH_FPU)		+= fpu.o
+obj-$(CONFIG_SH_FPU_EMU)	+= fpu.o
 
 obj-y	+= irq/ init.o clock.o hwblk.o
diff --git a/arch/sh/kernel/cpu/adc.c b/arch/sh/kernel/cpu/adc.c
index da3d687..d307571 100644
--- a/arch/sh/kernel/cpu/adc.c
+++ b/arch/sh/kernel/cpu/adc.c
@@ -18,19 +18,19 @@
 
 	off = (channel & 0x03) << 2;
 
-	csr = ctrl_inb(ADCSR);
+	csr = __raw_readb(ADCSR);
 	csr = channel | ADCSR_ADST | ADCSR_CKS;
-	ctrl_outb(csr, ADCSR);
+	__raw_writeb(csr, ADCSR);
 
 	do {
-		csr = ctrl_inb(ADCSR);
+		csr = __raw_readb(ADCSR);
 	} while ((csr & ADCSR_ADF) == 0);
 
 	csr &= ~(ADCSR_ADF | ADCSR_ADST);
-	ctrl_outb(csr, ADCSR);
+	__raw_writeb(csr, ADCSR);
 
-	return (((ctrl_inb(ADDRAH + off) << 8) |
-		ctrl_inb(ADDRAL + off)) >> 6);
+	return (((__raw_readb(ADDRAH + off) << 8) |
+		__raw_readb(ADDRAL + off)) >> 6);
 }
 
 EXPORT_SYMBOL(adc_single);
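
adc.c only swaps ctrl_inb()/ctrl_outb() for the __raw_readb()/__raw_writeb() accessors; the logic is unchanged: start a conversion via ADCSR_ADST, poll ADCSR_ADF, and return the 10-bit result assembled from ADDRAH/ADDRAL. A hedged caller sketch (the reference voltage is illustrative, not from the source):

	#include <asm/adc.h>

	/* Scale a raw 10-bit adc_single() reading against a 3300 mV reference. */
	static unsigned int adc_mv_sketch(unsigned int channel)
	{
		unsigned int raw = adc_single(channel);	/* 0..1023 */

		return (raw * 3300) / 1023;
	}
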
diff --git a/arch/sh/kernel/cpu/clock-cpg.c b/arch/sh/kernel/cpu/clock-cpg.c
index 6dfe2cc..eed5eaf 100644
--- a/arch/sh/kernel/cpu/clock-cpg.c
+++ b/arch/sh/kernel/cpu/clock-cpg.c
@@ -149,7 +149,8 @@
 
 static unsigned long sh_clk_div4_recalc(struct clk *clk)
 {
-	struct clk_div_mult_table *table = clk->priv;
+	struct clk_div4_table *d4t = clk->priv;
+	struct clk_div_mult_table *table = d4t->div_mult_table;
 	unsigned int idx;
 
 	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
@@ -160,17 +161,90 @@
 	return clk->freq_table[idx].frequency;
 }
 
+static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
+{
+	struct clk_div4_table *d4t = clk->priv;
+	struct clk_div_mult_table *table = d4t->div_mult_table;
+	u32 value;
+	int ret;
+
+	if (!strcmp("pll_clk", parent->name))
+		value = __raw_readl(clk->enable_reg) & ~(1 << 7);
+	else
+		value = __raw_readl(clk->enable_reg) | (1 << 7);
+
+	ret = clk_reparent(clk, parent);
+	if (ret < 0)
+		return ret;
+
+	__raw_writel(value, clk->enable_reg);
+
+	/* Rebuild the frequency table */
+	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
+			     table, &clk->arch_flags);
+
+	return 0;
+}
+
+static int sh_clk_div4_set_rate(struct clk *clk, unsigned long rate, int algo_id)
+{
+	struct clk_div4_table *d4t = clk->priv;
+	unsigned long value;
+	int idx = clk_rate_table_find(clk, clk->freq_table, rate);
+	if (idx < 0)
+		return idx;
+
+	value = __raw_readl(clk->enable_reg);
+	value &= ~(0xf << clk->enable_bit);
+	value |= (idx << clk->enable_bit);
+	__raw_writel(value, clk->enable_reg);
+
+	if (d4t->kick)
+		d4t->kick(clk);
+
+	return 0;
+}
+
+static int sh_clk_div4_enable(struct clk *clk)
+{
+	__raw_writel(__raw_readl(clk->enable_reg) & ~(1 << 8), clk->enable_reg);
+	return 0;
+}
+
+static void sh_clk_div4_disable(struct clk *clk)
+{
+	__raw_writel(__raw_readl(clk->enable_reg) | (1 << 8), clk->enable_reg);
+}
+
 static struct clk_ops sh_clk_div4_clk_ops = {
 	.recalc		= sh_clk_div4_recalc,
+	.set_rate	= sh_clk_div4_set_rate,
 	.round_rate	= sh_clk_div_round_rate,
 };
 
-int __init sh_clk_div4_register(struct clk *clks, int nr,
-				struct clk_div_mult_table *table)
+static struct clk_ops sh_clk_div4_enable_clk_ops = {
+	.recalc		= sh_clk_div4_recalc,
+	.set_rate	= sh_clk_div4_set_rate,
+	.round_rate	= sh_clk_div_round_rate,
+	.enable		= sh_clk_div4_enable,
+	.disable	= sh_clk_div4_disable,
+};
+
+static struct clk_ops sh_clk_div4_reparent_clk_ops = {
+	.recalc		= sh_clk_div4_recalc,
+	.set_rate	= sh_clk_div4_set_rate,
+	.round_rate	= sh_clk_div_round_rate,
+	.enable		= sh_clk_div4_enable,
+	.disable	= sh_clk_div4_disable,
+	.set_parent	= sh_clk_div4_set_parent,
+};
+
+static int __init sh_clk_div4_register_ops(struct clk *clks, int nr,
+			struct clk_div4_table *table, struct clk_ops *ops)
 {
 	struct clk *clkp;
 	void *freq_table;
-	int nr_divs = table->nr_divisors;
+	int nr_divs = table->div_mult_table->nr_divisors;
 	int freq_table_size = sizeof(struct cpufreq_frequency_table);
 	int ret = 0;
 	int k;
@@ -185,7 +259,7 @@
 	for (k = 0; !ret && (k < nr); k++) {
 		clkp = clks + k;
 
-		clkp->ops = &sh_clk_div4_clk_ops;
+		clkp->ops = ops;
 		clkp->id = -1;
 		clkp->priv = table;
 
@@ -198,6 +272,26 @@
 	return ret;
 }
 
+int __init sh_clk_div4_register(struct clk *clks, int nr,
+				struct clk_div4_table *table)
+{
+	return sh_clk_div4_register_ops(clks, nr, table, &sh_clk_div4_clk_ops);
+}
+
+int __init sh_clk_div4_enable_register(struct clk *clks, int nr,
+				struct clk_div4_table *table)
+{
+	return sh_clk_div4_register_ops(clks, nr, table,
+					&sh_clk_div4_enable_clk_ops);
+}
+
+int __init sh_clk_div4_reparent_register(struct clk *clks, int nr,
+				struct clk_div4_table *table)
+{
+	return sh_clk_div4_register_ops(clks, nr, table,
+					&sh_clk_div4_reparent_clk_ops);
+}
+
 #ifdef CONFIG_SH_CLK_CPG_LEGACY
 static struct clk master_clk = {
 	.name		= "master_clk",
diff --git a/arch/sh/kernel/cpu/fpu.c b/arch/sh/kernel/cpu/fpu.c
new file mode 100644
index 0000000..f059ed6
--- /dev/null
+++ b/arch/sh/kernel/cpu/fpu.c
@@ -0,0 +1,84 @@
+#include <linux/sched.h>
+#include <asm/processor.h>
+#include <asm/fpu.h>
+
+int init_fpu(struct task_struct *tsk)
+{
+	if (tsk_used_math(tsk)) {
+		if ((boot_cpu_data.flags & CPU_HAS_FPU) && tsk == current)
+			unlazy_fpu(tsk, task_pt_regs(tsk));
+		return 0;
+	}
+
+	/*
+	 * Memory allocation at the first usage of the FPU and other state.
+	 */
+	if (!tsk->thread.xstate) {
+		tsk->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
+						      GFP_KERNEL);
+		if (!tsk->thread.xstate)
+			return -ENOMEM;
+	}
+
+	if (boot_cpu_data.flags & CPU_HAS_FPU) {
+		struct sh_fpu_hard_struct *fp = &tsk->thread.xstate->hardfpu;
+		memset(fp, 0, xstate_size);
+		fp->fpscr = FPSCR_INIT;
+	} else {
+		struct sh_fpu_soft_struct *fp = &tsk->thread.xstate->softfpu;
+		memset(fp, 0, xstate_size);
+		fp->fpscr = FPSCR_INIT;
+	}
+
+	set_stopped_child_used_math(tsk);
+	return 0;
+}
+
+#ifdef CONFIG_SH_FPU
+void __fpu_state_restore(void)
+{
+	struct task_struct *tsk = current;
+
+	restore_fpu(tsk);
+
+	task_thread_info(tsk)->status |= TS_USEDFPU;
+	tsk->fpu_counter++;
+}
+
+void fpu_state_restore(struct pt_regs *regs)
+{
+	struct task_struct *tsk = current;
+
+	if (unlikely(!user_mode(regs))) {
+		printk(KERN_ERR "BUG: FPU is used in kernel mode.\n");
+		BUG();
+		return;
+	}
+
+	if (!tsk_used_math(tsk)) {
+		local_irq_enable();
+		/*
+		 * does a slab alloc which can sleep
+		 */
+		if (init_fpu(tsk)) {
+			/*
+			 * ran out of memory!
+			 */
+			do_group_exit(SIGKILL);
+			return;
+		}
+		local_irq_disable();
+	}
+
+	grab_fpu(regs);
+
+	__fpu_state_restore();
+}
+
+BUILD_TRAP_HANDLER(fpu_state_restore)
+{
+	TRAP_HANDLER_DECL;
+
+	fpu_state_restore(regs);
+}
+#endif /* CONFIG_SH_FPU */
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index 89b4b76..c736422 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -24,22 +24,32 @@
 #include <asm/elf.h>
 #include <asm/io.h>
 #include <asm/smp.h>
-#ifdef CONFIG_SUPERH32
-#include <asm/ubc.h>
+#include <asm/sh_bios.h>
+
+#ifdef CONFIG_SH_FPU
+#define cpu_has_fpu	1
+#else
+#define cpu_has_fpu	0
+#endif
+
+#ifdef CONFIG_SH_DSP
+#define cpu_has_dsp	1
+#else
+#define cpu_has_dsp	0
 #endif
 
 /*
  * Generic wrapper for command line arguments to disable on-chip
  * peripherals (nofpu, nodsp, and so forth).
  */
-#define onchip_setup(x)				\
-static int x##_disabled __initdata = 0;		\
-						\
-static int __init x##_setup(char *opts)		\
-{						\
-	x##_disabled = 1;			\
-	return 1;				\
-}						\
+#define onchip_setup(x)					\
+static int x##_disabled __initdata = !cpu_has_##x;	\
+							\
+static int __init x##_setup(char *opts)			\
+{							\
+	x##_disabled = 1;				\
+	return 1;					\
+}							\
 __setup("no" __stringify(x), x##_setup);
 
 onchip_setup(fpu);
@@ -52,10 +62,10 @@
 static void __init speculative_execution_init(void)
 {
 	/* Clear RABD */
-	ctrl_outl(ctrl_inl(CPUOPM) & ~CPUOPM_RABD, CPUOPM);
+	__raw_writel(__raw_readl(CPUOPM) & ~CPUOPM_RABD, CPUOPM);
 
 	/* Flush the update */
-	(void)ctrl_inl(CPUOPM);
+	(void)__raw_readl(CPUOPM);
 	ctrl_barrier();
 }
 #else
@@ -89,7 +99,7 @@
 #endif
 
 /* 2nd-level cache init */
-void __uses_jump_to_uncached __attribute__ ((weak)) l2_cache_init(void)
+void __attribute__ ((weak)) l2_cache_init(void)
 {
 }
 
@@ -97,12 +107,12 @@
  * Generic first-level cache init
  */
 #ifdef CONFIG_SUPERH32
-static void __uses_jump_to_uncached cache_init(void)
+static void cache_init(void)
 {
 	unsigned long ccr, flags;
 
 	jump_to_uncached();
-	ccr = ctrl_inl(CCR);
+	ccr = __raw_readl(CCR);
 
 	/*
 	 * At this point we don't know whether the cache is enabled or not - a
@@ -146,7 +156,7 @@
 			for (addr = addrstart;
 			     addr < addrstart + waysize;
 			     addr += current_cpu_data.dcache.linesz)
-				ctrl_outl(0, addr);
+				__raw_writel(0, addr);
 
 			addrstart += current_cpu_data.dcache.way_incr;
 		} while (--ways);
@@ -179,7 +189,7 @@
 
 	l2_cache_init();
 
-	ctrl_outl(flags, CCR);
+	__raw_writel(flags, CCR);
 	back_to_cached();
 }
 #else
@@ -207,6 +217,18 @@
 		l2_cache_shape = -1; /* No S-cache */
 }
 
+static void __init fpu_init(void)
+{
+	/* Disable the FPU */
+	if (fpu_disabled && (current_cpu_data.flags & CPU_HAS_FPU)) {
+		printk("FPU Disabled\n");
+		current_cpu_data.flags &= ~CPU_HAS_FPU;
+	}
+
+	disable_fpu();
+	clear_used_math();
+}
+
 #ifdef CONFIG_SH_DSP
 static void __init release_dsp(void)
 {
@@ -244,28 +266,35 @@
 	if (sr & SR_DSP)
 		current_cpu_data.flags |= CPU_HAS_DSP;
 
+	/* Disable the DSP */
+	if (dsp_disabled && (current_cpu_data.flags & CPU_HAS_DSP)) {
+		printk("DSP Disabled\n");
+		current_cpu_data.flags &= ~CPU_HAS_DSP;
+	}
+
 	/* Now that we've determined the DSP status, clear the DSP bit. */
 	release_dsp();
 }
+#else
+static inline void __init dsp_init(void) { }
 #endif /* CONFIG_SH_DSP */
 
 /**
  * sh_cpu_init
  *
- * This is our initial entry point for each CPU, and is invoked on the boot
- * CPU prior to calling start_kernel(). For SMP, a combination of this and
- * start_secondary() will bring up each processor to a ready state prior
- * to hand forking the idle loop.
+ * This is our initial entry point for each CPU, and is invoked on the
+ * boot CPU prior to calling start_kernel(). For SMP, a combination of
+ * this and start_secondary() will bring up each processor to a ready
+ * state prior to hand forking the idle loop.
  *
- * We do all of the basic processor init here, including setting up the
- * caches, FPU, DSP, kicking the UBC, etc. By the time start_kernel() is
- * hit (and subsequently platform_setup()) things like determining the
- * CPU subtype and initial configuration will all be done.
+ * We do all of the basic processor init here, including setting up
+ * the caches, FPU, DSP, etc. By the time start_kernel() is hit (and
+ * subsequently platform_setup()) things like determining the CPU
+ * subtype and initial configuration will all be done.
  *
  * Each processor family is still responsible for doing its own probing
  * and cache configuration in detect_cpu_and_cache_system().
  */
-
 asmlinkage void __init sh_cpu_init(void)
 {
 	current_thread_info()->cpu = hard_smp_processor_id();
@@ -302,18 +331,8 @@
 		detect_cache_shape();
 	}
 
-	/* Disable the FPU */
-	if (fpu_disabled) {
-		printk("FPU Disabled\n");
-		current_cpu_data.flags &= ~CPU_HAS_FPU;
-	}
-
-	/* FPU initialization */
-	disable_fpu();
-	if ((current_cpu_data.flags & CPU_HAS_FPU)) {
-		current_thread_info()->status &= ~TS_USEDFPU;
-		clear_used_math();
-	}
+	fpu_init();
+	dsp_init();
 
 	/*
 	 * Initialize the per-CPU ASID cache very early, since the
@@ -321,18 +340,24 @@
 	 */
 	current_cpu_data.asid_cache = NO_CONTEXT;
 
-#ifdef CONFIG_SH_DSP
-	/* Probe for DSP */
-	dsp_init();
-
-	/* Disable the DSP */
-	if (dsp_disabled) {
-		printk("DSP Disabled\n");
-		current_cpu_data.flags &= ~CPU_HAS_DSP;
-		release_dsp();
-	}
-#endif
-
 	speculative_execution_init();
 	expmask_init();
+
+	/* Do the rest of the boot processor setup */
+	if (raw_smp_processor_id() == 0) {
+		/* Save off the BIOS VBR, if there is one */
+		sh_bios_vbr_init();
+
+		/*
+		 * Setup VBR for boot CPU. Secondary CPUs do this through
+		 * start_secondary().
+		 */
+		per_cpu_trap_init();
+
+		/*
+		 * Boot processor to setup the FP and extended state
+		 * context info.
+		 */
+		init_thread_xstate();
+	}
 }
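
Since onchip_setup() now seeds the _disabled flag from cpu_has_fpu/cpu_has_dsp, the "nofpu"/"nodsp" options remain a way to turn a built-in feature off, while kernels built without the feature start out disabled. For reference, onchip_setup(fpu) on a CONFIG_SH_FPU=y kernel expands to roughly:

	/* Expansion sketch of onchip_setup(fpu) with cpu_has_fpu == 1. */
	static int fpu_disabled __initdata = !1;	/* 0: FPU enabled by default */

	static int __init fpu_setup(char *opts)
	{
		fpu_disabled = 1;			/* "nofpu" was passed */
		return 1;
	}
	__setup("nofpu", fpu_setup);
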
diff --git a/arch/sh/kernel/cpu/irq/intc-sh5.c b/arch/sh/kernel/cpu/irq/intc-sh5.c
index 06e7e29..96a2395 100644
--- a/arch/sh/kernel/cpu/irq/intc-sh5.c
+++ b/arch/sh/kernel/cpu/irq/intc-sh5.c
@@ -123,7 +123,7 @@
 		bitmask = 1 << (irq - 32);
 	}
 
-	ctrl_outl(bitmask, reg);
+	__raw_writel(bitmask, reg);
 }
 
 static void disable_intc_irq(unsigned int irq)
@@ -139,7 +139,7 @@
 		bitmask = 1 << (irq - 32);
 	}
 
-	ctrl_outl(bitmask, reg);
+	__raw_writel(bitmask, reg);
 }
 
 static void mask_and_ack_intc(unsigned int irq)
@@ -170,11 +170,11 @@
 
 
 	/* Disable all interrupts and set all priorities to 0 to avoid trouble */
-	ctrl_outl(-1, INTC_INTDSB_0);
-	ctrl_outl(-1, INTC_INTDSB_1);
+	__raw_writel(-1, INTC_INTDSB_0);
+	__raw_writel(-1, INTC_INTDSB_1);
 
 	for (reg = INTC_INTPRI_0, i = 0; i < INTC_INTPRI_PREGS; i++, reg += 8)
-		ctrl_outl( NO_PRIORITY, reg);
+		__raw_writel( NO_PRIORITY, reg);
 
 
 #ifdef CONFIG_SH_CAYMAN
@@ -199,7 +199,7 @@
 			reg = INTC_ICR_SET;
 			i = IRQ_IRL0;
 		}
-		ctrl_outl(INTC_ICR_IRLM, reg);
+		__raw_writel(INTC_ICR_IRLM, reg);
 
 		/* Set interrupt priorities according to platform description */
 		for (data = 0, reg = INTC_INTPRI_0; i < NR_INTC_IRQS; i++) {
@@ -207,7 +207,7 @@
 				((i % INTC_INTPRI_PPREG) * 4);
 			if ((i % INTC_INTPRI_PPREG) == (INTC_INTPRI_PPREG - 1)) {
 				/* Upon the 7th, set Priority Register */
-				ctrl_outl(data, reg);
+				__raw_writel(data, reg);
 				data = 0;
 				reg += 8;
 			}
diff --git a/arch/sh/kernel/cpu/sh2/clock-sh7619.c b/arch/sh/kernel/cpu/sh2/clock-sh7619.c
index 4fe8631..0c9f24d 100644
--- a/arch/sh/kernel/cpu/sh2/clock-sh7619.c
+++ b/arch/sh/kernel/cpu/sh2/clock-sh7619.c
@@ -31,7 +31,7 @@
 
 static void master_clk_init(struct clk *clk)
 {
-	clk->rate *= PLL2 * pll1rate[(ctrl_inw(FREQCR) >> 8) & 7];
+	clk->rate *= PLL2 * pll1rate[(__raw_readw(FREQCR) >> 8) & 7];
 }
 
 static struct clk_ops sh7619_master_clk_ops = {
@@ -40,7 +40,7 @@
 
 static unsigned long module_clk_recalc(struct clk *clk)
 {
-	int idx = (ctrl_inw(FREQCR) & 0x0007);
+	int idx = (__raw_readw(FREQCR) & 0x0007);
 	return clk->parent->rate / pfc_divisors[idx];
 }
 
@@ -50,7 +50,7 @@
 
 static unsigned long bus_clk_recalc(struct clk *clk)
 {
-	return clk->parent->rate / pll1rate[(ctrl_inw(FREQCR) >> 8) & 7];
+	return clk->parent->rate / pll1rate[(__raw_readw(FREQCR) >> 8) & 7];
 }
 
 static struct clk_ops sh7619_bus_clk_ops = {
diff --git a/arch/sh/kernel/cpu/sh2a/clock-sh7201.c b/arch/sh/kernel/cpu/sh2a/clock-sh7201.c
index 7814c76..b26264d 100644
--- a/arch/sh/kernel/cpu/sh2a/clock-sh7201.c
+++ b/arch/sh/kernel/cpu/sh2a/clock-sh7201.c
@@ -34,7 +34,7 @@
 
 static void master_clk_init(struct clk *clk)
 {
-	return 10000000 * PLL2 * pll1rate[(ctrl_inw(FREQCR) >> 8) & 0x0007];
+	return 10000000 * PLL2 * pll1rate[(__raw_readw(FREQCR) >> 8) & 0x0007];
 }
 
 static struct clk_ops sh7201_master_clk_ops = {
@@ -43,7 +43,7 @@
 
 static unsigned long module_clk_recalc(struct clk *clk)
 {
-	int idx = (ctrl_inw(FREQCR) & 0x0007);
+	int idx = (__raw_readw(FREQCR) & 0x0007);
 	return clk->parent->rate / pfc_divisors[idx];
 }
 
@@ -53,7 +53,7 @@
 
 static unsigned long bus_clk_recalc(struct clk *clk)
 {
-	int idx = (ctrl_inw(FREQCR) & 0x0007);
+	int idx = (__raw_readw(FREQCR) & 0x0007);
 	return clk->parent->rate / pfc_divisors[idx];
 }
 
@@ -63,7 +63,7 @@
 
 static unsigned long cpu_clk_recalc(struct clk *clk)
 {
-	int idx = ((ctrl_inw(FREQCR) >> 4) & 0x0007);
+	int idx = ((__raw_readw(FREQCR) >> 4) & 0x0007);
 	return clk->parent->rate / ifc_divisors[idx];
 }
 
diff --git a/arch/sh/kernel/cpu/sh2a/clock-sh7203.c b/arch/sh/kernel/cpu/sh2a/clock-sh7203.c
index 9409869..7e75d8f 100644
--- a/arch/sh/kernel/cpu/sh2a/clock-sh7203.c
+++ b/arch/sh/kernel/cpu/sh2a/clock-sh7203.c
@@ -39,7 +39,7 @@
 
 static void master_clk_init(struct clk *clk)
 {
-	clk->rate *= pll1rate[(ctrl_inw(FREQCR) >> 8) & 0x0003] * PLL2 ;
+	clk->rate *= pll1rate[(__raw_readw(FREQCR) >> 8) & 0x0003] * PLL2 ;
 }
 
 static struct clk_ops sh7203_master_clk_ops = {
@@ -48,7 +48,7 @@
 
 static unsigned long module_clk_recalc(struct clk *clk)
 {
-	int idx = (ctrl_inw(FREQCR) & 0x0007);
+	int idx = (__raw_readw(FREQCR) & 0x0007);
 	return clk->parent->rate / pfc_divisors[idx];
 }
 
@@ -58,7 +58,7 @@
 
 static unsigned long bus_clk_recalc(struct clk *clk)
 {
-	int idx = (ctrl_inw(FREQCR) & 0x0007);
+	int idx = (__raw_readw(FREQCR) & 0x0007);
 	return clk->parent->rate / pfc_divisors[idx-2];
 }
 
diff --git a/arch/sh/kernel/cpu/sh2a/clock-sh7206.c b/arch/sh/kernel/cpu/sh2a/clock-sh7206.c
index c2268bd..b27a5e2 100644
--- a/arch/sh/kernel/cpu/sh2a/clock-sh7206.c
+++ b/arch/sh/kernel/cpu/sh2a/clock-sh7206.c
@@ -34,7 +34,7 @@
 
 static void master_clk_init(struct clk *clk)
 {
-	clk->rate *= PLL2 * pll1rate[(ctrl_inw(FREQCR) >> 8) & 0x0007];
+	clk->rate *= PLL2 * pll1rate[(__raw_readw(FREQCR) >> 8) & 0x0007];
 }
 
 static struct clk_ops sh7206_master_clk_ops = {
@@ -43,7 +43,7 @@
 
 static unsigned long module_clk_recalc(struct clk *clk)
 {
-	int idx = (ctrl_inw(FREQCR) & 0x0007);
+	int idx = (__raw_readw(FREQCR) & 0x0007);
 	return clk->parent->rate / pfc_divisors[idx];
 }
 
@@ -53,7 +53,7 @@
 
 static unsigned long bus_clk_recalc(struct clk *clk)
 {
-	return clk->parent->rate / pll1rate[(ctrl_inw(FREQCR) >> 8) & 0x0007];
+	return clk->parent->rate / pll1rate[(__raw_readw(FREQCR) >> 8) & 0x0007];
 }
 
 static struct clk_ops sh7206_bus_clk_ops = {
@@ -62,7 +62,7 @@
 
 static unsigned long cpu_clk_recalc(struct clk *clk)
 {
-	int idx = (ctrl_inw(FREQCR) & 0x0007);
+	int idx = (__raw_readw(FREQCR) & 0x0007);
 	return clk->parent->rate / ifc_divisors[idx];
 }
 
diff --git a/arch/sh/kernel/cpu/sh2a/fpu.c b/arch/sh/kernel/cpu/sh2a/fpu.c
index d395ce5..488d24e 100644
--- a/arch/sh/kernel/cpu/sh2a/fpu.c
+++ b/arch/sh/kernel/cpu/sh2a/fpu.c
@@ -26,8 +26,7 @@
 /*
  * Save FPU registers onto task structure.
  */
-void
-save_fpu(struct task_struct *tsk)
+void save_fpu(struct task_struct *tsk)
 {
 	unsigned long dummy;
 
@@ -52,7 +51,7 @@
 		     "fmov.s	fr0, @-%0\n\t"
 		     "lds	%3, fpscr\n\t"
 		     : "=r" (dummy)
-		     : "0" ((char *)(&tsk->thread.fpu.hard.status)),
+		     : "0" ((char *)(&tsk->thread.xstate->hardfpu.status)),
 		       "r" (FPSCR_RCHG),
 		       "r" (FPSCR_INIT)
 		     : "memory");
@@ -60,8 +59,7 @@
 	disable_fpu();
 }
 
-static void
-restore_fpu(struct task_struct *tsk)
+void restore_fpu(struct task_struct *tsk)
 {
 	unsigned long dummy;
 
@@ -85,45 +83,12 @@
 		     "lds.l	@%0+, fpscr\n\t"
 		     "lds.l	@%0+, fpul\n\t"
 		     : "=r" (dummy)
-		     : "0" (&tsk->thread.fpu), "r" (FPSCR_RCHG)
+		     : "0" (tsk->thread.xstate), "r" (FPSCR_RCHG)
 		     : "memory");
 	disable_fpu();
 }
 
 /*
- * Load the FPU with signalling NANS.  This bit pattern we're using
- * has the property that no matter wether considered as single or as
- * double precission represents signaling NANS.
- */
-
-static void
-fpu_init(void)
-{
-	enable_fpu();
-	asm volatile("lds	%0, fpul\n\t"
-		     "fsts	fpul, fr0\n\t"
-		     "fsts	fpul, fr1\n\t"
-		     "fsts	fpul, fr2\n\t"
-		     "fsts	fpul, fr3\n\t"
-		     "fsts	fpul, fr4\n\t"
-		     "fsts	fpul, fr5\n\t"
-		     "fsts	fpul, fr6\n\t"
-		     "fsts	fpul, fr7\n\t"
-		     "fsts	fpul, fr8\n\t"
-		     "fsts	fpul, fr9\n\t"
-		     "fsts	fpul, fr10\n\t"
-		     "fsts	fpul, fr11\n\t"
-		     "fsts	fpul, fr12\n\t"
-		     "fsts	fpul, fr13\n\t"
-		     "fsts	fpul, fr14\n\t"
-		     "fsts	fpul, fr15\n\t"
-		     "lds	%2, fpscr\n\t"
-		     : /* no output */
-		     : "r" (0), "r" (FPSCR_RCHG), "r" (FPSCR_INIT));
-	disable_fpu();
-}
-
-/*
  *	Emulate arithmetic ops on denormalized number for some FPU insns.
  */
 
@@ -490,9 +455,9 @@
 	if ((finsn & 0xf1ff) == 0xf0ad) { /* fcnvsd */
 		struct task_struct *tsk = current;
 
-		if ((tsk->thread.fpu.hard.fpscr & FPSCR_FPU_ERROR)) {
+		if ((tsk->thread.xstate->hardfpu.fpscr & FPSCR_FPU_ERROR)) {
 			/* FPU error */
-			denormal_to_double (&tsk->thread.fpu.hard,
+			denormal_to_double (&tsk->thread.xstate->hardfpu,
 					    (finsn >> 8) & 0xf);
 		} else
 			return 0;
@@ -507,9 +472,9 @@
 
 		n = (finsn >> 8) & 0xf;
 		m = (finsn >> 4) & 0xf;
-		hx = tsk->thread.fpu.hard.fp_regs[n];
-		hy = tsk->thread.fpu.hard.fp_regs[m];
-		fpscr = tsk->thread.fpu.hard.fpscr;
+		hx = tsk->thread.xstate->hardfpu.fp_regs[n];
+		hy = tsk->thread.xstate->hardfpu.fp_regs[m];
+		fpscr = tsk->thread.xstate->hardfpu.fpscr;
 		prec = fpscr & (1 << 19);
 
 		if ((fpscr & FPSCR_FPU_ERROR)
@@ -519,15 +484,15 @@
 
 			/* FPU error because of denormal */
 			llx = ((long long) hx << 32)
-			       | tsk->thread.fpu.hard.fp_regs[n+1];
+			       | tsk->thread.xstate->hardfpu.fp_regs[n+1];
 			lly = ((long long) hy << 32)
-			       | tsk->thread.fpu.hard.fp_regs[m+1];
+			       | tsk->thread.xstate->hardfpu.fp_regs[m+1];
 			if ((hx & 0x7fffffff) >= 0x00100000)
 				llx = denormal_muld(lly, llx);
 			else
 				llx = denormal_muld(llx, lly);
-			tsk->thread.fpu.hard.fp_regs[n] = llx >> 32;
-			tsk->thread.fpu.hard.fp_regs[n+1] = llx & 0xffffffff;
+			tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;
+			tsk->thread.xstate->hardfpu.fp_regs[n+1] = llx & 0xffffffff;
 		} else if ((fpscr & FPSCR_FPU_ERROR)
 		     && (!prec && ((hx & 0x7fffffff) < 0x00800000
 				   || (hy & 0x7fffffff) < 0x00800000))) {
@@ -536,7 +501,7 @@
 				hx = denormal_mulf(hy, hx);
 			else
 				hx = denormal_mulf(hx, hy);
-			tsk->thread.fpu.hard.fp_regs[n] = hx;
+			tsk->thread.xstate->hardfpu.fp_regs[n] = hx;
 		} else
 			return 0;
 
@@ -550,9 +515,9 @@
 
 		n = (finsn >> 8) & 0xf;
 		m = (finsn >> 4) & 0xf;
-		hx = tsk->thread.fpu.hard.fp_regs[n];
-		hy = tsk->thread.fpu.hard.fp_regs[m];
-		fpscr = tsk->thread.fpu.hard.fpscr;
+		hx = tsk->thread.xstate->hardfpu.fp_regs[n];
+		hy = tsk->thread.xstate->hardfpu.fp_regs[m];
+		fpscr = tsk->thread.xstate->hardfpu.fpscr;
 		prec = fpscr & (1 << 19);
 
 		if ((fpscr & FPSCR_FPU_ERROR)
@@ -562,15 +527,15 @@
 
 			/* FPU error because of denormal */
 			llx = ((long long) hx << 32)
-			       | tsk->thread.fpu.hard.fp_regs[n+1];
+			       | tsk->thread.xstate->hardfpu.fp_regs[n+1];
 			lly = ((long long) hy << 32)
-			       | tsk->thread.fpu.hard.fp_regs[m+1];
+			       | tsk->thread.xstate->hardfpu.fp_regs[m+1];
 			if ((finsn & 0xf00f) == 0xf000)
 				llx = denormal_addd(llx, lly);
 			else
 				llx = denormal_addd(llx, lly ^ (1LL << 63));
-			tsk->thread.fpu.hard.fp_regs[n] = llx >> 32;
-			tsk->thread.fpu.hard.fp_regs[n+1] = llx & 0xffffffff;
+			tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;
+			tsk->thread.xstate->hardfpu.fp_regs[n+1] = llx & 0xffffffff;
 		} else if ((fpscr & FPSCR_FPU_ERROR)
 		     && (!prec && ((hx & 0x7fffffff) < 0x00800000
 				   || (hy & 0x7fffffff) < 0x00800000))) {
@@ -579,7 +544,7 @@
 				hx = denormal_addf(hx, hy);
 			else
 				hx = denormal_addf(hx, hy ^ 0x80000000);
-			tsk->thread.fpu.hard.fp_regs[n] = hx;
+			tsk->thread.xstate->hardfpu.fp_regs[n] = hx;
 		} else
 			return 0;
 
@@ -597,7 +562,7 @@
 
 	__unlazy_fpu(tsk, regs);
 	if (ieee_fpe_handler(regs)) {
-		tsk->thread.fpu.hard.fpscr &=
+		tsk->thread.xstate->hardfpu.fpscr &=
 			~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK);
 		grab_fpu(regs);
 		restore_fpu(tsk);
@@ -607,33 +572,3 @@
 
 	force_sig(SIGFPE, tsk);
 }
-
-void fpu_state_restore(struct pt_regs *regs)
-{
-	struct task_struct *tsk = current;
-
-	grab_fpu(regs);
-	if (unlikely(!user_mode(regs))) {
-		printk(KERN_ERR "BUG: FPU is used in kernel mode.\n");
-		BUG();
-		return;
-	}
-
-	if (likely(used_math())) {
-		/* Using the FPU again.  */
-		restore_fpu(tsk);
-	} else	{
-		/* First time FPU user.  */
-		fpu_init();
-		set_used_math();
-	}
-	task_thread_info(tsk)->status |= TS_USEDFPU;
-	tsk->fpu_counter++;
-}
-
-BUILD_TRAP_HANDLER(fpu_state_restore)
-{
-	TRAP_HANDLER_DECL;
-
-	fpu_state_restore(regs);
-}
diff --git a/arch/sh/kernel/cpu/sh3/clock-sh3.c b/arch/sh/kernel/cpu/sh3/clock-sh3.c
index 27b8738..b78384a 100644
--- a/arch/sh/kernel/cpu/sh3/clock-sh3.c
+++ b/arch/sh/kernel/cpu/sh3/clock-sh3.c
@@ -28,7 +28,7 @@
 
 static void master_clk_init(struct clk *clk)
 {
-	int frqcr = ctrl_inw(FRQCR);
+	int frqcr = __raw_readw(FRQCR);
 	int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003);
 
 	clk->rate *= pfc_divisors[idx];
@@ -40,7 +40,7 @@
 
 static unsigned long module_clk_recalc(struct clk *clk)
 {
-	int frqcr = ctrl_inw(FRQCR);
+	int frqcr = __raw_readw(FRQCR);
 	int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003);
 
 	return clk->parent->rate / pfc_divisors[idx];
@@ -52,7 +52,7 @@
 
 static unsigned long bus_clk_recalc(struct clk *clk)
 {
-	int frqcr = ctrl_inw(FRQCR);
+	int frqcr = __raw_readw(FRQCR);
 	int idx = ((frqcr & 0x8000) >> 13) | ((frqcr & 0x0030) >> 4);
 
 	return clk->parent->rate / stc_multipliers[idx];
@@ -64,7 +64,7 @@
 
 static unsigned long cpu_clk_recalc(struct clk *clk)
 {
-	int frqcr = ctrl_inw(FRQCR);
+	int frqcr = __raw_readw(FRQCR);
 	int idx = ((frqcr & 0x4000) >> 12) | ((frqcr & 0x000c) >> 2);
 
 	return clk->parent->rate / ifc_divisors[idx];
diff --git a/arch/sh/kernel/cpu/sh3/clock-sh7705.c b/arch/sh/kernel/cpu/sh3/clock-sh7705.c
index 0ca8f2c..0ecea14 100644
--- a/arch/sh/kernel/cpu/sh3/clock-sh7705.c
+++ b/arch/sh/kernel/cpu/sh3/clock-sh7705.c
@@ -32,7 +32,7 @@
 
 static void master_clk_init(struct clk *clk)
 {
-	clk->rate *= pfc_divisors[ctrl_inw(FRQCR) & 0x0003];
+	clk->rate *= pfc_divisors[__raw_readw(FRQCR) & 0x0003];
 }
 
 static struct clk_ops sh7705_master_clk_ops = {
@@ -41,7 +41,7 @@
 
 static unsigned long module_clk_recalc(struct clk *clk)
 {
-	int idx = ctrl_inw(FRQCR) & 0x0003;
+	int idx = __raw_readw(FRQCR) & 0x0003;
 	return clk->parent->rate / pfc_divisors[idx];
 }
 
@@ -51,7 +51,7 @@
 
 static unsigned long bus_clk_recalc(struct clk *clk)
 {
-	int idx = (ctrl_inw(FRQCR) & 0x0300) >> 8;
+	int idx = (__raw_readw(FRQCR) & 0x0300) >> 8;
 	return clk->parent->rate / stc_multipliers[idx];
 }
 
@@ -61,7 +61,7 @@
 
 static unsigned long cpu_clk_recalc(struct clk *clk)
 {
-	int idx = (ctrl_inw(FRQCR) & 0x0030) >> 4;
+	int idx = (__raw_readw(FRQCR) & 0x0030) >> 4;
 	return clk->parent->rate / ifc_divisors[idx];
 }
 
diff --git a/arch/sh/kernel/cpu/sh3/clock-sh7706.c b/arch/sh/kernel/cpu/sh3/clock-sh7706.c
index 4bf7887..6f9ff8b 100644
--- a/arch/sh/kernel/cpu/sh3/clock-sh7706.c
+++ b/arch/sh/kernel/cpu/sh3/clock-sh7706.c
@@ -24,7 +24,7 @@
 
 static void master_clk_init(struct clk *clk)
 {
-	int frqcr = ctrl_inw(FRQCR);
+	int frqcr = __raw_readw(FRQCR);
 	int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003);
 
 	clk->rate *= pfc_divisors[idx];
@@ -36,7 +36,7 @@
 
 static unsigned long module_clk_recalc(struct clk *clk)
 {
-	int frqcr = ctrl_inw(FRQCR);
+	int frqcr = __raw_readw(FRQCR);
 	int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003);
 
 	return clk->parent->rate / pfc_divisors[idx];
@@ -48,7 +48,7 @@
 
 static unsigned long bus_clk_recalc(struct clk *clk)
 {
-	int frqcr = ctrl_inw(FRQCR);
+	int frqcr = __raw_readw(FRQCR);
 	int idx = ((frqcr & 0x8000) >> 13) | ((frqcr & 0x0030) >> 4);
 
 	return clk->parent->rate / stc_multipliers[idx];
@@ -60,7 +60,7 @@
 
 static unsigned long cpu_clk_recalc(struct clk *clk)
 {
-	int frqcr = ctrl_inw(FRQCR);
+	int frqcr = __raw_readw(FRQCR);
 	int idx = ((frqcr & 0x4000) >> 12) | ((frqcr & 0x000c) >> 2);
 
 	return clk->parent->rate / ifc_divisors[idx];
diff --git a/arch/sh/kernel/cpu/sh3/clock-sh7709.c b/arch/sh/kernel/cpu/sh3/clock-sh7709.c
index e874950..f302ba0 100644
--- a/arch/sh/kernel/cpu/sh3/clock-sh7709.c
+++ b/arch/sh/kernel/cpu/sh3/clock-sh7709.c
@@ -24,7 +24,7 @@
 
 static void master_clk_init(struct clk *clk)
 {
-	int frqcr = ctrl_inw(FRQCR);
+	int frqcr = __raw_readw(FRQCR);
 	int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003);
 
 	clk->rate *= pfc_divisors[idx];
@@ -36,7 +36,7 @@
 
 static unsigned long module_clk_recalc(struct clk *clk)
 {
-	int frqcr = ctrl_inw(FRQCR);
+	int frqcr = __raw_readw(FRQCR);
 	int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003);
 
 	return clk->parent->rate / pfc_divisors[idx];
@@ -48,7 +48,7 @@
 
 static unsigned long bus_clk_recalc(struct clk *clk)
 {
-	int frqcr = ctrl_inw(FRQCR);
+	int frqcr = __raw_readw(FRQCR);
 	int idx = (frqcr & 0x0080) ?
 		((frqcr & 0x8000) >> 13) | ((frqcr & 0x0030) >> 4) : 1;
 
@@ -61,7 +61,7 @@
 
 static unsigned long cpu_clk_recalc(struct clk *clk)
 {
-	int frqcr = ctrl_inw(FRQCR);
+	int frqcr = __raw_readw(FRQCR);
 	int idx = ((frqcr & 0x4000) >> 12) | ((frqcr & 0x000c) >> 2);
 
 	return clk->parent->rate / ifc_divisors[idx];
diff --git a/arch/sh/kernel/cpu/sh3/clock-sh7710.c b/arch/sh/kernel/cpu/sh3/clock-sh7710.c
index 030a58b..29a87d8 100644
--- a/arch/sh/kernel/cpu/sh3/clock-sh7710.c
+++ b/arch/sh/kernel/cpu/sh3/clock-sh7710.c
@@ -26,7 +26,7 @@
 
 static void master_clk_init(struct clk *clk)
 {
-	clk->rate *= md_table[ctrl_inw(FRQCR) & 0x0007];
+	clk->rate *= md_table[__raw_readw(FRQCR) & 0x0007];
 }
 
 static struct clk_ops sh7710_master_clk_ops = {
@@ -35,7 +35,7 @@
 
 static unsigned long module_clk_recalc(struct clk *clk)
 {
-	int idx = (ctrl_inw(FRQCR) & 0x0007);
+	int idx = (__raw_readw(FRQCR) & 0x0007);
 	return clk->parent->rate / md_table[idx];
 }
 
@@ -45,7 +45,7 @@
 
 static unsigned long bus_clk_recalc(struct clk *clk)
 {
-	int idx = (ctrl_inw(FRQCR) & 0x0700) >> 8;
+	int idx = (__raw_readw(FRQCR) & 0x0700) >> 8;
 	return clk->parent->rate / md_table[idx];
 }
 
@@ -55,7 +55,7 @@
 
 static unsigned long cpu_clk_recalc(struct clk *clk)
 {
-	int idx = (ctrl_inw(FRQCR) & 0x0070) >> 4;
+	int idx = (__raw_readw(FRQCR) & 0x0070) >> 4;
 	return clk->parent->rate / md_table[idx];
 }
 
diff --git a/arch/sh/kernel/cpu/sh3/clock-sh7712.c b/arch/sh/kernel/cpu/sh3/clock-sh7712.c
index 6428ee6..b0d0c52 100644
--- a/arch/sh/kernel/cpu/sh3/clock-sh7712.c
+++ b/arch/sh/kernel/cpu/sh3/clock-sh7712.c
@@ -23,7 +23,7 @@
 
 static void master_clk_init(struct clk *clk)
 {
-	int frqcr = ctrl_inw(FRQCR);
+	int frqcr = __raw_readw(FRQCR);
 	int idx = (frqcr & 0x0300) >> 8;
 
 	clk->rate *= multipliers[idx];
@@ -35,7 +35,7 @@
 
 static unsigned long module_clk_recalc(struct clk *clk)
 {
-	int frqcr = ctrl_inw(FRQCR);
+	int frqcr = __raw_readw(FRQCR);
 	int idx = frqcr & 0x0007;
 
 	return clk->parent->rate / divisors[idx];
@@ -47,7 +47,7 @@
 
 static unsigned long cpu_clk_recalc(struct clk *clk)
 {
-	int frqcr = ctrl_inw(FRQCR);
+	int frqcr = __raw_readw(FRQCR);
 	int idx = (frqcr & 0x0030) >> 4;
 
 	return clk->parent->rate / divisors[idx];
diff --git a/arch/sh/kernel/cpu/sh3/ex.S b/arch/sh/kernel/cpu/sh3/ex.S
index 46610c3..99b4d02 100644
--- a/arch/sh/kernel/cpu/sh3/ex.S
+++ b/arch/sh/kernel/cpu/sh3/ex.S
@@ -49,7 +49,7 @@
 	.long	exception_error	! reserved_instruction (filled by trap_init) /* 180 */
 	.long	exception_error	! illegal_slot_instruction (filled by trap_init) /*1A0*/
 	.long	nmi_trap_handler	/* 1C0 */	! Allow trap to debugger
-	.long	break_point_trap	/* 1E0 */
+	.long	breakpoint_trap_handler	/* 1E0 */
 
 	/*
 	 * Pad the remainder of the table out, exceptions residing in far
diff --git a/arch/sh/kernel/cpu/sh3/probe.c b/arch/sh/kernel/cpu/sh3/probe.c
index f9c7df6..295ec4c 100644
--- a/arch/sh/kernel/cpu/sh3/probe.c
+++ b/arch/sh/kernel/cpu/sh3/probe.c
@@ -16,7 +16,7 @@
 #include <asm/cache.h>
 #include <asm/io.h>
 
-int __uses_jump_to_uncached detect_cpu_and_cache_system(void)
+int detect_cpu_and_cache_system(void)
 {
 	unsigned long addr0, addr1, data0, data1, data2, data3;
 
@@ -30,23 +30,23 @@
 	addr1 = CACHE_OC_ADDRESS_ARRAY + (1 << 12);
 
 	/* First, write back & invalidate */
-	data0  = ctrl_inl(addr0);
-	ctrl_outl(data0&~(SH_CACHE_VALID|SH_CACHE_UPDATED), addr0);
-	data1  = ctrl_inl(addr1);
-	ctrl_outl(data1&~(SH_CACHE_VALID|SH_CACHE_UPDATED), addr1);
+	data0  = __raw_readl(addr0);
+	__raw_writel(data0&~(SH_CACHE_VALID|SH_CACHE_UPDATED), addr0);
+	data1  = __raw_readl(addr1);
+	__raw_writel(data1&~(SH_CACHE_VALID|SH_CACHE_UPDATED), addr1);
 
 	/* Next, check if there's shadow or not */
-	data0 = ctrl_inl(addr0);
+	data0 = __raw_readl(addr0);
 	data0 ^= SH_CACHE_VALID;
-	ctrl_outl(data0, addr0);
-	data1 = ctrl_inl(addr1);
+	__raw_writel(data0, addr0);
+	data1 = __raw_readl(addr1);
 	data2 = data1 ^ SH_CACHE_VALID;
-	ctrl_outl(data2, addr1);
-	data3 = ctrl_inl(addr0);
+	__raw_writel(data2, addr1);
+	data3 = __raw_readl(addr0);
 
 	/* Lastly, invalidate them. */
-	ctrl_outl(data0&~SH_CACHE_VALID, addr0);
-	ctrl_outl(data2&~SH_CACHE_VALID, addr1);
+	__raw_writel(data0&~SH_CACHE_VALID, addr0);
+	__raw_writel(data2&~SH_CACHE_VALID, addr1);
 
 	back_to_cached();
 
@@ -94,9 +94,9 @@
 		boot_cpu_data.dcache.way_incr	= (1 << 13);
 		boot_cpu_data.dcache.entry_mask	= 0x1ff0;
 		boot_cpu_data.dcache.sets	= 512;
-		ctrl_outl(CCR_CACHE_32KB, CCR3_REG);
+		__raw_writel(CCR_CACHE_32KB, CCR3_REG);
 #else
-		ctrl_outl(CCR_CACHE_16KB, CCR3_REG);
+		__raw_writel(CCR_CACHE_16KB, CCR3_REG);
 #endif
 #endif
 	}
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh3.c b/arch/sh/kernel/cpu/sh3/setup-sh3.c
index c988468..53be70b 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh3.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh3.c
@@ -58,7 +58,7 @@
 void __init plat_irq_setup_pins(int mode)
 {
 	if (mode == IRQ_MODE_IRQ) {
-		ctrl_outw(ctrl_inw(INTC_ICR1) & ~INTC_ICR1_IRQLVL, INTC_ICR1);
+		__raw_writew(__raw_readw(INTC_ICR1) & ~INTC_ICR1_IRQLVL, INTC_ICR1);
 		register_intc_controller(&intc_desc_irq0123);
 		return;
 	}
diff --git a/arch/sh/kernel/cpu/sh4/clock-sh4-202.c b/arch/sh/kernel/cpu/sh4/clock-sh4-202.c
index 21421e3..6b80850 100644
--- a/arch/sh/kernel/cpu/sh4/clock-sh4-202.c
+++ b/arch/sh/kernel/cpu/sh4/clock-sh4-202.c
@@ -23,7 +23,7 @@
 
 static unsigned long emi_clk_recalc(struct clk *clk)
 {
-	int idx = ctrl_inl(CPG2_FRQCR3) & 0x0007;
+	int idx = __raw_readl(CPG2_FRQCR3) & 0x0007;
 	return clk->parent->rate / frqcr3_divisors[idx];
 }
 
@@ -52,7 +52,7 @@
 
 static unsigned long femi_clk_recalc(struct clk *clk)
 {
-	int idx = (ctrl_inl(CPG2_FRQCR3) >> 3) & 0x0007;
+	int idx = (__raw_readl(CPG2_FRQCR3) >> 3) & 0x0007;
 	return clk->parent->rate / frqcr3_divisors[idx];
 }
 
@@ -92,7 +92,7 @@
 
 static unsigned long shoc_clk_recalc(struct clk *clk)
 {
-	int idx = (ctrl_inl(CPG2_FRQCR3) >> 6) & 0x0007;
+	int idx = (__raw_readl(CPG2_FRQCR3) >> 6) & 0x0007;
 	return clk->parent->rate / frqcr3_divisors[idx];
 }
 
@@ -122,10 +122,10 @@
 
 	tmp = frqcr3_lookup(clk, rate);
 
-	frqcr3 = ctrl_inl(CPG2_FRQCR3);
+	frqcr3 = __raw_readl(CPG2_FRQCR3);
 	frqcr3 &= ~(0x0007 << 6);
 	frqcr3 |= tmp << 6;
-	ctrl_outl(frqcr3, CPG2_FRQCR3);
+	__raw_writel(frqcr3, CPG2_FRQCR3);
 
 	clk->rate = clk->parent->rate / frqcr3_divisors[tmp];
 
diff --git a/arch/sh/kernel/cpu/sh4/clock-sh4.c b/arch/sh/kernel/cpu/sh4/clock-sh4.c
index 73294d9..5add75c 100644
--- a/arch/sh/kernel/cpu/sh4/clock-sh4.c
+++ b/arch/sh/kernel/cpu/sh4/clock-sh4.c
@@ -28,7 +28,7 @@
 
 static void master_clk_init(struct clk *clk)
 {
-	clk->rate *= pfc_divisors[ctrl_inw(FRQCR) & 0x0007];
+	clk->rate *= pfc_divisors[__raw_readw(FRQCR) & 0x0007];
 }
 
 static struct clk_ops sh4_master_clk_ops = {
@@ -37,7 +37,7 @@
 
 static unsigned long module_clk_recalc(struct clk *clk)
 {
-	int idx = (ctrl_inw(FRQCR) & 0x0007);
+	int idx = (__raw_readw(FRQCR) & 0x0007);
 	return clk->parent->rate / pfc_divisors[idx];
 }
 
@@ -47,7 +47,7 @@
 
 static unsigned long bus_clk_recalc(struct clk *clk)
 {
-	int idx = (ctrl_inw(FRQCR) >> 3) & 0x0007;
+	int idx = (__raw_readw(FRQCR) >> 3) & 0x0007;
 	return clk->parent->rate / bfc_divisors[idx];
 }
 
@@ -57,7 +57,7 @@
 
 static unsigned long cpu_clk_recalc(struct clk *clk)
 {
-	int idx = (ctrl_inw(FRQCR) >> 6) & 0x0007;
+	int idx = (__raw_readw(FRQCR) >> 6) & 0x0007;
 	return clk->parent->rate / ifc_divisors[idx];
 }
 
diff --git a/arch/sh/kernel/cpu/sh4/fpu.c b/arch/sh/kernel/cpu/sh4/fpu.c
index e97857a..447482d 100644
--- a/arch/sh/kernel/cpu/sh4/fpu.c
+++ b/arch/sh/kernel/cpu/sh4/fpu.c
@@ -85,14 +85,14 @@
 		      "fmov.s	fr1, @-%0\n\t"
 		      "fmov.s	fr0, @-%0\n\t"
 		      "lds	%3, fpscr\n\t":"=r" (dummy)
-		      :"0"((char *)(&tsk->thread.fpu.hard.status)),
+		      :"0"((char *)(&tsk->thread.xstate->hardfpu.status)),
 		      "r"(FPSCR_RCHG), "r"(FPSCR_INIT)
 		      :"memory");
 
 	disable_fpu();
 }
 
-static void restore_fpu(struct task_struct *tsk)
+void restore_fpu(struct task_struct *tsk)
 {
 	unsigned long dummy;
 
@@ -135,62 +135,11 @@
 		      "lds.l	@%0+, fpscr\n\t"
 		      "lds.l	@%0+, fpul\n\t"
 		      :"=r" (dummy)
-		      :"0"(&tsk->thread.fpu), "r"(FPSCR_RCHG)
+		      :"0" (tsk->thread.xstate), "r" (FPSCR_RCHG)
 		      :"memory");
 	disable_fpu();
 }
 
-/*
- * Load the FPU with signalling NANS.  This bit pattern we're using
- * has the property that no matter wether considered as single or as
- * double precision represents signaling NANS.
- */
-
-static void fpu_init(void)
-{
-	enable_fpu();
-	asm volatile (	"lds	%0, fpul\n\t"
-			"lds	%1, fpscr\n\t"
-			"fsts	fpul, fr0\n\t"
-			"fsts	fpul, fr1\n\t"
-			"fsts	fpul, fr2\n\t"
-			"fsts	fpul, fr3\n\t"
-			"fsts	fpul, fr4\n\t"
-			"fsts	fpul, fr5\n\t"
-			"fsts	fpul, fr6\n\t"
-			"fsts	fpul, fr7\n\t"
-			"fsts	fpul, fr8\n\t"
-			"fsts	fpul, fr9\n\t"
-			"fsts	fpul, fr10\n\t"
-			"fsts	fpul, fr11\n\t"
-			"fsts	fpul, fr12\n\t"
-			"fsts	fpul, fr13\n\t"
-			"fsts	fpul, fr14\n\t"
-			"fsts	fpul, fr15\n\t"
-			"frchg\n\t"
-			"fsts	fpul, fr0\n\t"
-			"fsts	fpul, fr1\n\t"
-			"fsts	fpul, fr2\n\t"
-			"fsts	fpul, fr3\n\t"
-			"fsts	fpul, fr4\n\t"
-			"fsts	fpul, fr5\n\t"
-			"fsts	fpul, fr6\n\t"
-			"fsts	fpul, fr7\n\t"
-			"fsts	fpul, fr8\n\t"
-			"fsts	fpul, fr9\n\t"
-			"fsts	fpul, fr10\n\t"
-			"fsts	fpul, fr11\n\t"
-			"fsts	fpul, fr12\n\t"
-			"fsts	fpul, fr13\n\t"
-			"fsts	fpul, fr14\n\t"
-			"fsts	fpul, fr15\n\t"
-			"frchg\n\t"
-			"lds	%2, fpscr\n\t"
-			:	/* no output */
-			:"r" (0), "r"(FPSCR_RCHG), "r"(FPSCR_INIT));
-	disable_fpu();
-}
-
 /**
  *      denormal_to_double - Given denormalized float number,
  *                           store double float
@@ -282,9 +231,9 @@
 		/* fcnvsd */
 		struct task_struct *tsk = current;
 
-		if ((tsk->thread.fpu.hard.fpscr & FPSCR_CAUSE_ERROR))
+		if ((tsk->thread.xstate->hardfpu.fpscr & FPSCR_CAUSE_ERROR))
 			/* FPU error */
-			denormal_to_double(&tsk->thread.fpu.hard,
+			denormal_to_double(&tsk->thread.xstate->hardfpu,
 					   (finsn >> 8) & 0xf);
 		else
 			return 0;
@@ -300,9 +249,9 @@
 
 		n = (finsn >> 8) & 0xf;
 		m = (finsn >> 4) & 0xf;
-		hx = tsk->thread.fpu.hard.fp_regs[n];
-		hy = tsk->thread.fpu.hard.fp_regs[m];
-		fpscr = tsk->thread.fpu.hard.fpscr;
+		hx = tsk->thread.xstate->hardfpu.fp_regs[n];
+		hy = tsk->thread.xstate->hardfpu.fp_regs[m];
+		fpscr = tsk->thread.xstate->hardfpu.fpscr;
 		prec = fpscr & FPSCR_DBL_PRECISION;
 
 		if ((fpscr & FPSCR_CAUSE_ERROR)
@@ -312,18 +261,18 @@
 
 			/* FPU error because of denormal (doubles) */
 			llx = ((long long)hx << 32)
-			    | tsk->thread.fpu.hard.fp_regs[n + 1];
+			    | tsk->thread.xstate->hardfpu.fp_regs[n + 1];
 			lly = ((long long)hy << 32)
-			    | tsk->thread.fpu.hard.fp_regs[m + 1];
+			    | tsk->thread.xstate->hardfpu.fp_regs[m + 1];
 			llx = float64_mul(llx, lly);
-			tsk->thread.fpu.hard.fp_regs[n] = llx >> 32;
-			tsk->thread.fpu.hard.fp_regs[n + 1] = llx & 0xffffffff;
+			tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;
+			tsk->thread.xstate->hardfpu.fp_regs[n + 1] = llx & 0xffffffff;
 		} else if ((fpscr & FPSCR_CAUSE_ERROR)
 			   && (!prec && ((hx & 0x7fffffff) < 0x00800000
 					 || (hy & 0x7fffffff) < 0x00800000))) {
 			/* FPU error because of denormal (floats) */
 			hx = float32_mul(hx, hy);
-			tsk->thread.fpu.hard.fp_regs[n] = hx;
+			tsk->thread.xstate->hardfpu.fp_regs[n] = hx;
 		} else
 			return 0;
 
@@ -338,9 +287,9 @@
 
 		n = (finsn >> 8) & 0xf;
 		m = (finsn >> 4) & 0xf;
-		hx = tsk->thread.fpu.hard.fp_regs[n];
-		hy = tsk->thread.fpu.hard.fp_regs[m];
-		fpscr = tsk->thread.fpu.hard.fpscr;
+		hx = tsk->thread.xstate->hardfpu.fp_regs[n];
+		hy = tsk->thread.xstate->hardfpu.fp_regs[m];
+		fpscr = tsk->thread.xstate->hardfpu.fpscr;
 		prec = fpscr & FPSCR_DBL_PRECISION;
 
 		if ((fpscr & FPSCR_CAUSE_ERROR)
@@ -350,15 +299,15 @@
 
 			/* FPU error because of denormal (doubles) */
 			llx = ((long long)hx << 32)
-			    | tsk->thread.fpu.hard.fp_regs[n + 1];
+			    | tsk->thread.xstate->hardfpu.fp_regs[n + 1];
 			lly = ((long long)hy << 32)
-			    | tsk->thread.fpu.hard.fp_regs[m + 1];
+			    | tsk->thread.xstate->hardfpu.fp_regs[m + 1];
 			if ((finsn & 0xf00f) == 0xf000)
 				llx = float64_add(llx, lly);
 			else
 				llx = float64_sub(llx, lly);
-			tsk->thread.fpu.hard.fp_regs[n] = llx >> 32;
-			tsk->thread.fpu.hard.fp_regs[n + 1] = llx & 0xffffffff;
+			tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;
+			tsk->thread.xstate->hardfpu.fp_regs[n + 1] = llx & 0xffffffff;
 		} else if ((fpscr & FPSCR_CAUSE_ERROR)
 			   && (!prec && ((hx & 0x7fffffff) < 0x00800000
 					 || (hy & 0x7fffffff) < 0x00800000))) {
@@ -367,7 +316,7 @@
 				hx = float32_add(hx, hy);
 			else
 				hx = float32_sub(hx, hy);
-			tsk->thread.fpu.hard.fp_regs[n] = hx;
+			tsk->thread.xstate->hardfpu.fp_regs[n] = hx;
 		} else
 			return 0;
 
@@ -382,9 +331,9 @@
 
 		n = (finsn >> 8) & 0xf;
 		m = (finsn >> 4) & 0xf;
-		hx = tsk->thread.fpu.hard.fp_regs[n];
-		hy = tsk->thread.fpu.hard.fp_regs[m];
-		fpscr = tsk->thread.fpu.hard.fpscr;
+		hx = tsk->thread.xstate->hardfpu.fp_regs[n];
+		hy = tsk->thread.xstate->hardfpu.fp_regs[m];
+		fpscr = tsk->thread.xstate->hardfpu.fpscr;
 		prec = fpscr & FPSCR_DBL_PRECISION;
 
 		if ((fpscr & FPSCR_CAUSE_ERROR)
@@ -394,20 +343,20 @@
 
 			/* FPU error because of denormal (doubles) */
 			llx = ((long long)hx << 32)
-			    | tsk->thread.fpu.hard.fp_regs[n + 1];
+			    | tsk->thread.xstate->hardfpu.fp_regs[n + 1];
 			lly = ((long long)hy << 32)
-			    | tsk->thread.fpu.hard.fp_regs[m + 1];
+			    | tsk->thread.xstate->hardfpu.fp_regs[m + 1];
 
 			llx = float64_div(llx, lly);
 
-			tsk->thread.fpu.hard.fp_regs[n] = llx >> 32;
-			tsk->thread.fpu.hard.fp_regs[n + 1] = llx & 0xffffffff;
+			tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;
+			tsk->thread.xstate->hardfpu.fp_regs[n + 1] = llx & 0xffffffff;
 		} else if ((fpscr & FPSCR_CAUSE_ERROR)
 			   && (!prec && ((hx & 0x7fffffff) < 0x00800000
 					 || (hy & 0x7fffffff) < 0x00800000))) {
 			/* FPU error because of denormal (floats) */
 			hx = float32_div(hx, hy);
-			tsk->thread.fpu.hard.fp_regs[n] = hx;
+			tsk->thread.xstate->hardfpu.fp_regs[n] = hx;
 		} else
 			return 0;
 
@@ -420,17 +369,17 @@
 		unsigned int hx;
 
 		m = (finsn >> 8) & 0x7;
-		hx = tsk->thread.fpu.hard.fp_regs[m];
+		hx = tsk->thread.xstate->hardfpu.fp_regs[m];
 
-		if ((tsk->thread.fpu.hard.fpscr & FPSCR_CAUSE_ERROR)
+		if ((tsk->thread.xstate->hardfpu.fpscr & FPSCR_CAUSE_ERROR)
 			&& ((hx & 0x7fffffff) < 0x00100000)) {
 			/* subnormal double to float conversion */
 			long long llx;
 
-			llx = ((long long)tsk->thread.fpu.hard.fp_regs[m] << 32)
-			    | tsk->thread.fpu.hard.fp_regs[m + 1];
+			llx = ((long long)tsk->thread.xstate->hardfpu.fp_regs[m] << 32)
+			    | tsk->thread.xstate->hardfpu.fp_regs[m + 1];
 
-			tsk->thread.fpu.hard.fpul = float64_to_float32(llx);
+			tsk->thread.xstate->hardfpu.fpul = float64_to_float32(llx);
 		} else
 			return 0;
 
@@ -449,7 +398,7 @@
 int float_rounding_mode(void)
 {
 	struct task_struct *tsk = current;
-	int roundingMode = FPSCR_ROUNDING_MODE(tsk->thread.fpu.hard.fpscr);
+	int roundingMode = FPSCR_ROUNDING_MODE(tsk->thread.xstate->hardfpu.fpscr);
 	return roundingMode;
 }
 
@@ -461,16 +410,16 @@
 	__unlazy_fpu(tsk, regs);
 	fpu_exception_flags = 0;
 	if (ieee_fpe_handler(regs)) {
-		tsk->thread.fpu.hard.fpscr &=
+		tsk->thread.xstate->hardfpu.fpscr &=
 		    ~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK);
-		tsk->thread.fpu.hard.fpscr |= fpu_exception_flags;
+		tsk->thread.xstate->hardfpu.fpscr |= fpu_exception_flags;
 		/* Set the FPSCR flag as well as cause bits - simply
 		 * replicate the cause */
-		tsk->thread.fpu.hard.fpscr |= (fpu_exception_flags >> 10);
+		tsk->thread.xstate->hardfpu.fpscr |= (fpu_exception_flags >> 10);
 		grab_fpu(regs);
 		restore_fpu(tsk);
 		task_thread_info(tsk)->status |= TS_USEDFPU;
-		if ((((tsk->thread.fpu.hard.fpscr & FPSCR_ENABLE_MASK) >> 7) &
+		if ((((tsk->thread.xstate->hardfpu.fpscr & FPSCR_ENABLE_MASK) >> 7) &
 		     (fpu_exception_flags >> 2)) == 0) {
 			return;
 		}
@@ -478,33 +427,3 @@
 
 	force_sig(SIGFPE, tsk);
 }
-
-void fpu_state_restore(struct pt_regs *regs)
-{
-	struct task_struct *tsk = current;
-
-	grab_fpu(regs);
-	if (unlikely(!user_mode(regs))) {
-		printk(KERN_ERR "BUG: FPU is used in kernel mode.\n");
-		BUG();
-		return;
-	}
-
-	if (likely(used_math())) {
-		/* Using the FPU again.  */
-		restore_fpu(tsk);
-	} else {
-		/* First time FPU user.  */
-		fpu_init();
-		set_used_math();
-	}
-	task_thread_info(tsk)->status |= TS_USEDFPU;
-	tsk->fpu_counter++;
-}
-
-BUILD_TRAP_HANDLER(fpu_state_restore)
-{
-	TRAP_HANDLER_DECL;
-
-	fpu_state_restore(regs);
-}
diff --git a/arch/sh/kernel/cpu/sh4/probe.c b/arch/sh/kernel/cpu/sh4/probe.c
index d36f0c4..822977a 100644
--- a/arch/sh/kernel/cpu/sh4/probe.c
+++ b/arch/sh/kernel/cpu/sh4/probe.c
@@ -28,9 +28,9 @@
 		[9] = (1 << 16)
 	};
 
-	pvr = (ctrl_inl(CCN_PVR) >> 8) & 0xffffff;
-	prr = (ctrl_inl(CCN_PRR) >> 4) & 0xff;
-	cvr = (ctrl_inl(CCN_CVR));
+	pvr = (__raw_readl(CCN_PVR) >> 8) & 0xffffff;
+	prr = (__raw_readl(CCN_PRR) >> 4) & 0xff;
+	cvr = (__raw_readl(CCN_CVR));
 
 	/*
 	 * Setup some sane SH-4 defaults for the icache
@@ -71,11 +71,11 @@
 		boot_cpu_data.dcache.ways = 4;
 	} else {
 		/* And some SH-4 defaults.. */
-		boot_cpu_data.flags |= CPU_HAS_PTEA;
+		boot_cpu_data.flags |= CPU_HAS_PTEA | CPU_HAS_FPU;
 		boot_cpu_data.family = CPU_FAMILY_SH4;
 	}
 
-	/* FPU detection works for everyone */
+	/* FPU detection works for almost everyone */
 	if ((cvr & 0x20000000))
 		boot_cpu_data.flags |= CPU_HAS_FPU;
 
@@ -124,6 +124,7 @@
 		boot_cpu_data.type = CPU_SH7785;
 		break;
 	case 0x4004:
+	case 0x4005:
 		boot_cpu_data.type = CPU_SH7786;
 		boot_cpu_data.flags |= CPU_HAS_PTEAEX | CPU_HAS_L2_CACHE;
 		break;
@@ -160,6 +161,7 @@
 		break;
 	case 0x700:
 		boot_cpu_data.type = CPU_SH4_501;
+		boot_cpu_data.flags &= ~CPU_HAS_FPU;
 		boot_cpu_data.icache.ways = 2;
 		boot_cpu_data.dcache.ways = 2;
 		break;
@@ -227,7 +229,7 @@
 			 * Size calculation is much more sensible
 			 * than it is for the L1.
 			 *
-			 * Sizes are 128KB, 258KB, 512KB, and 1MB.
+			 * Sizes are 128KB, 256KB, 512KB, and 1MB.
 			 */
 			size = (cvr & 0xf) << 17;
 
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh4-202.c b/arch/sh/kernel/cpu/sh4/setup-sh4-202.c
index 4b73371..b9b7e10 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh4-202.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh4-202.c
@@ -198,7 +198,7 @@
 {
 	switch (mode) {
 	case IRQ_MODE_IRQ: /* individual interrupt mode for IRL3-0 */
-		ctrl_outw(ctrl_inw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR);
+		__raw_writew(__raw_readw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR);
 		register_intc_controller(&intc_desc_irlm);
 		break;
 	default:
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7750.c b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
index b2a9df1..ffd79e5 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh7750.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
@@ -442,7 +442,7 @@
 
 	switch (mode) {
 	case IRQ_MODE_IRQ: /* individual interrupt mode for IRL3-0 */
-		ctrl_outw(ctrl_inw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR);
+		__raw_writew(__raw_readw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR);
 		register_intc_controller(&intc_desc_irlm);
 		break;
 	default:
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7760.c b/arch/sh/kernel/cpu/sh4/setup-sh7760.c
index 5b74cc0..a16eb36 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh7760.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh7760.c
@@ -319,7 +319,7 @@
 {
 	switch (mode) {
 	case IRQ_MODE_IRQ:
-		ctrl_outw(ctrl_inw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR);
+		__raw_writew(__raw_readw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR);
 		register_intc_controller(&intc_desc_irq);
 		break;
 	default:
diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
index 8a8a993..fc065f9 100644
--- a/arch/sh/kernel/cpu/sh4/sq.c
+++ b/arch/sh/kernel/cpu/sh4/sq.c
@@ -43,9 +43,9 @@
 
 #define store_queue_barrier()			\
 do {						\
-	(void)ctrl_inl(P4SEG_STORE_QUE);	\
-	ctrl_outl(0, P4SEG_STORE_QUE + 0);	\
-	ctrl_outl(0, P4SEG_STORE_QUE + 8);	\
+	(void)__raw_readl(P4SEG_STORE_QUE);	\
+	__raw_writel(0, P4SEG_STORE_QUE + 0);	\
+	__raw_writel(0, P4SEG_STORE_QUE + 8);	\
 } while (0);
 
 /**
@@ -100,7 +100,7 @@
 	spin_unlock_irq(&sq_mapping_lock);
 }
 
-static int __sq_remap(struct sq_mapping *map, unsigned long flags)
+static int __sq_remap(struct sq_mapping *map, pgprot_t prot)
 {
 #if defined(CONFIG_MMU)
 	struct vm_struct *vma;
@@ -113,7 +113,7 @@
 
 	if (ioremap_page_range((unsigned long)vma->addr,
 			       (unsigned long)vma->addr + map->size,
-			       vma->phys_addr, __pgprot(flags))) {
+			       vma->phys_addr, prot)) {
 		vunmap(vma->addr);
 		return -EAGAIN;
 	}
@@ -123,8 +123,8 @@
 	 * straightforward, as we can just load up each queue's QACR with
 	 * the physical address appropriately masked.
 	 */
-	ctrl_outl(((map->addr >> 26) << 2) & 0x1c, SQ_QACR0);
-	ctrl_outl(((map->addr >> 26) << 2) & 0x1c, SQ_QACR1);
+	__raw_writel(((map->addr >> 26) << 2) & 0x1c, SQ_QACR0);
+	__raw_writel(((map->addr >> 26) << 2) & 0x1c, SQ_QACR1);
 #endif
 
 	return 0;
@@ -135,14 +135,14 @@
  * @phys: Physical address of mapping.
  * @size: Length of mapping.
  * @name: User invoking mapping.
- * @flags: Protection flags.
+ * @prot: Protection bits.
  *
  * Remaps the physical address @phys through the next available store queue
  * address of @size length. @name is logged at boot time as well as through
  * the sysfs interface.
  */
 unsigned long sq_remap(unsigned long phys, unsigned int size,
-		       const char *name, unsigned long flags)
+		       const char *name, pgprot_t prot)
 {
 	struct sq_mapping *map;
 	unsigned long end;
@@ -177,7 +177,7 @@
 
 	map->sq_addr = P4SEG_STORE_QUE + (page << PAGE_SHIFT);
 
-	ret = __sq_remap(map, pgprot_val(PAGE_KERNEL_NOCACHE) | flags);
+	ret = __sq_remap(map, prot);
 	if (unlikely(ret != 0))
 		goto out;
 
@@ -309,8 +309,7 @@
 		return -EIO;
 
 	if (likely(len)) {
-		int ret = sq_remap(base, len, "Userspace",
-				   pgprot_val(PAGE_SHARED));
+		int ret = sq_remap(base, len, "Userspace", PAGE_SHARED);
 		if (ret < 0)
 			return ret;
 	} else
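With the pgprot_t conversion above, sq_remap() callers now pass a page protection constant directly instead of pgprot_val() flags, and PAGE_KERNEL_NOCACHE is no longer OR'd in implicitly. A minimal caller sketch under the new signature; the mapping name and wrapper function are illustrative only, and the header providing sq_remap() is assumed to be the sh4 <cpu/sq.h>:

	static unsigned long example_map_sq(unsigned long phys, unsigned int len)
	{
		/* pick the protection explicitly; the old PAGE_KERNEL_NOCACHE default is gone */
		return sq_remap(phys, len, "example", PAGE_KERNEL_NOCACHE);
	}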
diff --git a/arch/sh/kernel/cpu/sh4a/Makefile b/arch/sh/kernel/cpu/sh4a/Makefile
index 33bab47..b144e8a 100644
--- a/arch/sh/kernel/cpu/sh4a/Makefile
+++ b/arch/sh/kernel/cpu/sh4a/Makefile
@@ -41,7 +41,8 @@
 pinmux-$(CONFIG_CPU_SUBTYPE_SH7785)	:= pinmux-sh7785.o
 pinmux-$(CONFIG_CPU_SUBTYPE_SH7786)	:= pinmux-sh7786.o
 
-obj-y				+= $(clock-y)
-obj-$(CONFIG_SMP)		+= $(smp-y)
-obj-$(CONFIG_GENERIC_GPIO)	+= $(pinmux-y)
-obj-$(CONFIG_PERF_EVENTS)	+= perf_event.o
+obj-y					+= $(clock-y)
+obj-$(CONFIG_SMP)			+= $(smp-y)
+obj-$(CONFIG_GENERIC_GPIO)		+= $(pinmux-y)
+obj-$(CONFIG_PERF_EVENTS)		+= perf_event.o
+obj-$(CONFIG_HAVE_HW_BREAKPOINT)	+= ubc.o
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7343.c b/arch/sh/kernel/cpu/sh4a/clock-sh7343.c
index 0ee3ee8..2c16df3 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7343.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7343.c
@@ -107,13 +107,17 @@
 static int multipliers[] = { 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
 static int divisors[] = { 1, 3, 2, 5, 3, 4, 5, 6, 8, 10, 12, 16, 20 };
 
-static struct clk_div_mult_table div4_table = {
+static struct clk_div_mult_table div4_div_mult_table = {
 	.divisors = divisors,
 	.nr_divisors = ARRAY_SIZE(divisors),
 	.multipliers = multipliers,
 	.nr_multipliers = ARRAY_SIZE(multipliers),
 };
 
+static struct clk_div4_table div4_table = {
+	.div_mult_table = &div4_div_mult_table,
+};
+
 enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_B3, DIV4_P,
        DIV4_SIUA, DIV4_SIUB, DIV4_NR };
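The same restructuring is applied to several CPUs below (SH7366, SH7722, SH7723, SH7724, SH7785, SH7786): the bare clk_div_mult_table is no longer handed to sh_clk_div4_register() directly, but wrapped in a clk_div4_table, which can also carry an optional .kick callback (SH7724 uses it to set the FRQCRA KICK bit after a divider change). A condensed sketch of the new shape, mirroring the hunks in this series:

	static struct clk_div_mult_table example_div_mult_table = {
		.divisors	= divisors,
		.nr_divisors	= ARRAY_SIZE(divisors),
	};

	static struct clk_div4_table example_div4_table = {
		.div_mult_table	= &example_div_mult_table,
		/* .kick = ... is optional; see the SH7724 div4_kick() below */
	};

	/* later, in arch_clk_init(): */
	ret = sh_clk_div4_register(div4_clks, DIV4_NR, &example_div4_table);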
 
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7366.c b/arch/sh/kernel/cpu/sh4a/clock-sh7366.c
index a95ebab..91588d2 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7366.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7366.c
@@ -110,13 +110,17 @@
 static int multipliers[] = { 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
 static int divisors[] = { 1, 3, 2, 5, 3, 4, 5, 6, 8, 10, 12, 16, 20 };
 
-static struct clk_div_mult_table div4_table = {
+static struct clk_div_mult_table div4_div_mult_table = {
 	.divisors = divisors,
 	.nr_divisors = ARRAY_SIZE(divisors),
 	.multipliers = multipliers,
 	.nr_multipliers = ARRAY_SIZE(multipliers),
 };
 
+static struct clk_div4_table div4_table = {
+	.div_mult_table = &div4_div_mult_table,
+};
+
 enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_B3, DIV4_P,
        DIV4_SIUA, DIV4_SIUB, DIV4_NR };
 
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7722.c b/arch/sh/kernel/cpu/sh4a/clock-sh7722.c
index ea38b55..15db6d5 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7722.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7722.c
@@ -110,19 +110,22 @@
 static int multipliers[] = { 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
 static int divisors[] = { 1, 3, 2, 5, 3, 4, 5, 6, 8, 10, 12, 16, 20 };
 
-static struct clk_div_mult_table div4_table = {
+static struct clk_div_mult_table div4_div_mult_table = {
 	.divisors = divisors,
 	.nr_divisors = ARRAY_SIZE(divisors),
 	.multipliers = multipliers,
 	.nr_multipliers = ARRAY_SIZE(multipliers),
 };
 
-enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_B3, DIV4_P,
-       DIV4_SIUA, DIV4_SIUB, DIV4_IRDA, DIV4_NR };
+static struct clk_div4_table div4_table = {
+	.div_mult_table = &div4_div_mult_table,
+};
 
 #define DIV4(_str, _reg, _bit, _mask, _flags) \
   SH_CLK_DIV4(_str, &pll_clk, _reg, _bit, _mask, _flags)
 
+enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_B3, DIV4_P, DIV4_NR };
+
 struct clk div4_clks[DIV4_NR] = {
 	[DIV4_I] = DIV4("cpu_clk", FRQCR, 20, 0x1fef, CLK_ENABLE_ON_INIT),
 	[DIV4_U] = DIV4("umem_clk", FRQCR, 16, 0x1fff, CLK_ENABLE_ON_INIT),
@@ -130,9 +133,19 @@
 	[DIV4_B] = DIV4("bus_clk", FRQCR, 8, 0x1fff, CLK_ENABLE_ON_INIT),
 	[DIV4_B3] = DIV4("b3_clk", FRQCR, 4, 0x1fff, CLK_ENABLE_ON_INIT),
 	[DIV4_P] = DIV4("peripheral_clk", FRQCR, 0, 0x1fff, 0),
+};
+
+enum { DIV4_IRDA, DIV4_ENABLE_NR };
+
+struct clk div4_enable_clks[DIV4_ENABLE_NR] = {
+	[DIV4_IRDA] = DIV4("irda_clk", IRDACLKCR, 0, 0x1fff, 0),
+};
+
+enum { DIV4_SIUA, DIV4_SIUB, DIV4_REPARENT_NR };
+
+struct clk div4_reparent_clks[DIV4_REPARENT_NR] = {
 	[DIV4_SIUA] = DIV4("siua_clk", SCLKACR, 0, 0x1fff, 0),
 	[DIV4_SIUB] = DIV4("siub_clk", SCLKBCR, 0, 0x1fff, 0),
-	[DIV4_IRDA] = DIV4("irda_clk", IRDACLKCR, 0, 0x1fff, 0),
 };
 
 struct clk div6_clks[] = {
@@ -189,6 +202,14 @@
 		ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);
 
 	if (!ret)
+		ret = sh_clk_div4_enable_register(div4_enable_clks,
+					DIV4_ENABLE_NR, &div4_table);
+
+	if (!ret)
+		ret = sh_clk_div4_reparent_register(div4_reparent_clks,
+					DIV4_REPARENT_NR, &div4_table);
+
+	if (!ret)
 		ret = sh_clk_div6_register(div6_clks, ARRAY_SIZE(div6_clks));
 
 	if (!ret)
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7723.c b/arch/sh/kernel/cpu/sh4a/clock-sh7723.c
index 20a31c2..50babe0 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7723.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7723.c
@@ -110,15 +110,18 @@
 static int multipliers[] = { 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
 static int divisors[] = { 1, 3, 2, 5, 3, 4, 5, 6, 8, 10, 12, 16, 20 };
 
-static struct clk_div_mult_table div4_table = {
+static struct clk_div_mult_table div4_div_mult_table = {
 	.divisors = divisors,
 	.nr_divisors = ARRAY_SIZE(divisors),
 	.multipliers = multipliers,
 	.nr_multipliers = ARRAY_SIZE(multipliers),
 };
 
-enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_B3, DIV4_P,
-       DIV4_SIUA, DIV4_SIUB, DIV4_IRDA, DIV4_NR };
+static struct clk_div4_table div4_table = {
+	.div_mult_table = &div4_div_mult_table,
+};
+
+enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_B3, DIV4_P, DIV4_NR };
 
 #define DIV4(_str, _reg, _bit, _mask, _flags) \
   SH_CLK_DIV4(_str, &pll_clk, _reg, _bit, _mask, _flags)
@@ -130,11 +133,20 @@
 	[DIV4_B] = DIV4("bus_clk", FRQCR, 8, 0x0dbf, CLK_ENABLE_ON_INIT),
 	[DIV4_B3] = DIV4("b3_clk", FRQCR, 4, 0x0db4, CLK_ENABLE_ON_INIT),
 	[DIV4_P] = DIV4("peripheral_clk", FRQCR, 0, 0x0dbf, 0),
-	[DIV4_SIUA] = DIV4("siua_clk", SCLKACR, 0, 0x0dbf, 0),
-	[DIV4_SIUB] = DIV4("siub_clk", SCLKBCR, 0, 0x0dbf, 0),
+};
+
+enum { DIV4_IRDA, DIV4_ENABLE_NR };
+
+struct clk div4_enable_clks[DIV4_ENABLE_NR] = {
 	[DIV4_IRDA] = DIV4("irda_clk", IRDACLKCR, 0, 0x0dbf, 0),
 };
 
+enum { DIV4_SIUA, DIV4_SIUB, DIV4_REPARENT_NR };
+
+struct clk div4_reparent_clks[DIV4_REPARENT_NR] = {
+	[DIV4_SIUA] = DIV4("siua_clk", SCLKACR, 0, 0x0dbf, 0),
+	[DIV4_SIUB] = DIV4("siub_clk", SCLKBCR, 0, 0x0dbf, 0),
+};
 struct clk div6_clks[] = {
 	SH_CLK_DIV6("video_clk", &pll_clk, VCLKCR, 0),
 };
@@ -216,6 +228,14 @@
 		ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);
 
 	if (!ret)
+		ret = sh_clk_div4_enable_register(div4_enable_clks,
+					DIV4_ENABLE_NR, &div4_table);
+
+	if (!ret)
+		ret = sh_clk_div4_reparent_register(div4_reparent_clks,
+					DIV4_REPARENT_NR, &div4_table);
+
+	if (!ret)
 		ret = sh_clk_div6_register(div6_clks, ARRAY_SIZE(div6_clks));
 
 	if (!ret)
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
index 9db7438..6707061 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
@@ -127,13 +127,28 @@
 	&div3_clk,
 };
 
+static void div4_kick(struct clk *clk)
+{
+	unsigned long value;
+
+	/* set KICK bit in FRQCRA to update hardware setting */
+	value = __raw_readl(FRQCRA);
+	value |= (1 << 31);
+	__raw_writel(value, FRQCRA);
+}
+
 static int divisors[] = { 2, 3, 4, 6, 8, 12, 16, 0, 24, 32, 36, 48, 0, 72 };
 
-static struct clk_div_mult_table div4_table = {
+static struct clk_div_mult_table div4_div_mult_table = {
 	.divisors = divisors,
 	.nr_divisors = ARRAY_SIZE(divisors),
 };
 
+static struct clk_div4_table div4_table = {
+	.div_mult_table = &div4_div_mult_table,
+	.kick = div4_kick,
+};
+
 enum { DIV4_I, DIV4_SH, DIV4_B, DIV4_P, DIV4_M1, DIV4_NR };
 
 #define DIV4(_str, _reg, _bit, _mask, _flags) \
@@ -144,7 +159,7 @@
 	[DIV4_SH] = DIV4("shyway_clk", FRQCRA, 12, 0x2f7c, CLK_ENABLE_ON_INIT),
 	[DIV4_B] = DIV4("bus_clk", FRQCRA, 8, 0x2f7c, CLK_ENABLE_ON_INIT),
 	[DIV4_P] = DIV4("peripheral_clk", FRQCRA, 0, 0x2f7c, 0),
-	[DIV4_M1] = DIV4("vpu_clk", FRQCRB, 4, 0x2f7c, 0),
+	[DIV4_M1] = DIV4("vpu_clk", FRQCRB, 4, 0x2f7c, CLK_ENABLE_ON_INIT),
 };
 
 struct clk div6_clks[] = {
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7757.c b/arch/sh/kernel/cpu/sh4a/clock-sh7757.c
index ddc235c..86aae60 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7757.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7757.c
@@ -35,7 +35,7 @@
 
 static void module_clk_recalc(struct clk *clk)
 {
-	int idx = ctrl_inl(FRQCR) & 0x0000000f;
+	int idx = __raw_readl(FRQCR) & 0x0000000f;
 	clk->rate = clk->parent->rate / p1fc_divisors[idx];
 }
 
@@ -45,7 +45,7 @@
 
 static void bus_clk_recalc(struct clk *clk)
 {
-	int idx = (ctrl_inl(FRQCR) >> 8) & 0x0000000f;
+	int idx = (__raw_readl(FRQCR) >> 8) & 0x0000000f;
 	clk->rate = clk->parent->rate / bfc_divisors[idx];
 }
 
@@ -55,7 +55,7 @@
 
 static void cpu_clk_recalc(struct clk *clk)
 {
-	int idx = (ctrl_inl(FRQCR) >> 20) & 0x0000000f;
+	int idx = (__raw_readl(FRQCR) >> 20) & 0x0000000f;
 	clk->rate = clk->parent->rate / ifc_divisors[idx];
 }
 
@@ -78,7 +78,7 @@
 
 static void shyway_clk_recalc(struct clk *clk)
 {
-	int idx = (ctrl_inl(FRQCR) >> 12) & 0x0000000f;
+	int idx = (__raw_readl(FRQCR) >> 12) & 0x0000000f;
 	clk->rate = clk->parent->rate / sfc_divisors[idx];
 }
 
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7763.c b/arch/sh/kernel/cpu/sh4a/clock-sh7763.c
index 370cd47..9f40116 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7763.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7763.c
@@ -22,7 +22,7 @@
 
 static void master_clk_init(struct clk *clk)
 {
-	clk->rate *= p0fc_divisors[(ctrl_inl(FRQCR) >> 4) & 0x07];
+	clk->rate *= p0fc_divisors[(__raw_readl(FRQCR) >> 4) & 0x07];
 }
 
 static struct clk_ops sh7763_master_clk_ops = {
@@ -31,7 +31,7 @@
 
 static unsigned long module_clk_recalc(struct clk *clk)
 {
-	int idx = ((ctrl_inl(FRQCR) >> 4) & 0x07);
+	int idx = ((__raw_readl(FRQCR) >> 4) & 0x07);
 	return clk->parent->rate / p0fc_divisors[idx];
 }
 
@@ -41,7 +41,7 @@
 
 static unsigned long bus_clk_recalc(struct clk *clk)
 {
-	int idx = ((ctrl_inl(FRQCR) >> 16) & 0x07);
+	int idx = ((__raw_readl(FRQCR) >> 16) & 0x07);
 	return clk->parent->rate / bfc_divisors[idx];
 }
 
@@ -68,7 +68,7 @@
 
 static unsigned long shyway_clk_recalc(struct clk *clk)
 {
-	int idx = ((ctrl_inl(FRQCR) >> 20) & 0x07);
+	int idx = ((__raw_readl(FRQCR) >> 20) & 0x07);
 	return clk->parent->rate / cfc_divisors[idx];
 }
 
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7770.c b/arch/sh/kernel/cpu/sh4a/clock-sh7770.c
index e0b8967..9e33543 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7770.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7770.c
@@ -21,7 +21,7 @@
 
 static void master_clk_init(struct clk *clk)
 {
-	clk->rate *= pfc_divisors[(ctrl_inl(FRQCR) >> 28) & 0x000f];
+	clk->rate *= pfc_divisors[(__raw_readl(FRQCR) >> 28) & 0x000f];
 }
 
 static struct clk_ops sh7770_master_clk_ops = {
@@ -30,7 +30,7 @@
 
 static unsigned long module_clk_recalc(struct clk *clk)
 {
-	int idx = ((ctrl_inl(FRQCR) >> 28) & 0x000f);
+	int idx = ((__raw_readl(FRQCR) >> 28) & 0x000f);
 	return clk->parent->rate / pfc_divisors[idx];
 }
 
@@ -40,7 +40,7 @@
 
 static unsigned long bus_clk_recalc(struct clk *clk)
 {
-	int idx = (ctrl_inl(FRQCR) & 0x000f);
+	int idx = (__raw_readl(FRQCR) & 0x000f);
 	return clk->parent->rate / bfc_divisors[idx];
 }
 
@@ -50,7 +50,7 @@
 
 static unsigned long cpu_clk_recalc(struct clk *clk)
 {
-	int idx = ((ctrl_inl(FRQCR) >> 24) & 0x000f);
+	int idx = ((__raw_readl(FRQCR) >> 24) & 0x000f);
 	return clk->parent->rate / ifc_divisors[idx];
 }
 
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7780.c b/arch/sh/kernel/cpu/sh4a/clock-sh7780.c
index a249d82..150963a 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7780.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7780.c
@@ -22,7 +22,7 @@
 
 static void master_clk_init(struct clk *clk)
 {
-	clk->rate *= pfc_divisors[ctrl_inl(FRQCR) & 0x0003];
+	clk->rate *= pfc_divisors[__raw_readl(FRQCR) & 0x0003];
 }
 
 static struct clk_ops sh7780_master_clk_ops = {
@@ -31,7 +31,7 @@
 
 static unsigned long module_clk_recalc(struct clk *clk)
 {
-	int idx = (ctrl_inl(FRQCR) & 0x0003);
+	int idx = (__raw_readl(FRQCR) & 0x0003);
 	return clk->parent->rate / pfc_divisors[idx];
 }
 
@@ -41,7 +41,7 @@
 
 static unsigned long bus_clk_recalc(struct clk *clk)
 {
-	int idx = ((ctrl_inl(FRQCR) >> 16) & 0x0007);
+	int idx = ((__raw_readl(FRQCR) >> 16) & 0x0007);
 	return clk->parent->rate / bfc_divisors[idx];
 }
 
@@ -51,7 +51,7 @@
 
 static unsigned long cpu_clk_recalc(struct clk *clk)
 {
-	int idx = ((ctrl_inl(FRQCR) >> 24) & 0x0001);
+	int idx = ((__raw_readl(FRQCR) >> 24) & 0x0001);
 	return clk->parent->rate / ifc_divisors[idx];
 }
 
@@ -74,7 +74,7 @@
 
 static unsigned long shyway_clk_recalc(struct clk *clk)
 {
-	int idx = ((ctrl_inl(FRQCR) >> 20) & 0x0007);
+	int idx = ((__raw_readl(FRQCR) >> 20) & 0x0007);
 	return clk->parent->rate / cfc_divisors[idx];
 }
 
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7785.c b/arch/sh/kernel/cpu/sh4a/clock-sh7785.c
index 73abfbf..d997f0a 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7785.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7785.c
@@ -57,11 +57,15 @@
 static unsigned int div2[] = { 1, 2, 4, 6, 8, 12, 16, 18,
 			       24, 32, 36, 48 };
 
-static struct clk_div_mult_table div4_table = {
+static struct clk_div_mult_table div4_div_mult_table = {
 	.divisors = div2,
 	.nr_divisors = ARRAY_SIZE(div2),
 };
 
+static struct clk_div4_table div4_table = {
+	.div_mult_table = &div4_div_mult_table,
+};
+
 enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_DDR, DIV4_GA,
 	DIV4_DU, DIV4_P, DIV4_NR };
 
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7786.c b/arch/sh/kernel/cpu/sh4a/clock-sh7786.c
index a0e8869..af69fd4 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7786.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7786.c
@@ -3,11 +3,7 @@
  *
  * SH7786 support for the clock framework
  *
- * Copyright (C) 2008, 2009  Renesas Solutions Corp.
- * Kuninori Morimoto <morimoto.kuninori@renesas.com>
- *
- * Based on SH7785
- *  Copyright (C) 2007  Paul Mundt
+ *  Copyright (C) 2010  Paul Mundt
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
@@ -15,127 +11,127 @@
  */
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/io.h>
 #include <asm/clock.h>
 #include <asm/freq.h>
-#include <asm/io.h>
-
-static int ifc_divisors[] = { 1, 2, 4, 1 };
-static int sfc_divisors[] = { 1, 1, 4, 1 };
-static int bfc_divisors[] = { 1, 1, 1, 1, 1, 12, 16, 1,
-			     24, 32, 1, 1, 1, 1, 1, 1 };
-static int mfc_divisors[] = { 1, 1, 4, 1 };
-static int pfc_divisors[] = { 1, 1, 1, 1, 1, 1, 16, 1,
-			      24, 32, 1, 48, 1, 1, 1, 1 };
-
-static void master_clk_init(struct clk *clk)
-{
-	clk->rate *= pfc_divisors[ctrl_inl(FRQMR1) & 0x000f];
-}
-
-static struct clk_ops sh7786_master_clk_ops = {
-	.init		= master_clk_init,
-};
-
-static unsigned long module_clk_recalc(struct clk *clk)
-{
-	int idx = (ctrl_inl(FRQMR1) & 0x000f);
-	return clk->parent->rate / pfc_divisors[idx];
-}
-
-static struct clk_ops sh7786_module_clk_ops = {
-	.recalc		= module_clk_recalc,
-};
-
-static unsigned long bus_clk_recalc(struct clk *clk)
-{
-	int idx = ((ctrl_inl(FRQMR1) >> 16) & 0x000f);
-	return clk->parent->rate / bfc_divisors[idx];
-}
-
-static struct clk_ops sh7786_bus_clk_ops = {
-	.recalc		= bus_clk_recalc,
-};
-
-static unsigned long cpu_clk_recalc(struct clk *clk)
-{
-	int idx = ((ctrl_inl(FRQMR1) >> 28) & 0x0003);
-	return clk->parent->rate / ifc_divisors[idx];
-}
-
-static struct clk_ops sh7786_cpu_clk_ops = {
-	.recalc		= cpu_clk_recalc,
-};
-
-static struct clk_ops *sh7786_clk_ops[] = {
-	&sh7786_master_clk_ops,
-	&sh7786_module_clk_ops,
-	&sh7786_bus_clk_ops,
-	&sh7786_cpu_clk_ops,
-};
-
-void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
-{
-	if (idx < ARRAY_SIZE(sh7786_clk_ops))
-		*ops = sh7786_clk_ops[idx];
-}
-
-static unsigned long shyway_clk_recalc(struct clk *clk)
-{
-	int idx = ((ctrl_inl(FRQMR1) >> 20) & 0x0003);
-	return clk->parent->rate / sfc_divisors[idx];
-}
-
-static struct clk_ops sh7786_shyway_clk_ops = {
-	.recalc		= shyway_clk_recalc,
-};
-
-static struct clk sh7786_shyway_clk = {
-	.name		= "shyway_clk",
-	.flags		= CLK_ENABLE_ON_INIT,
-	.ops		= &sh7786_shyway_clk_ops,
-};
-
-static unsigned long ddr_clk_recalc(struct clk *clk)
-{
-	int idx = ((ctrl_inl(FRQMR1) >> 12) & 0x0003);
-	return clk->parent->rate / mfc_divisors[idx];
-}
-
-static struct clk_ops sh7786_ddr_clk_ops = {
-	.recalc		= ddr_clk_recalc,
-};
-
-static struct clk sh7786_ddr_clk = {
-	.name		= "ddr_clk",
-	.flags		= CLK_ENABLE_ON_INIT,
-	.ops		= &sh7786_ddr_clk_ops,
-};
 
 /*
- * Additional SH7786-specific on-chip clocks that aren't already part of the
- * clock framework
+ * Default rate for the root input clock, reset this with clk_set_rate()
+ * from the platform code.
  */
-static struct clk *sh7786_onchip_clocks[] = {
-	&sh7786_shyway_clk,
-	&sh7786_ddr_clk,
+static struct clk extal_clk = {
+	.name		= "extal",
+	.id		= -1,
+	.rate		= 33333333,
+};
+
+static unsigned long pll_recalc(struct clk *clk)
+{
+	int multiplier;
+
+	/*
+	 * Clock modes 0, 1, and 2 use an x64 multiplier against PLL1,
+	 * while modes 3, 4, and 5 use an x32.
+	 */
+	multiplier = (sh_mv.mv_mode_pins() & 0xf) < 3 ? 64 : 32;
+
+	return clk->parent->rate * multiplier;
+}
+
+static struct clk_ops pll_clk_ops = {
+	.recalc		= pll_recalc,
+};
+
+static struct clk pll_clk = {
+	.name		= "pll_clk",
+	.id		= -1,
+	.ops		= &pll_clk_ops,
+	.parent		= &extal_clk,
+	.flags		= CLK_ENABLE_ON_INIT,
+};
+
+static struct clk *clks[] = {
+	&extal_clk,
+	&pll_clk,
+};
+
+static unsigned int div2[] = { 1, 2, 4, 6, 8, 12, 16, 18,
+			       24, 32, 36, 48 };
+
+static struct clk_div_mult_table div4_div_mult_table = {
+	.divisors = div2,
+	.nr_divisors = ARRAY_SIZE(div2),
+};
+
+static struct clk_div4_table div4_table = {
+	.div_mult_table = &div4_div_mult_table,
+};
+
+enum { DIV4_I, DIV4_SH, DIV4_B, DIV4_DDR, DIV4_DU, DIV4_P, DIV4_NR };
+
+#define DIV4(_str, _bit, _mask, _flags) \
+  SH_CLK_DIV4(_str, &pll_clk, FRQMR1, _bit, _mask, _flags)
+
+struct clk div4_clks[DIV4_NR] = {
+	[DIV4_P] = DIV4("peripheral_clk", 0, 0x0b40, 0),
+	[DIV4_DU] = DIV4("du_clk", 4, 0x0010, 0),
+	[DIV4_DDR] = DIV4("ddr_clk", 12, 0x0002, CLK_ENABLE_ON_INIT),
+	[DIV4_B] = DIV4("bus_clk", 16, 0x0360, CLK_ENABLE_ON_INIT),
+	[DIV4_SH] = DIV4("shyway_clk", 20, 0x0002, CLK_ENABLE_ON_INIT),
+	[DIV4_I] = DIV4("cpu_clk", 28, 0x0006, CLK_ENABLE_ON_INIT),
+};
+
+#define MSTPCR0		0xffc40030
+#define MSTPCR1		0xffc40034
+
+static struct clk mstp_clks[] = {
+	/* MSTPCR0 */
+	SH_CLK_MSTP32("scif_fck", 5, &div4_clks[DIV4_P], MSTPCR0, 29, 0),
+	SH_CLK_MSTP32("scif_fck", 4, &div4_clks[DIV4_P], MSTPCR0, 28, 0),
+	SH_CLK_MSTP32("scif_fck", 3, &div4_clks[DIV4_P], MSTPCR0, 27, 0),
+	SH_CLK_MSTP32("scif_fck", 2, &div4_clks[DIV4_P], MSTPCR0, 26, 0),
+	SH_CLK_MSTP32("scif_fck", 1, &div4_clks[DIV4_P], MSTPCR0, 25, 0),
+	SH_CLK_MSTP32("scif_fck", 0, &div4_clks[DIV4_P], MSTPCR0, 24, 0),
+	SH_CLK_MSTP32("ssi_fck", 3, &div4_clks[DIV4_P], MSTPCR0, 23, 0),
+	SH_CLK_MSTP32("ssi_fck", 2, &div4_clks[DIV4_P], MSTPCR0, 22, 0),
+	SH_CLK_MSTP32("ssi_fck", 1, &div4_clks[DIV4_P], MSTPCR0, 21, 0),
+	SH_CLK_MSTP32("ssi_fck", 0, &div4_clks[DIV4_P], MSTPCR0, 20, 0),
+	SH_CLK_MSTP32("hac_fck", 1, &div4_clks[DIV4_P], MSTPCR0, 17, 0),
+	SH_CLK_MSTP32("hac_fck", 0, &div4_clks[DIV4_P], MSTPCR0, 16, 0),
+	SH_CLK_MSTP32("i2c_fck", 1, &div4_clks[DIV4_P], MSTPCR0, 15, 0),
+	SH_CLK_MSTP32("i2c_fck", 0, &div4_clks[DIV4_P], MSTPCR0, 14, 0),
+	SH_CLK_MSTP32("tmu9_11_fck", -1, &div4_clks[DIV4_P], MSTPCR0, 11, 0),
+	SH_CLK_MSTP32("tmu678_fck", -1, &div4_clks[DIV4_P], MSTPCR0, 10, 0),
+	SH_CLK_MSTP32("tmu345_fck", -1, &div4_clks[DIV4_P], MSTPCR0, 9, 0),
+	SH_CLK_MSTP32("tmu012_fck", -1, &div4_clks[DIV4_P], MSTPCR0, 8, 0),
+	SH_CLK_MSTP32("sdif_fck", 1, &div4_clks[DIV4_P], MSTPCR0, 5, 0),
+	SH_CLK_MSTP32("sdif_fck", 0, &div4_clks[DIV4_P], MSTPCR0, 4, 0),
+	SH_CLK_MSTP32("hspi_fck", -1, &div4_clks[DIV4_P], MSTPCR0, 2, 0),
+
+	/* MSTPCR1 */
+	SH_CLK_MSTP32("usb_fck", -1, NULL, MSTPCR1, 12, 0),
+	SH_CLK_MSTP32("pcie_fck", 2, NULL, MSTPCR1, 10, 0),
+	SH_CLK_MSTP32("pcie_fck", 1, NULL, MSTPCR1, 9, 0),
+	SH_CLK_MSTP32("pcie_fck", 0, NULL, MSTPCR1, 8, 0),
+	SH_CLK_MSTP32("dmac_11_6_fck", -1, NULL, MSTPCR1, 5, 0),
+	SH_CLK_MSTP32("dmac_5_0_fck", -1, NULL, MSTPCR1, 4, 0),
+	SH_CLK_MSTP32("du_fck", -1, NULL, MSTPCR1, 3, 0),
+	SH_CLK_MSTP32("ether_fck", -1, NULL, MSTPCR1, 2, 0),
 };
 
 int __init arch_clk_init(void)
 {
-	struct clk *clk;
 	int i, ret = 0;
 
-	cpg_clk_init();
+	for (i = 0; i < ARRAY_SIZE(clks); i++)
+		ret |= clk_register(clks[i]);
 
-	clk = clk_get(NULL, "master_clk");
-	for (i = 0; i < ARRAY_SIZE(sh7786_onchip_clocks); i++) {
-		struct clk *clkp = sh7786_onchip_clocks[i];
-
-		clkp->parent = clk;
-		ret |= clk_register(clkp);
-	}
-
-	clk_put(clk);
+	if (!ret)
+		ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),
+					   &div4_table);
+	if (!ret)
+		ret = sh_clk_mstp32_register(mstp_clks, ARRAY_SIZE(mstp_clks));
 
 	return ret;
 }
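The SH7786 clock code above replaces the hand-rolled per-clock recalc ops with table-driven FRQMR1 divider clocks plus MSTP32 gate clocks, so drivers can look up and gate their function clocks through the ordinary clk API. A hedged consumer sketch; the "tmu012_fck" name comes from the table above, while the wrapper function itself is hypothetical:

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/kernel.h>

	static int example_enable_tmu_clock(struct device *dev)
	{
		struct clk *fck = clk_get(dev, "tmu012_fck");

		if (IS_ERR(fck))
			return PTR_ERR(fck);

		clk_enable(fck);
		/* rate is derived from the DIV4_P peripheral clock registered above */
		pr_info("tmu012_fck: %lu Hz\n", clk_get_rate(fck));
		return 0;
	}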
diff --git a/arch/sh/kernel/cpu/sh4a/clock-shx3.c b/arch/sh/kernel/cpu/sh4a/clock-shx3.c
index 23c27d3..e75c57b 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-shx3.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-shx3.c
@@ -33,7 +33,7 @@
 
 static void master_clk_init(struct clk *clk)
 {
-	clk->rate *= pfc_divisors[(ctrl_inl(FRQCR) >> PFC_POS) & PFC_MSK];
+	clk->rate *= pfc_divisors[(__raw_readl(FRQCR) >> PFC_POS) & PFC_MSK];
 }
 
 static struct clk_ops shx3_master_clk_ops = {
@@ -42,7 +42,7 @@
 
 static unsigned long module_clk_recalc(struct clk *clk)
 {
-	int idx = ((ctrl_inl(FRQCR) >> PFC_POS) & PFC_MSK);
+	int idx = ((__raw_readl(FRQCR) >> PFC_POS) & PFC_MSK);
 	return clk->parent->rate / pfc_divisors[idx];
 }
 
@@ -52,7 +52,7 @@
 
 static unsigned long bus_clk_recalc(struct clk *clk)
 {
-	int idx = ((ctrl_inl(FRQCR) >> BFC_POS) & BFC_MSK);
+	int idx = ((__raw_readl(FRQCR) >> BFC_POS) & BFC_MSK);
 	return clk->parent->rate / bfc_divisors[idx];
 }
 
@@ -62,7 +62,7 @@
 
 static unsigned long cpu_clk_recalc(struct clk *clk)
 {
-	int idx = ((ctrl_inl(FRQCR) >> IFC_POS) & IFC_MSK);
+	int idx = ((__raw_readl(FRQCR) >> IFC_POS) & IFC_MSK);
 	return clk->parent->rate / ifc_divisors[idx];
 }
 
@@ -85,7 +85,7 @@
 
 static unsigned long shyway_clk_recalc(struct clk *clk)
 {
-	int idx = ((ctrl_inl(FRQCR) >> CFC_POS) & CFC_MSK);
+	int idx = ((__raw_readl(FRQCR) >> CFC_POS) & CFC_MSK);
 	return clk->parent->rate / cfc_divisors[idx];
 }
 
diff --git a/arch/sh/kernel/cpu/sh4a/pinmux-sh7722.c b/arch/sh/kernel/cpu/sh4a/pinmux-sh7722.c
index cb9d07b..0688a75 100644
--- a/arch/sh/kernel/cpu/sh4a/pinmux-sh7722.c
+++ b/arch/sh/kernel/cpu/sh4a/pinmux-sh7722.c
@@ -278,6 +278,7 @@
 	HIZA8_LCDC, HIZA8_HIZ,
 	HIZA7_LCDC, HIZA7_HIZ,
 	HIZA6_LCDC, HIZA6_HIZ,
+	HIZB4_SIUA, HIZB4_HIZ,
 	HIZB1_VIO, HIZB1_HIZ,
 	HIZB0_VIO, HIZB0_HIZ,
 	HIZC15_IRQ7, HIZC15_HIZ,
@@ -546,7 +547,7 @@
 	PINMUX_DATA(VIO_VD2_MARK, PSE3_VIO, MSELB9_VIO2,
 		    HIZB0_VIO, FOE_VIO_VD2),
 	PINMUX_DATA(VIO_HD2_MARK, PSE3_VIO, MSELB9_VIO2,
-		    HIZB1_VIO, HIZB1_VIO, FCE_VIO_HD2),
+		    HIZB1_VIO, FCE_VIO_HD2),
 	PINMUX_DATA(VIO_CLK2_MARK, PSE3_VIO, MSELB9_VIO2,
 		    HIZB1_VIO, FRB_VIO_CLK2),
 
@@ -658,14 +659,14 @@
 	PINMUX_DATA(SDHICLK_MARK, SDHICLK),
 
 	/* SIU - Port A */
-	PINMUX_DATA(SIUAOLR_MARK, PSC13_SIUAOLR, SIUAOLR_SIOF1_SYNC),
-	PINMUX_DATA(SIUAOBT_MARK, PSC14_SIUAOBT, SIUAOBT_SIOF1_SCK),
-	PINMUX_DATA(SIUAISLD_MARK, PSC15_SIUAISLD, SIUAISLD_SIOF1_RXD),
-	PINMUX_DATA(SIUAILR_MARK, PSC11_SIUAILR, SIUAILR_SIOF1_SS2),
-	PINMUX_DATA(SIUAIBT_MARK, PSC12_SIUAIBT, SIUAIBT_SIOF1_SS1),
-	PINMUX_DATA(SIUAOSLD_MARK, PSB0_SIUAOSLD, SIUAOSLD_SIOF1_TXD),
-	PINMUX_DATA(SIUMCKA_MARK, PSE11_SIUMCKA_SIOF1_MCK, PSB1_SIUMCKA, PTK0),
-	PINMUX_DATA(SIUFCKA_MARK, PSE11_SIUFCKA, PTK0),
+	PINMUX_DATA(SIUAOLR_MARK, PSC13_SIUAOLR, HIZB4_SIUA, SIUAOLR_SIOF1_SYNC),
+	PINMUX_DATA(SIUAOBT_MARK, PSC14_SIUAOBT, HIZB4_SIUA, SIUAOBT_SIOF1_SCK),
+	PINMUX_DATA(SIUAISLD_MARK, PSC15_SIUAISLD, HIZB4_SIUA, SIUAISLD_SIOF1_RXD),
+	PINMUX_DATA(SIUAILR_MARK, PSC11_SIUAILR, HIZB4_SIUA, SIUAILR_SIOF1_SS2),
+	PINMUX_DATA(SIUAIBT_MARK, PSC12_SIUAIBT, HIZB4_SIUA, SIUAIBT_SIOF1_SS1),
+	PINMUX_DATA(SIUAOSLD_MARK, PSB0_SIUAOSLD, HIZB4_SIUA, SIUAOSLD_SIOF1_TXD),
+	PINMUX_DATA(SIUMCKA_MARK, PSE11_SIUMCKA_SIOF1_MCK, HIZB4_SIUA, PSB1_SIUMCKA, PTK0),
+	PINMUX_DATA(SIUFCKA_MARK, PSE11_SIUFCKA, HIZB4_SIUA, PTK0),
 
 	/* SIU - Port B */
 	PINMUX_DATA(SIUBOLR_MARK, PSB11_SIUBOLR, SIOSTRB1_SIUBOLR),
@@ -1612,7 +1613,7 @@
 		0, 0,
 		0, 0,
 		0, 0,
-		0, 0,
+		HIZB4_SIUA, HIZB4_HIZ,
 		0, 0,
 		0, 0,
 		HIZB1_VIO, HIZB1_HIZ,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
index b5335b5..ef3f978 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
@@ -446,6 +446,8 @@
 
 enum {
 	UNUSED=0,
+	ENABLED,
+	DISABLED,
 
 	/* interrupt sources */
 	IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
@@ -461,7 +463,6 @@
 	SCIF0, SCIF1, SCIF2, SIOF0, SIOF1, SIO,
 	FLCTL_FLSTEI, FLCTL_FLENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I,
 	I2C_ALI, I2C_TACKI, I2C_WAITI, I2C_DTEI,
-	SDHI0, SDHI1, SDHI2, SDHI3,
 	CMT, TSIF, SIU, TWODG,
 	TMU0, TMU1, TMU2,
 	IRDA, JPU, LCDC,
@@ -494,8 +495,8 @@
 	INTC_VECT(FLCTL_FLTREQ0I, 0xdc0), INTC_VECT(FLCTL_FLTREQ1I, 0xde0),
 	INTC_VECT(I2C_ALI, 0xe00), INTC_VECT(I2C_TACKI, 0xe20),
 	INTC_VECT(I2C_WAITI, 0xe40), INTC_VECT(I2C_DTEI, 0xe60),
-	INTC_VECT(SDHI0, 0xe80), INTC_VECT(SDHI1, 0xea0),
-	INTC_VECT(SDHI2, 0xec0), INTC_VECT(SDHI3, 0xee0),
+	INTC_VECT(SDHI, 0xe80), INTC_VECT(SDHI, 0xea0),
+	INTC_VECT(SDHI, 0xec0), INTC_VECT(SDHI, 0xee0),
 	INTC_VECT(CMT, 0xf00), INTC_VECT(TSIF, 0xf20),
 	INTC_VECT(SIU, 0xf80), INTC_VECT(TWODG, 0xfa0),
 	INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420),
@@ -513,7 +514,6 @@
 	INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLENDI,
 		   FLCTL_FLTREQ0I, FLCTL_FLTREQ1I),
 	INTC_GROUP(I2C, I2C_ALI, I2C_TACKI, I2C_WAITI, I2C_DTEI),
-	INTC_GROUP(SDHI, SDHI0, SDHI1, SDHI2, SDHI3),
 };
 
 static struct intc_mask_reg mask_registers[] __initdata = {
@@ -535,7 +535,7 @@
 	  { I2C_DTEI, I2C_WAITI, I2C_TACKI, I2C_ALI,
 	    FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLENDI, FLCTL_FLSTEI } },
 	{ 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */
-	  { SDHI3, SDHI2, SDHI1, SDHI0, 0, 0, TWODG, SIU } },
+	  { DISABLED, DISABLED, ENABLED, ENABLED, 0, 0, TWODG, SIU } },
 	{ 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */
 	  { 0, 0, 0, CMT, 0, USB_USBI1, USB_USBI0, } },
 	{ 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */
@@ -573,9 +573,13 @@
 	  { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
 };
 
-static DECLARE_INTC_DESC_ACK(intc_desc, "sh7722", vectors, groups,
-			     mask_registers, prio_registers, sense_registers,
-			     ack_registers);
+static struct intc_desc intc_desc __initdata = {
+	.name = "sh7722",
+	.force_enable = ENABLED,
+	.force_disable = DISABLED,
+	.hw = INTC_HW_DESC(vectors, groups, mask_registers,
+			   prio_registers, sense_registers, ack_registers),
+};
 
 void __init plat_irq_setup(void)
 {
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c
index 772b926..85c61f6 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c
@@ -592,14 +592,17 @@
 #define RAMCR_CACHE_L2FC	0x0002
 #define RAMCR_CACHE_L2E		0x0001
 #define L2_CACHE_ENABLE		(RAMCR_CACHE_L2E|RAMCR_CACHE_L2FC)
-void __uses_jump_to_uncached l2_cache_init(void)
+
+void l2_cache_init(void)
 {
 	/* Enable L2 cache */
-	ctrl_outl(L2_CACHE_ENABLE, RAMCR);
+	__raw_writel(L2_CACHE_ENABLE, RAMCR);
 }
 
 enum {
 	UNUSED=0,
+	ENABLED,
+	DISABLED,
 
 	/* interrupt sources */
 	IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
@@ -622,7 +625,6 @@
 	SCIFA_SCIFA1,
 	FLCTL_FLSTEI,FLCTL_FLTENDI,FLCTL_FLTREQ0I,FLCTL_FLTREQ1I,
 	I2C_ALI,I2C_TACKI,I2C_WAITI,I2C_DTEI,
-	SDHI0_SDHII0,SDHI0_SDHII1,SDHI0_SDHII2,
 	CMT_CMTI,
 	TSIF_TSIFI,
 	SIU_SIUI,
@@ -630,7 +632,6 @@
 	TMU0_TUNI0, TMU0_TUNI1, TMU0_TUNI2,
 	IRDA_IRDAI,
 	ATAPI_ATAPII,
-	SDHI1_SDHII0,SDHI1_SDHII1,SDHI1_SDHII2,
 	VEU2H1_VEU2HI,
 	LCDC_LCDCI,
 	TMU1_TUNI0,TMU1_TUNI1,TMU1_TUNI2,
@@ -701,9 +702,9 @@
 	INTC_VECT(I2C_WAITI,0xE40),
 	INTC_VECT(I2C_DTEI,0xE60),
 
-	INTC_VECT(SDHI0_SDHII0,0xE80),
-	INTC_VECT(SDHI0_SDHII1,0xEA0),
-	INTC_VECT(SDHI0_SDHII2,0xEC0),
+	INTC_VECT(SDHI0, 0xE80),
+	INTC_VECT(SDHI0, 0xEA0),
+	INTC_VECT(SDHI0, 0xEC0),
 
 	INTC_VECT(CMT_CMTI,0xF00),
 	INTC_VECT(TSIF_TSIFI,0xF20),
@@ -717,9 +718,9 @@
 	INTC_VECT(IRDA_IRDAI,0x480),
 	INTC_VECT(ATAPI_ATAPII,0x4A0),
 
-	INTC_VECT(SDHI1_SDHII0,0x4E0),
-	INTC_VECT(SDHI1_SDHII1,0x500),
-	INTC_VECT(SDHI1_SDHII2,0x520),
+	INTC_VECT(SDHI1, 0x4E0),
+	INTC_VECT(SDHI1, 0x500),
+	INTC_VECT(SDHI1, 0x520),
 
 	INTC_VECT(VEU2H1_VEU2HI,0x560),
 	INTC_VECT(LCDC_LCDCI,0x580),
@@ -738,15 +739,14 @@
 	INTC_GROUP(FLCTL,FLCTL_FLSTEI,FLCTL_FLTENDI,FLCTL_FLTREQ0I,FLCTL_FLTREQ1I),
 	INTC_GROUP(I2C,I2C_ALI,I2C_TACKI,I2C_WAITI,I2C_DTEI),
 	INTC_GROUP(_2DG, _2DG_TRI,_2DG_INI,_2DG_CEI),
-	INTC_GROUP(SDHI1, SDHI1_SDHII0,SDHI1_SDHII1,SDHI1_SDHII2),
 	INTC_GROUP(RTC, RTC_ATI,RTC_PRI,RTC_CUI),
 	INTC_GROUP(DMAC1B, DMAC1B_DEI4,DMAC1B_DEI5,DMAC1B_DADERR),
-	INTC_GROUP(SDHI0,SDHI0_SDHII0,SDHI0_SDHII1,SDHI0_SDHII2),
 };
 
 static struct intc_mask_reg mask_registers[] __initdata = {
 	{ 0xa4080080, 0xa40800c0, 8, /* IMR0 / IMCR0 */
-	  { 0,  TMU1_TUNI2,TMU1_TUNI1,TMU1_TUNI0,0,SDHI1_SDHII2,SDHI1_SDHII1,SDHI1_SDHII0} },
+	  { 0, TMU1_TUNI2, TMU1_TUNI1, TMU1_TUNI0,
+	    0, DISABLED, ENABLED, ENABLED } },
 	{ 0xa4080084, 0xa40800c4, 8, /* IMR1 / IMCR1 */
 	  { VIO_VOUI, VIO_VEU2HI,VIO_BEUI,VIO_CEUI,DMAC0A_DEI3,DMAC0A_DEI2,DMAC0A_DEI1,DMAC0A_DEI0 } },
 	{ 0xa4080088, 0xa40800c8, 8, /* IMR2 / IMCR2 */
@@ -763,7 +763,8 @@
 	  { I2C_DTEI, I2C_WAITI, I2C_TACKI, I2C_ALI,
 	    FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLTENDI, FLCTL_FLSTEI } },
 	{ 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */
-	  { 0,SDHI0_SDHII2,SDHI0_SDHII1,SDHI0_SDHII0,0,0,SCIFA_SCIFA2,SIU_SIUI } },
+	  { 0, DISABLED, ENABLED, ENABLED,
+	    0, 0, SCIFA_SCIFA2, SIU_SIUI } },
 	{ 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */
 	  { 0, 0, 0, CMT_CMTI, 0, 0, USB_USI0,0 } },
 	{ 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */
@@ -803,9 +804,13 @@
 	  { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
 };
 
-static DECLARE_INTC_DESC_ACK(intc_desc, "sh7723", vectors, groups,
-			     mask_registers, prio_registers, sense_registers,
-			     ack_registers);
+static struct intc_desc intc_desc __initdata = {
+	.name = "sh7723",
+	.force_enable = ENABLED,
+	.force_disable = DISABLED,
+	.hw = INTC_HW_DESC(vectors, groups, mask_registers,
+			   prio_registers, sense_registers, ack_registers),
+};
 
 void __init plat_irq_setup(void)
 {
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
index d32f96c..31e3451 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
@@ -714,14 +714,17 @@
 #define RAMCR_CACHE_L2FC	0x0002
 #define RAMCR_CACHE_L2E		0x0001
 #define L2_CACHE_ENABLE		(RAMCR_CACHE_L2E|RAMCR_CACHE_L2FC)
-void __uses_jump_to_uncached l2_cache_init(void)
+
+void l2_cache_init(void)
 {
 	/* Enable L2 cache */
-	ctrl_outl(L2_CACHE_ENABLE, RAMCR);
+	__raw_writel(L2_CACHE_ENABLE, RAMCR);
 }
 
 enum {
 	UNUSED = 0,
+	ENABLED,
+	DISABLED,
 
 	/* interrupt sources */
 	IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
@@ -750,14 +753,12 @@
 	ETHI,
 	I2C1_ALI, I2C1_TACKI, I2C1_WAITI, I2C1_DTEI,
 	I2C0_ALI, I2C0_TACKI, I2C0_WAITI, I2C0_DTEI,
-	SDHI0_SDHII0, SDHI0_SDHII1, SDHI0_SDHII2, SDHI0_SDHII3,
 	CMT,
 	TSIF,
 	FSI,
 	SCIFA5,
 	TMU0_TUNI0, TMU0_TUNI1, TMU0_TUNI2,
 	IRDA,
-	SDHI1_SDHII0, SDHI1_SDHII1, SDHI1_SDHII2,
 	JPU,
 	_2DDMAC,
 	MMC_MMC2I, MMC_MMC3I,
@@ -839,10 +840,10 @@
 	INTC_VECT(I2C0_WAITI, 0xE40),
 	INTC_VECT(I2C0_DTEI, 0xE60),
 
-	INTC_VECT(SDHI0_SDHII0, 0xE80),
-	INTC_VECT(SDHI0_SDHII1, 0xEA0),
-	INTC_VECT(SDHI0_SDHII2, 0xEC0),
-	INTC_VECT(SDHI0_SDHII3, 0xEE0),
+	INTC_VECT(SDHI0, 0xE80),
+	INTC_VECT(SDHI0, 0xEA0),
+	INTC_VECT(SDHI0, 0xEC0),
+	INTC_VECT(SDHI0, 0xEE0),
 
 	INTC_VECT(CMT,    0xF00),
 	INTC_VECT(TSIF,   0xF20),
@@ -855,9 +856,9 @@
 
 	INTC_VECT(IRDA,    0x480),
 
-	INTC_VECT(SDHI1_SDHII0, 0x4E0),
-	INTC_VECT(SDHI1_SDHII1, 0x500),
-	INTC_VECT(SDHI1_SDHII2, 0x520),
+	INTC_VECT(SDHI1, 0x4E0),
+	INTC_VECT(SDHI1, 0x500),
+	INTC_VECT(SDHI1, 0x520),
 
 	INTC_VECT(JPU, 0x560),
 	INTC_VECT(_2DDMAC, 0x4A0),
@@ -883,8 +884,6 @@
 	INTC_GROUP(DMAC0B, DMAC0B_DEI4, DMAC0B_DEI5, DMAC0B_DADERR),
 	INTC_GROUP(I2C0, I2C0_ALI, I2C0_TACKI, I2C0_WAITI, I2C0_DTEI),
 	INTC_GROUP(I2C1, I2C1_ALI, I2C1_TACKI, I2C1_WAITI, I2C1_DTEI),
-	INTC_GROUP(SDHI0, SDHI0_SDHII0, SDHI0_SDHII1, SDHI0_SDHII2, SDHI0_SDHII3),
-	INTC_GROUP(SDHI1, SDHI1_SDHII0, SDHI1_SDHII1, SDHI1_SDHII2),
 	INTC_GROUP(SPU, SPU_SPUI0, SPU_SPUI1),
 	INTC_GROUP(MMCIF, MMC_MMC2I, MMC_MMC3I),
 };
@@ -892,7 +891,7 @@
 static struct intc_mask_reg mask_registers[] __initdata = {
 	{ 0xa4080080, 0xa40800c0, 8, /* IMR0 / IMCR0 */
 	  { 0, TMU1_TUNI2, TMU1_TUNI1, TMU1_TUNI0,
-	    0, SDHI1_SDHII2, SDHI1_SDHII1, SDHI1_SDHII0 } },
+	    0, DISABLED, ENABLED, ENABLED } },
 	{ 0xa4080084, 0xa40800c4, 8, /* IMR1 / IMCR1 */
 	  { VIO_VOU, VIO_VEU1, VIO_BEU0, VIO_CEU0,
 	    DMAC0A_DEI3, DMAC0A_DEI2, DMAC0A_DEI1, DMAC0A_DEI0 } },
@@ -914,7 +913,7 @@
 	  { I2C0_DTEI, I2C0_WAITI, I2C0_TACKI, I2C0_ALI,
 	    I2C1_DTEI, I2C1_WAITI, I2C1_TACKI, I2C1_ALI } },
 	{ 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */
-	  { SDHI0_SDHII3, SDHI0_SDHII2, SDHI0_SDHII1, SDHI0_SDHII0,
+	  { DISABLED, DISABLED, ENABLED, ENABLED,
 	    0, 0, SCIFA5, FSI } },
 	{ 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */
 	  { 0, 0, 0, CMT, 0, USB1, USB0, 0 } },
@@ -961,9 +960,13 @@
 	  { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
 };
 
-static DECLARE_INTC_DESC_ACK(intc_desc, "sh7724", vectors, groups,
-			     mask_registers, prio_registers, sense_registers,
-			     ack_registers);
+static struct intc_desc intc_desc __initdata = {
+	.name = "sh7724",
+	.force_enable = ENABLED,
+	.force_disable = DISABLED,
+	.hw = INTC_HW_DESC(vectors, groups, mask_registers,
+			   prio_registers, sense_registers, ack_registers),
+};
 
 void __init plat_irq_setup(void)
 {
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7757.c b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
index 37e32ef..e75edf5 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
@@ -487,17 +487,17 @@
 void __init plat_irq_setup(void)
 {
 	/* disable IRQ3-0 + IRQ7-4 */
-	ctrl_outl(0xff000000, INTC_INTMSK0);
+	__raw_writel(0xff000000, INTC_INTMSK0);
 
 	/* disable IRL3-0 + IRL7-4 */
-	ctrl_outl(0xc0000000, INTC_INTMSK1);
-	ctrl_outl(0xfffefffe, INTC_INTMSK2);
+	__raw_writel(0xc0000000, INTC_INTMSK1);
+	__raw_writel(0xfffefffe, INTC_INTMSK2);
 
 	/* select IRL mode for IRL3-0 + IRL7-4 */
-	ctrl_outl(ctrl_inl(INTC_ICR0) & ~0x00c00000, INTC_ICR0);
+	__raw_writel(__raw_readl(INTC_ICR0) & ~0x00c00000, INTC_ICR0);
 
 	/* disable holding function, ie enable "SH-4 Mode" */
-	ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00200000, INTC_ICR0);
+	__raw_writel(__raw_readl(INTC_ICR0) | 0x00200000, INTC_ICR0);
 
 	register_intc_controller(&intc_desc);
 }
@@ -507,32 +507,32 @@
 	switch (mode) {
 	case IRQ_MODE_IRQ7654:
 		/* select IRQ mode for IRL7-4 */
-		ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00400000, INTC_ICR0);
+		__raw_writel(__raw_readl(INTC_ICR0) | 0x00400000, INTC_ICR0);
 		register_intc_controller(&intc_desc_irq4567);
 		break;
 	case IRQ_MODE_IRQ3210:
 		/* select IRQ mode for IRL3-0 */
-		ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00800000, INTC_ICR0);
+		__raw_writel(__raw_readl(INTC_ICR0) | 0x00800000, INTC_ICR0);
 		register_intc_controller(&intc_desc_irq0123);
 		break;
 	case IRQ_MODE_IRL7654:
 		/* enable IRL7-4 but don't provide any masking */
-		ctrl_outl(0x40000000, INTC_INTMSKCLR1);
-		ctrl_outl(0x0000fffe, INTC_INTMSKCLR2);
+		__raw_writel(0x40000000, INTC_INTMSKCLR1);
+		__raw_writel(0x0000fffe, INTC_INTMSKCLR2);
 		break;
 	case IRQ_MODE_IRL3210:
 		/* enable IRL0-3 but don't provide any masking */
-		ctrl_outl(0x80000000, INTC_INTMSKCLR1);
-		ctrl_outl(0xfffe0000, INTC_INTMSKCLR2);
+		__raw_writel(0x80000000, INTC_INTMSKCLR1);
+		__raw_writel(0xfffe0000, INTC_INTMSKCLR2);
 		break;
 	case IRQ_MODE_IRL7654_MASK:
 		/* enable IRL7-4 and mask using cpu intc controller */
-		ctrl_outl(0x40000000, INTC_INTMSKCLR1);
+		__raw_writel(0x40000000, INTC_INTMSKCLR1);
 		register_intc_controller(&intc_desc_irl4567);
 		break;
 	case IRQ_MODE_IRL3210_MASK:
 		/* enable IRL0-3 and mask using cpu intc controller */
-		ctrl_outl(0x80000000, INTC_INTMSKCLR1);
+		__raw_writel(0x80000000, INTC_INTMSKCLR1);
 		register_intc_controller(&intc_desc_irl0123);
 		break;
 	default:
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7763.c b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c
index 6aba26f..7f6b0a5 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7763.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c
@@ -538,11 +538,11 @@
 void __init plat_irq_setup(void)
 {
 	/* disable IRQ7-0 */
-	ctrl_outl(0xff000000, INTC_INTMSK0);
+	__raw_writel(0xff000000, INTC_INTMSK0);
 
 	/* disable IRL3-0 + IRL7-4 */
-	ctrl_outl(0xc0000000, INTC_INTMSK1);
-	ctrl_outl(0xfffefffe, INTC_INTMSK2);
+	__raw_writel(0xc0000000, INTC_INTMSK1);
+	__raw_writel(0xfffefffe, INTC_INTMSK2);
 
 	register_intc_controller(&intc_desc);
 }
@@ -552,27 +552,27 @@
 	switch (mode) {
 	case IRQ_MODE_IRQ:
 		/* select IRQ mode for IRL3-0 + IRL7-4 */
-		ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00c00000, INTC_ICR0);
+		__raw_writel(__raw_readl(INTC_ICR0) | 0x00c00000, INTC_ICR0);
 		register_intc_controller(&intc_irq_desc);
 		break;
 	case IRQ_MODE_IRL7654:
 		/* enable IRL7-4 but don't provide any masking */
-		ctrl_outl(0x40000000, INTC_INTMSKCLR1);
-		ctrl_outl(0x0000fffe, INTC_INTMSKCLR2);
+		__raw_writel(0x40000000, INTC_INTMSKCLR1);
+		__raw_writel(0x0000fffe, INTC_INTMSKCLR2);
 		break;
 	case IRQ_MODE_IRL3210:
 		/* enable IRL0-3 but don't provide any masking */
-		ctrl_outl(0x80000000, INTC_INTMSKCLR1);
-		ctrl_outl(0xfffe0000, INTC_INTMSKCLR2);
+		__raw_writel(0x80000000, INTC_INTMSKCLR1);
+		__raw_writel(0xfffe0000, INTC_INTMSKCLR2);
 		break;
 	case IRQ_MODE_IRL7654_MASK:
 		/* enable IRL7-4 and mask using cpu intc controller */
-		ctrl_outl(0x40000000, INTC_INTMSKCLR1);
+		__raw_writel(0x40000000, INTC_INTMSKCLR1);
 		register_intc_controller(&intc_irl7654_desc);
 		break;
 	case IRQ_MODE_IRL3210_MASK:
 		/* enable IRL0-3 and mask using cpu intc controller */
-		ctrl_outl(0x80000000, INTC_INTMSKCLR1);
+		__raw_writel(0x80000000, INTC_INTMSKCLR1);
 		register_intc_controller(&intc_irl3210_desc);
 		break;
 	default:
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7770.c b/arch/sh/kernel/cpu/sh4a/setup-sh7770.c
index c1643bc..86d681e 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7770.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7770.c
@@ -694,17 +694,17 @@
 void __init plat_irq_setup(void)
 {
 	/* disable IRQ7-0 */
-	ctrl_outl(0xff000000, INTC_INTMSK0);
+	__raw_writel(0xff000000, INTC_INTMSK0);
 
 	/* disable IRL3-0 + IRL7-4 */
-	ctrl_outl(0xc0000000, INTC_INTMSK1);
-	ctrl_outl(0xfffefffe, INTC_INTMSK2);
+	__raw_writel(0xc0000000, INTC_INTMSK1);
+	__raw_writel(0xfffefffe, INTC_INTMSK2);
 
 	/* select IRL mode for IRL3-0 + IRL7-4 */
-	ctrl_outl(ctrl_inl(INTC_ICR0) & ~0x00c00000, INTC_ICR0);
+	__raw_writel(__raw_readl(INTC_ICR0) & ~0x00c00000, INTC_ICR0);
 
 	/* disable holding function, ie enable "SH-4 Mode" */
-	ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00200000, INTC_ICR0);
+	__raw_writel(__raw_readl(INTC_ICR0) | 0x00200000, INTC_ICR0);
 
 	register_intc_controller(&intc_desc);
 }
@@ -714,27 +714,27 @@
 	switch (mode) {
 	case IRQ_MODE_IRQ:
 		/* select IRQ mode for IRL3-0 + IRL7-4 */
-		ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00c00000, INTC_ICR0);
+		__raw_writel(__raw_readl(INTC_ICR0) | 0x00c00000, INTC_ICR0);
 		register_intc_controller(&intc_irq_desc);
 		break;
 	case IRQ_MODE_IRL7654:
 		/* enable IRL7-4 but don't provide any masking */
-		ctrl_outl(0x40000000, INTC_INTMSKCLR1);
-		ctrl_outl(0x0000fffe, INTC_INTMSKCLR2);
+		__raw_writel(0x40000000, INTC_INTMSKCLR1);
+		__raw_writel(0x0000fffe, INTC_INTMSKCLR2);
 		break;
 	case IRQ_MODE_IRL3210:
 		/* enable IRL0-3 but don't provide any masking */
-		ctrl_outl(0x80000000, INTC_INTMSKCLR1);
-		ctrl_outl(0xfffe0000, INTC_INTMSKCLR2);
+		__raw_writel(0x80000000, INTC_INTMSKCLR1);
+		__raw_writel(0xfffe0000, INTC_INTMSKCLR2);
 		break;
 	case IRQ_MODE_IRL7654_MASK:
 		/* enable IRL7-4 and mask using cpu intc controller */
-		ctrl_outl(0x40000000, INTC_INTMSKCLR1);
+		__raw_writel(0x40000000, INTC_INTMSKCLR1);
 		register_intc_controller(&intc_irl7654_desc);
 		break;
 	case IRQ_MODE_IRL3210_MASK:
 		/* enable IRL0-3 and mask using cpu intc controller */
-		ctrl_outl(0x80000000, INTC_INTMSKCLR1);
+		__raw_writel(0x80000000, INTC_INTMSKCLR1);
 		register_intc_controller(&intc_irl3210_desc);
 		break;
 	default:
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
index c310558..f8f2161 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
@@ -461,17 +461,17 @@
 void __init plat_irq_setup(void)
 {
 	/* disable IRQ7-0 */
-	ctrl_outl(0xff000000, INTC_INTMSK0);
+	__raw_writel(0xff000000, INTC_INTMSK0);
 
 	/* disable IRL3-0 + IRL7-4 */
-	ctrl_outl(0xc0000000, INTC_INTMSK1);
-	ctrl_outl(0xfffefffe, INTC_INTMSK2);
+	__raw_writel(0xc0000000, INTC_INTMSK1);
+	__raw_writel(0xfffefffe, INTC_INTMSK2);
 
 	/* select IRL mode for IRL3-0 + IRL7-4 */
-	ctrl_outl(ctrl_inl(INTC_ICR0) & ~0x00c00000, INTC_ICR0);
+	__raw_writel(__raw_readl(INTC_ICR0) & ~0x00c00000, INTC_ICR0);
 
 	/* disable holding function, ie enable "SH-4 Mode" */
-	ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00200000, INTC_ICR0);
+	__raw_writel(__raw_readl(INTC_ICR0) | 0x00200000, INTC_ICR0);
 
 	register_intc_controller(&intc_desc);
 }
@@ -481,27 +481,27 @@
 	switch (mode) {
 	case IRQ_MODE_IRQ:
 		/* select IRQ mode for IRL3-0 + IRL7-4 */
-		ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00c00000, INTC_ICR0);
+		__raw_writel(__raw_readl(INTC_ICR0) | 0x00c00000, INTC_ICR0);
 		register_intc_controller(&intc_irq_desc);
 		break;
 	case IRQ_MODE_IRL7654:
 		/* enable IRL7-4 but don't provide any masking */
-		ctrl_outl(0x40000000, INTC_INTMSKCLR1);
-		ctrl_outl(0x0000fffe, INTC_INTMSKCLR2);
+		__raw_writel(0x40000000, INTC_INTMSKCLR1);
+		__raw_writel(0x0000fffe, INTC_INTMSKCLR2);
 		break;
 	case IRQ_MODE_IRL3210:
 		/* enable IRL0-3 but don't provide any masking */
-		ctrl_outl(0x80000000, INTC_INTMSKCLR1);
-		ctrl_outl(0xfffe0000, INTC_INTMSKCLR2);
+		__raw_writel(0x80000000, INTC_INTMSKCLR1);
+		__raw_writel(0xfffe0000, INTC_INTMSKCLR2);
 		break;
 	case IRQ_MODE_IRL7654_MASK:
 		/* enable IRL7-4 and mask using cpu intc controller */
-		ctrl_outl(0x40000000, INTC_INTMSKCLR1);
+		__raw_writel(0x40000000, INTC_INTMSKCLR1);
 		register_intc_controller(&intc_irl7654_desc);
 		break;
 	case IRQ_MODE_IRL3210_MASK:
 		/* enable IRL0-3 and mask using cpu intc controller */
-		ctrl_outl(0x80000000, INTC_INTMSKCLR1);
+		__raw_writel(0x80000000, INTC_INTMSKCLR1);
 		register_intc_controller(&intc_irl3210_desc);
 		break;
 	default:
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
index f685b9b..23448d8 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
@@ -541,17 +541,17 @@
 void __init plat_irq_setup(void)
 {
 	/* disable IRQ3-0 + IRQ7-4 */
-	ctrl_outl(0xff000000, INTC_INTMSK0);
+	__raw_writel(0xff000000, INTC_INTMSK0);
 
 	/* disable IRL3-0 + IRL7-4 */
-	ctrl_outl(0xc0000000, INTC_INTMSK1);
-	ctrl_outl(0xfffefffe, INTC_INTMSK2);
+	__raw_writel(0xc0000000, INTC_INTMSK1);
+	__raw_writel(0xfffefffe, INTC_INTMSK2);
 
 	/* select IRL mode for IRL3-0 + IRL7-4 */
-	ctrl_outl(ctrl_inl(INTC_ICR0) & ~0x00c00000, INTC_ICR0);
+	__raw_writel(__raw_readl(INTC_ICR0) & ~0x00c00000, INTC_ICR0);
 
 	/* disable holding function, ie enable "SH-4 Mode" */
-	ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00200000, INTC_ICR0);
+	__raw_writel(__raw_readl(INTC_ICR0) | 0x00200000, INTC_ICR0);
 
 	register_intc_controller(&intc_desc);
 }
@@ -561,32 +561,32 @@
 	switch (mode) {
 	case IRQ_MODE_IRQ7654:
 		/* select IRQ mode for IRL7-4 */
-		ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00400000, INTC_ICR0);
+		__raw_writel(__raw_readl(INTC_ICR0) | 0x00400000, INTC_ICR0);
 		register_intc_controller(&intc_desc_irq4567);
 		break;
 	case IRQ_MODE_IRQ3210:
 		/* select IRQ mode for IRL3-0 */
-		ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00800000, INTC_ICR0);
+		__raw_writel(__raw_readl(INTC_ICR0) | 0x00800000, INTC_ICR0);
 		register_intc_controller(&intc_desc_irq0123);
 		break;
 	case IRQ_MODE_IRL7654:
 		/* enable IRL7-4 but don't provide any masking */
-		ctrl_outl(0x40000000, INTC_INTMSKCLR1);
-		ctrl_outl(0x0000fffe, INTC_INTMSKCLR2);
+		__raw_writel(0x40000000, INTC_INTMSKCLR1);
+		__raw_writel(0x0000fffe, INTC_INTMSKCLR2);
 		break;
 	case IRQ_MODE_IRL3210:
 		/* enable IRL0-3 but don't provide any masking */
-		ctrl_outl(0x80000000, INTC_INTMSKCLR1);
-		ctrl_outl(0xfffe0000, INTC_INTMSKCLR2);
+		__raw_writel(0x80000000, INTC_INTMSKCLR1);
+		__raw_writel(0xfffe0000, INTC_INTMSKCLR2);
 		break;
 	case IRQ_MODE_IRL7654_MASK:
 		/* enable IRL7-4 and mask using cpu intc controller */
-		ctrl_outl(0x40000000, INTC_INTMSKCLR1);
+		__raw_writel(0x40000000, INTC_INTMSKCLR1);
 		register_intc_controller(&intc_desc_irl4567);
 		break;
 	case IRQ_MODE_IRL3210_MASK:
 		/* enable IRL0-3 and mask using cpu intc controller */
-		ctrl_outl(0x80000000, INTC_INTMSKCLR1);
+		__raw_writel(0x80000000, INTC_INTMSKCLR1);
 		register_intc_controller(&intc_desc_irl0123);
 		break;
 	default:
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
index 7167348..7e58532 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
@@ -867,14 +867,14 @@
 void __init plat_irq_setup(void)
 {
 	/* disable IRQ3-0 + IRQ7-4 */
-	ctrl_outl(0xff000000, INTC_INTMSK0);
+	__raw_writel(0xff000000, INTC_INTMSK0);
 
 	/* disable IRL3-0 + IRL7-4 */
-	ctrl_outl(0xc0000000, INTC_INTMSK1);
-	ctrl_outl(0xfffefffe, INTC_INTMSK2);
+	__raw_writel(0xc0000000, INTC_INTMSK1);
+	__raw_writel(0xfffefffe, INTC_INTMSK2);
 
 	/* select IRL mode for IRL3-0 + IRL7-4 */
-	ctrl_outl(ctrl_inl(INTC_ICR0) & ~0x00c00000, INTC_ICR0);
+	__raw_writel(__raw_readl(INTC_ICR0) & ~0x00c00000, INTC_ICR0);
 
 	register_intc_controller(&intc_desc);
 }
@@ -884,32 +884,32 @@
 	switch (mode) {
 	case IRQ_MODE_IRQ7654:
 		/* select IRQ mode for IRL7-4 */
-		ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00400000, INTC_ICR0);
+		__raw_writel(__raw_readl(INTC_ICR0) | 0x00400000, INTC_ICR0);
 		register_intc_controller(&intc_desc_irq4567);
 		break;
 	case IRQ_MODE_IRQ3210:
 		/* select IRQ mode for IRL3-0 */
-		ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00800000, INTC_ICR0);
+		__raw_writel(__raw_readl(INTC_ICR0) | 0x00800000, INTC_ICR0);
 		register_intc_controller(&intc_desc_irq0123);
 		break;
 	case IRQ_MODE_IRL7654:
 		/* enable IRL7-4 but don't provide any masking */
-		ctrl_outl(0x40000000, INTC_INTMSKCLR1);
-		ctrl_outl(0x0000fffe, INTC_INTMSKCLR2);
+		__raw_writel(0x40000000, INTC_INTMSKCLR1);
+		__raw_writel(0x0000fffe, INTC_INTMSKCLR2);
 		break;
 	case IRQ_MODE_IRL3210:
 		/* enable IRL0-3 but don't provide any masking */
-		ctrl_outl(0x80000000, INTC_INTMSKCLR1);
-		ctrl_outl(0xfffe0000, INTC_INTMSKCLR2);
+		__raw_writel(0x80000000, INTC_INTMSKCLR1);
+		__raw_writel(0xfffe0000, INTC_INTMSKCLR2);
 		break;
 	case IRQ_MODE_IRL7654_MASK:
 		/* enable IRL7-4 and mask using cpu intc controller */
-		ctrl_outl(0x40000000, INTC_INTMSKCLR1);
+		__raw_writel(0x40000000, INTC_INTMSKCLR1);
 		register_intc_controller(&intc_desc_irl4567);
 		break;
 	case IRQ_MODE_IRL3210_MASK:
 		/* enable IRL0-3 and mask using cpu intc controller */
-		ctrl_outl(0x80000000, INTC_INTMSKCLR1);
+		__raw_writel(0x80000000, INTC_INTMSKCLR1);
 		register_intc_controller(&intc_desc_irl0123);
 		break;
 	default:
diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
index 5863e0c..11bf4c1 100644
--- a/arch/sh/kernel/cpu/sh4a/smp-shx3.c
+++ b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
@@ -78,7 +78,10 @@
 
 void plat_start_cpu(unsigned int cpu, unsigned long entry_point)
 {
-	__raw_writel(entry_point, RESET_REG(cpu));
+	if (__in_29bit_mode())
+		__raw_writel(entry_point, RESET_REG(cpu));
+	else
+		__raw_writel(virt_to_phys(entry_point), RESET_REG(cpu));
 
 	if (!(__raw_readl(STBCR_REG(cpu)) & STBCR_MSTP))
 		__raw_writel(STBCR_MSTP, STBCR_REG(cpu));
diff --git a/arch/sh/kernel/cpu/sh4a/ubc.c b/arch/sh/kernel/cpu/sh4a/ubc.c
new file mode 100644
index 0000000..efb2745
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/ubc.c
@@ -0,0 +1,133 @@
+/*
+ * arch/sh/kernel/cpu/sh4a/ubc.c
+ *
+ * On-chip UBC support for SH-4A CPUs.
+ *
+ * Copyright (C) 2009 - 2010  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <asm/hw_breakpoint.h>
+
+#define UBC_CBR(idx)	(0xff200000 + (0x20 * idx))
+#define UBC_CRR(idx)	(0xff200004 + (0x20 * idx))
+#define UBC_CAR(idx)	(0xff200008 + (0x20 * idx))
+#define UBC_CAMR(idx)	(0xff20000c + (0x20 * idx))
+
+#define UBC_CCMFR	0xff200600
+#define UBC_CBCR	0xff200620
+
+/* CRR */
+#define UBC_CRR_PCB	(1 << 1)
+#define UBC_CRR_BIE	(1 << 0)
+
+/* CBR */
+#define UBC_CBR_CE	(1 << 0)
+
+static struct sh_ubc sh4a_ubc;
+
+static void sh4a_ubc_enable(struct arch_hw_breakpoint *info, int idx)
+{
+	__raw_writel(UBC_CBR_CE | info->len | info->type, UBC_CBR(idx));
+	__raw_writel(info->address, UBC_CAR(idx));
+}
+
+static void sh4a_ubc_disable(struct arch_hw_breakpoint *info, int idx)
+{
+	__raw_writel(0, UBC_CBR(idx));
+	__raw_writel(0, UBC_CAR(idx));
+}
+
+static void sh4a_ubc_enable_all(unsigned long mask)
+{
+	int i;
+
+	for (i = 0; i < sh4a_ubc.num_events; i++)
+		if (mask & (1 << i))
+			__raw_writel(__raw_readl(UBC_CBR(i)) | UBC_CBR_CE,
+				     UBC_CBR(i));
+}
+
+static void sh4a_ubc_disable_all(void)
+{
+	int i;
+
+	for (i = 0; i < sh4a_ubc.num_events; i++)
+		__raw_writel(__raw_readl(UBC_CBR(i)) & ~UBC_CBR_CE,
+			     UBC_CBR(i));
+}
+
+static unsigned long sh4a_ubc_active_mask(void)
+{
+	unsigned long active = 0;
+	int i;
+
+	for (i = 0; i < sh4a_ubc.num_events; i++)
+		if (__raw_readl(UBC_CBR(i)) & UBC_CBR_CE)
+			active |= (1 << i);
+
+	return active;
+}
+
+static unsigned long sh4a_ubc_triggered_mask(void)
+{
+	return __raw_readl(UBC_CCMFR);
+}
+
+static void sh4a_ubc_clear_triggered_mask(unsigned long mask)
+{
+	__raw_writel(__raw_readl(UBC_CCMFR) & ~mask, UBC_CCMFR);
+}
+
+static struct sh_ubc sh4a_ubc = {
+	.name			= "SH-4A",
+	.num_events		= 2,
+	.trap_nr		= 0x1e0,
+	.enable			= sh4a_ubc_enable,
+	.disable		= sh4a_ubc_disable,
+	.enable_all		= sh4a_ubc_enable_all,
+	.disable_all		= sh4a_ubc_disable_all,
+	.active_mask		= sh4a_ubc_active_mask,
+	.triggered_mask		= sh4a_ubc_triggered_mask,
+	.clear_triggered_mask	= sh4a_ubc_clear_triggered_mask,
+};
+
+static int __init sh4a_ubc_init(void)
+{
+	struct clk *ubc_iclk = clk_get(NULL, "ubc0");
+	int i;
+
+	/*
+	 * The UBC MSTP bit is optional, as not all platforms will have
+	 * it. Just ignore it if we can't find it.
+	 */
+	if (IS_ERR(ubc_iclk))
+		ubc_iclk = NULL;
+
+	clk_enable(ubc_iclk);
+
+	__raw_writel(0, UBC_CBCR);
+
+	for (i = 0; i < sh4a_ubc.num_events; i++) {
+		__raw_writel(0, UBC_CAMR(i));
+		__raw_writel(0, UBC_CBR(i));
+
+		__raw_writel(UBC_CRR_BIE | UBC_CRR_PCB, UBC_CRR(i));
+
+		/* dummy read for write posting */
+		(void)__raw_readl(UBC_CRR(i));
+	}
+
+	clk_disable(ubc_iclk);
+
+	sh4a_ubc.clk = ubc_iclk;
+
+	return register_sh_ubc(&sh4a_ubc);
+}
+arch_initcall(sh4a_ubc_init);
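The new UBC driver also shows the optional MSTP clock pattern used elsewhere in this series: clk_get() may legitimately fail on parts that have no clock gate for the block, in which case the clock is simply treated as absent. A minimal sketch of the same pattern for some other peripheral; the "example0" clock name is hypothetical, and the NULL pointer is guarded explicitly here rather than relying on the framework:

	struct clk *iclk = clk_get(NULL, "example0");

	if (IS_ERR(iclk))
		iclk = NULL;	/* no MSTP gate on this platform; nothing to enable */

	if (iclk)
		clk_enable(iclk);
	/* ... program the hardware ... */
	if (iclk)
		clk_disable(iclk);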
diff --git a/arch/sh/kernel/cpu/sh5/clock-sh5.c b/arch/sh/kernel/cpu/sh5/clock-sh5.c
index 7f864eb..9cfc19b 100644
--- a/arch/sh/kernel/cpu/sh5/clock-sh5.c
+++ b/arch/sh/kernel/cpu/sh5/clock-sh5.c
@@ -24,7 +24,7 @@
 
 static void master_clk_init(struct clk *clk)
 {
-	int idx = (ctrl_inl(cprc_base + 0x00) >> 6) & 0x0007;
+	int idx = (__raw_readl(cprc_base + 0x00) >> 6) & 0x0007;
 	clk->rate *= ifc_table[idx];
 }
 
@@ -34,7 +34,7 @@
 
 static unsigned long module_clk_recalc(struct clk *clk)
 {
-	int idx = (ctrl_inw(cprc_base) >> 12) & 0x0007;
+	int idx = (__raw_readw(cprc_base) >> 12) & 0x0007;
 	return clk->parent->rate / ifc_table[idx];
 }
 
@@ -44,7 +44,7 @@
 
 static unsigned long bus_clk_recalc(struct clk *clk)
 {
-	int idx = (ctrl_inw(cprc_base) >> 3) & 0x0007;
+	int idx = (__raw_readw(cprc_base) >> 3) & 0x0007;
 	return clk->parent->rate / ifc_table[idx];
 }
 
@@ -54,7 +54,7 @@
 
 static unsigned long cpu_clk_recalc(struct clk *clk)
 {
-	int idx = (ctrl_inw(cprc_base) & 0x0007);
+	int idx = (__raw_readw(cprc_base) & 0x0007);
 	return clk->parent->rate / ifc_table[idx];
 }
 
diff --git a/arch/sh/kernel/cpu/sh5/entry.S b/arch/sh/kernel/cpu/sh5/entry.S
index 8f13f73..6b80295 100644
--- a/arch/sh/kernel/cpu/sh5/entry.S
+++ b/arch/sh/kernel/cpu/sh5/entry.S
@@ -187,7 +187,7 @@
 	.rept 6
 		.long do_exception_error	/* 0x880 - 0x920 */
 	.endr
-	.long	do_software_break_point	/* 0x940 */
+	.long	breakpoint_trap_handler	/* 0x940 */
 	.long	do_exception_error		/* 0x960 */
 	.long	do_single_step		/* 0x980 */
 
@@ -1124,7 +1124,7 @@
 	pta	its_IRQ, tr0
 	beqi/l	r4, EVENT_INTERRUPT, tr0
 #ifdef CONFIG_SH_FPU
-	movi	do_fpu_state_restore, r6
+	movi	fpu_state_restore_trap_handler, r6
 #else
 	movi	do_exception_error, r6
 #endif
@@ -1135,7 +1135,7 @@
 	pta	its_IRQ, tr0
 	beqi/l	r4, EVENT_INTERRUPT, tr0
 #ifdef CONFIG_SH_FPU
-	movi	do_fpu_state_restore, r6
+	movi	fpu_state_restore_trap_handler, r6
 #else
 	movi	do_exception_error, r6
 #endif
diff --git a/arch/sh/kernel/cpu/sh5/fpu.c b/arch/sh/kernel/cpu/sh5/fpu.c
index 4648cce..4b3bb35 100644
--- a/arch/sh/kernel/cpu/sh5/fpu.c
+++ b/arch/sh/kernel/cpu/sh5/fpu.c
@@ -15,24 +15,6 @@
 #include <linux/sched.h>
 #include <linux/signal.h>
 #include <asm/processor.h>
-#include <asm/user.h>
-#include <asm/io.h>
-#include <asm/fpu.h>
-
-/*
- * Initially load the FPU with signalling NANS.  This bit pattern
- * has the property that no matter whether considered as single or as
- * double precision, it still represents a signalling NAN.
- */
-#define sNAN64		0xFFFFFFFFFFFFFFFFULL
-#define sNAN32		0xFFFFFFFFUL
-
-static union sh_fpu_union init_fpuregs = {
-	.hard = {
-		.fp_regs = { [0 ... 63] = sNAN32 },
-		.fpscr = FPSCR_INIT
-	}
-};
 
 void save_fpu(struct task_struct *tsk)
 {
@@ -72,12 +54,11 @@
 		     "fgetscr   fr63\n\t"
 		     "fst.s     %0, (32*8), fr63\n\t"
 		: /* no output */
-		: "r" (&tsk->thread.fpu.hard)
+		: "r" (&tsk->thread.xstate->hardfpu)
 		: "memory");
 }
 
-static inline void
-fpload(struct sh_fpu_hard_struct *fpregs)
+void restore_fpu(struct task_struct *tsk)
 {
 	asm volatile("fld.p     %0, (0*8), fp0\n\t"
 		     "fld.p     %0, (1*8), fp2\n\t"
@@ -116,16 +97,11 @@
 
 		     "fld.p     %0, (31*8), fp62\n\t"
 		: /* no output */
-		: "r" (fpregs) );
+		: "r" (&tsk->thread.xstate->hardfpu)
+		: "memory");
 }
 
-void fpinit(struct sh_fpu_hard_struct *fpregs)
-{
-	*fpregs = init_fpuregs.hard;
-}
-
-asmlinkage void
-do_fpu_error(unsigned long ex, struct pt_regs *regs)
+asmlinkage void do_fpu_error(unsigned long ex, struct pt_regs *regs)
 {
 	struct task_struct *tsk = current;
 
@@ -133,35 +109,6 @@
 
 	tsk->thread.trap_no = 11;
 	tsk->thread.error_code = 0;
+
 	force_sig(SIGFPE, tsk);
 }
-
-
-asmlinkage void
-do_fpu_state_restore(unsigned long ex, struct pt_regs *regs)
-{
-	void die(const char *str, struct pt_regs *regs, long err);
-
-	if (! user_mode(regs))
-		die("FPU used in kernel", regs, ex);
-
-	regs->sr &= ~SR_FD;
-
-	if (last_task_used_math == current)
-		return;
-
-	enable_fpu();
-	if (last_task_used_math != NULL)
-		/* Other processes fpu state, save away */
-		save_fpu(last_task_used_math);
-
-        last_task_used_math = current;
-        if (used_math()) {
-                fpload(&current->thread.fpu.hard);
-        } else {
-		/* First time FPU user.  */
-		fpload(&init_fpuregs.hard);
-                set_used_math();
-        }
-	disable_fpu();
-}
diff --git a/arch/sh/kernel/cpu/shmobile/pm.c b/arch/sh/kernel/cpu/shmobile/pm.c
index ca029a4..e559687 100644
--- a/arch/sh/kernel/cpu/shmobile/pm.c
+++ b/arch/sh/kernel/cpu/shmobile/pm.c
@@ -33,7 +33,8 @@
 #define SUSP_MODE_SLEEP		(SUSP_SH_SLEEP)
 #define SUSP_MODE_SLEEP_SF	(SUSP_SH_SLEEP | SUSP_SH_SF)
 #define SUSP_MODE_STANDBY_SF	(SUSP_SH_STANDBY | SUSP_SH_SF)
-#define SUSP_MODE_RSTANDBY	(SUSP_SH_RSTANDBY | SUSP_SH_MMU | SUSP_SH_SF)
+#define SUSP_MODE_RSTANDBY_SF \
+	(SUSP_SH_RSTANDBY | SUSP_SH_MMU | SUSP_SH_REGS | SUSP_SH_SF)
  /*
   * U-standby mode is unsupported since it needs bootloader hacks
   */
diff --git a/arch/sh/kernel/cpu/shmobile/sleep.S b/arch/sh/kernel/cpu/shmobile/sleep.S
index e9dd7fa..e6aac65 100644
--- a/arch/sh/kernel/cpu/shmobile/sleep.S
+++ b/arch/sh/kernel/cpu/shmobile/sleep.S
@@ -48,8 +48,48 @@
 	stc	sr, r0
 	mov.l	r0, @(SH_SLEEP_SR, r5)
 
-	/* save sp */
+	/* save general purpose registers to stack if needed */
+	mov.l	@(SH_SLEEP_MODE, r5), r0
+	tst	#SUSP_SH_REGS, r0
+	bt	skip_regs_save
+
+	sts.l	pr, @-r15
+	mov.l	r14, @-r15
+	mov.l	r13, @-r15
+	mov.l	r12, @-r15
+	mov.l	r11, @-r15
+	mov.l	r10, @-r15
+	mov.l	r9, @-r15
+	mov.l	r8, @-r15
+
+	/* make sure bank0 is selected, save low registers */
+	mov.l	rb_bit, r9
+	not	r9, r9
+	bsr	set_sr
+	 mov	#0, r10
+
+	bsr	save_low_regs
+	 nop
+
+	/* switch to bank 1, save low registers */
+	mov.l	rb_bit, r10
+	bsr	set_sr
+	 mov	#-1, r9
+
+	bsr	save_low_regs
+	 nop
+
+	/* switch back to bank 0 */
+	mov.l	rb_bit, r9
+	not	r9, r9
+	bsr	set_sr
+	 mov	#0, r10
+
+skip_regs_save:
+
+	/* save sp, also set to internal ram */
 	mov.l	r15, @(SH_SLEEP_SP, r5)
+	mov	r5, r15
 
 	/* save stbcr */
 	bsr     save_register
@@ -60,7 +100,7 @@
 	tst	#SUSP_SH_MMU, r0
 	bt	skip_mmu_save_disable
 
-       /* save mmu state */
+	/* save mmu state */
 	bsr	save_register
 	 mov	#SH_SLEEP_REG_PTEH, r0
 
@@ -177,6 +217,29 @@
 	mov.l	@(r0, r5), r0
 	rts
 	 nop
+
+set_sr:
+	stc	sr, r8
+	and	r9, r8
+	or	r10, r8
+	ldc	r8, sr
+	rts
+	 nop
+
+save_low_regs:
+	mov.l	r7, @-r15
+	mov.l	r6, @-r15
+	mov.l	r5, @-r15
+	mov.l	r4, @-r15
+	mov.l	r3, @-r15
+	mov.l	r2, @-r15
+	mov.l	r1, @-r15
+	rts
+	 mov.l	r0, @-r15
+
+	.balign 4
+rb_bit:	.long	0x20000000 ! RB=1
+
 ENTRY(sh_mobile_sleep_enter_end)
 
 	.balign 4
@@ -270,6 +333,40 @@
 	icbi	@r0
 
 skip_restore_mmu:
+
+	/* restore general purpose registers if needed */
+	mov.l	@(SH_SLEEP_MODE, r5), r0
+	tst	#SUSP_SH_REGS, r0
+	bt	skip_restore_regs
+
+	/* switch to bank 1, restore low registers */
+	mov.l	_rb_bit, r10
+	bsr	_set_sr
+	 mov	#-1, r9
+
+	bsr	restore_low_regs
+	 nop
+
+	/* switch to bank0, restore low registers */
+	mov.l	_rb_bit, r9
+	not	r9, r9
+	bsr	_set_sr
+	 mov	#0, r10
+
+	bsr	restore_low_regs
+	 nop
+
+	/* restore the rest of the registers */
+	mov.l	@r15+, r8
+	mov.l	@r15+, r9
+	mov.l	@r15+, r10
+	mov.l	@r15+, r11
+	mov.l	@r15+, r12
+	mov.l	@r15+, r13
+	mov.l	@r15+, r14
+	lds.l	@r15+, pr
+
+skip_restore_regs:
 	rte
 	 nop
 
@@ -283,6 +380,26 @@
 	rts
 	 nop
 
+_set_sr:
+	stc	sr, r8
+	and	r9, r8
+	or	r10, r8
+	ldc	r8, sr
+	rts
+	 nop
+
+restore_low_regs:
+	mov.l	@r15+, r0
+	mov.l	@r15+, r1
+	mov.l	@r15+, r2
+	mov.l	@r15+, r3
+	mov.l	@r15+, r4
+	mov.l	@r15+, r5
+	mov.l	@r15+, r6
+	rts
+	 mov.l	@r15+, r7
+
 	.balign 4
+_rb_bit:	.long	0x20000000 ! RB=1
 1:	.long	~0x7ff
 ENTRY(sh_mobile_sleep_resume_end)
diff --git a/arch/sh/kernel/debugtraps.S b/arch/sh/kernel/debugtraps.S
index 5917413..7a1b46f 100644
--- a/arch/sh/kernel/debugtraps.S
+++ b/arch/sh/kernel/debugtraps.S
@@ -13,7 +13,6 @@
 #include <linux/linkage.h>
 
 #if !defined(CONFIG_KGDB)
-#define breakpoint_trap_handler		debug_trap_handler
 #define singlestep_trap_handler		debug_trap_handler
 #endif
 
diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c
index e511680..bd1c497 100644
--- a/arch/sh/kernel/dwarf.c
+++ b/arch/sh/kernel/dwarf.c
@@ -39,10 +39,10 @@
 static struct kmem_cache *dwarf_reg_cachep;
 static mempool_t *dwarf_reg_pool;
 
-static LIST_HEAD(dwarf_cie_list);
+static struct rb_root cie_root;
 static DEFINE_SPINLOCK(dwarf_cie_lock);
 
-static LIST_HEAD(dwarf_fde_list);
+static struct rb_root fde_root;
 static DEFINE_SPINLOCK(dwarf_fde_lock);
 
 static struct dwarf_cie *cached_cie;
@@ -301,7 +301,8 @@
  */
 static struct dwarf_cie *dwarf_lookup_cie(unsigned long cie_ptr)
 {
-	struct dwarf_cie *cie;
+	struct rb_node **rb_node = &cie_root.rb_node;
+	struct dwarf_cie *cie = NULL;
 	unsigned long flags;
 
 	spin_lock_irqsave(&dwarf_cie_lock, flags);
@@ -315,16 +316,24 @@
 		goto out;
 	}
 
-	list_for_each_entry(cie, &dwarf_cie_list, link) {
-		if (cie->cie_pointer == cie_ptr) {
-			cached_cie = cie;
-			break;
+	while (*rb_node) {
+		struct dwarf_cie *cie_tmp;
+
+		cie_tmp = rb_entry(*rb_node, struct dwarf_cie, node);
+		BUG_ON(!cie_tmp);
+
+		if (cie_ptr == cie_tmp->cie_pointer) {
+			cie = cie_tmp;
+			cached_cie = cie_tmp;
+			goto out;
+		} else {
+			if (cie_ptr < cie_tmp->cie_pointer)
+				rb_node = &(*rb_node)->rb_left;
+			else
+				rb_node = &(*rb_node)->rb_right;
 		}
 	}
 
-	/* Couldn't find the entry in the list. */
-	if (&cie->link == &dwarf_cie_list)
-		cie = NULL;
 out:
 	spin_unlock_irqrestore(&dwarf_cie_lock, flags);
 	return cie;
@@ -336,25 +345,34 @@
  */
 struct dwarf_fde *dwarf_lookup_fde(unsigned long pc)
 {
-	struct dwarf_fde *fde;
+	struct rb_node **rb_node = &fde_root.rb_node;
+	struct dwarf_fde *fde = NULL;
 	unsigned long flags;
 
 	spin_lock_irqsave(&dwarf_fde_lock, flags);
 
-	list_for_each_entry(fde, &dwarf_fde_list, link) {
-		unsigned long start, end;
+	while (*rb_node) {
+		struct dwarf_fde *fde_tmp;
+		unsigned long tmp_start, tmp_end;
 
-		start = fde->initial_location;
-		end = fde->initial_location + fde->address_range;
+		fde_tmp = rb_entry(*rb_node, struct dwarf_fde, node);
+		BUG_ON(!fde_tmp);
 
-		if (pc >= start && pc < end)
-			break;
+		tmp_start = fde_tmp->initial_location;
+		tmp_end = fde_tmp->initial_location + fde_tmp->address_range;
+
+		if (pc < tmp_start) {
+			rb_node = &(*rb_node)->rb_left;
+		} else {
+			if (pc < tmp_end) {
+				fde = fde_tmp;
+				goto out;
+			} else
+				rb_node = &(*rb_node)->rb_right;
+		}
 	}
 
-	/* Couldn't find the entry in the list. */
-	if (&fde->link == &dwarf_fde_list)
-		fde = NULL;
-
+out:
 	spin_unlock_irqrestore(&dwarf_fde_lock, flags);
 
 	return fde;
@@ -552,8 +570,8 @@
  *	on the callstack. Each of the lower (older) stack frames are
  *	linked via the "prev" member.
  */
-struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
-					struct dwarf_frame *prev)
+struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
+				       struct dwarf_frame *prev)
 {
 	struct dwarf_frame *frame;
 	struct dwarf_cie *cie;
@@ -708,6 +726,8 @@
 static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
 			   unsigned char *end, struct module *mod)
 {
+	struct rb_node **rb_node = &cie_root.rb_node;
+	struct rb_node *parent;
 	struct dwarf_cie *cie;
 	unsigned long flags;
 	int count;
@@ -802,11 +822,30 @@
 	cie->initial_instructions = p;
 	cie->instructions_end = end;
 
-	cie->mod = mod;
-
 	/* Add to list */
 	spin_lock_irqsave(&dwarf_cie_lock, flags);
-	list_add_tail(&cie->link, &dwarf_cie_list);
+
+	while (*rb_node) {
+		struct dwarf_cie *cie_tmp;
+
+		cie_tmp = rb_entry(*rb_node, struct dwarf_cie, node);
+
+		parent = *rb_node;
+
+		if (cie->cie_pointer < cie_tmp->cie_pointer)
+			rb_node = &parent->rb_left;
+		else if (cie->cie_pointer >= cie_tmp->cie_pointer)
+			rb_node = &parent->rb_right;
+		else
+			WARN_ON(1);
+	}
+
+	rb_link_node(&cie->node, parent, rb_node);
+	rb_insert_color(&cie->node, &cie_root);
+
+	if (mod != NULL)
+		list_add_tail(&cie->link, &mod->arch.cie_list);
+
 	spin_unlock_irqrestore(&dwarf_cie_lock, flags);
 
 	return 0;
@@ -816,6 +855,8 @@
 			   void *start, unsigned long len,
 			   unsigned char *end, struct module *mod)
 {
+	struct rb_node **rb_node = &fde_root.rb_node;
+	struct rb_node *parent;
 	struct dwarf_fde *fde;
 	struct dwarf_cie *cie;
 	unsigned long flags;
@@ -863,11 +904,38 @@
 	fde->instructions = p;
 	fde->end = end;
 
-	fde->mod = mod;
-
 	/* Add to list. */
 	spin_lock_irqsave(&dwarf_fde_lock, flags);
-	list_add_tail(&fde->link, &dwarf_fde_list);
+
+	while (*rb_node) {
+		struct dwarf_fde *fde_tmp;
+		unsigned long tmp_start, tmp_end;
+		unsigned long start, end;
+
+		fde_tmp = rb_entry(*rb_node, struct dwarf_fde, node);
+
+		start = fde->initial_location;
+		end = fde->initial_location + fde->address_range;
+
+		tmp_start = fde_tmp->initial_location;
+		tmp_end = fde_tmp->initial_location + fde_tmp->address_range;
+
+		parent = *rb_node;
+
+		if (start < tmp_start)
+			rb_node = &parent->rb_left;
+		else if (start >= tmp_end)
+			rb_node = &parent->rb_right;
+		else
+			WARN_ON(1);
+	}
+
+	rb_link_node(&fde->node, parent, rb_node);
+	rb_insert_color(&fde->node, &fde_root);
+
+	if (mod != NULL)
+		list_add_tail(&fde->link, &mod->arch.fde_list);
+
 	spin_unlock_irqrestore(&dwarf_fde_lock, flags);
 
 	return 0;
@@ -912,19 +980,29 @@
 
 static void dwarf_unwinder_cleanup(void)
 {
-	struct dwarf_cie *cie, *cie_tmp;
-	struct dwarf_fde *fde, *fde_tmp;
+	struct rb_node **fde_rb_node = &fde_root.rb_node;
+	struct rb_node **cie_rb_node = &cie_root.rb_node;
 
 	/*
 	 * Deallocate all the memory allocated for the DWARF unwinder.
 	 * Traverse all the FDE/CIE lists and remove and free all the
 	 * memory associated with those data structures.
 	 */
-	list_for_each_entry_safe(cie, cie_tmp, &dwarf_cie_list, link)
-		kfree(cie);
+	while (*fde_rb_node) {
+		struct dwarf_fde *fde;
 
-	list_for_each_entry_safe(fde, fde_tmp, &dwarf_fde_list, link)
+		fde = rb_entry(*fde_rb_node, struct dwarf_fde, node);
+		rb_erase(*fde_rb_node, &fde_root);
 		kfree(fde);
+	}
+
+	while (*cie_rb_node) {
+		struct dwarf_cie *cie;
+
+		cie = rb_entry(*cie_rb_node, struct dwarf_cie, node);
+		rb_erase(*cie_rb_node, &cie_root);
+		kfree(cie);
+	}
 
 	kmem_cache_destroy(dwarf_reg_cachep);
 	kmem_cache_destroy(dwarf_frame_cachep);
@@ -1024,6 +1102,8 @@
 
 	/* Did we find the .eh_frame section? */
 	if (i != hdr->e_shnum) {
+		INIT_LIST_HEAD(&me->arch.cie_list);
+		INIT_LIST_HEAD(&me->arch.fde_list);
 		err = dwarf_parse_section((char *)start, (char *)end, me);
 		if (err) {
 			printk(KERN_WARNING "%s: failed to parse DWARF info\n",
@@ -1044,38 +1124,26 @@
  */
 void module_dwarf_cleanup(struct module *mod)
 {
-	struct dwarf_fde *fde;
-	struct dwarf_cie *cie;
+	struct dwarf_fde *fde, *ftmp;
+	struct dwarf_cie *cie, *ctmp;
 	unsigned long flags;
 
 	spin_lock_irqsave(&dwarf_cie_lock, flags);
 
-again_cie:
-	list_for_each_entry(cie, &dwarf_cie_list, link) {
-		if (cie->mod == mod)
-			break;
-	}
-
-	if (&cie->link != &dwarf_cie_list) {
+	list_for_each_entry_safe(cie, ctmp, &mod->arch.cie_list, link) {
 		list_del(&cie->link);
+		rb_erase(&cie->node, &cie_root);
 		kfree(cie);
-		goto again_cie;
 	}
 
 	spin_unlock_irqrestore(&dwarf_cie_lock, flags);
 
 	spin_lock_irqsave(&dwarf_fde_lock, flags);
 
-again_fde:
-	list_for_each_entry(fde, &dwarf_fde_list, link) {
-		if (fde->mod == mod)
-			break;
-	}
-
-	if (&fde->link != &dwarf_fde_list) {
+	list_for_each_entry_safe(fde, ftmp, &mod->arch.fde_list, link) {
 		list_del(&fde->link);
+		rb_erase(&fde->node, &fde_root);
 		kfree(fde);
-		goto again_fde;
 	}
 
 	spin_unlock_irqrestore(&dwarf_fde_lock, flags);
@@ -1094,8 +1162,6 @@
 static int __init dwarf_unwinder_init(void)
 {
 	int err;
-	INIT_LIST_HEAD(&dwarf_cie_list);
-	INIT_LIST_HEAD(&dwarf_fde_list);
 
 	dwarf_frame_cachep = kmem_cache_create("dwarf_frames",
 			sizeof(struct dwarf_frame), 0,
diff --git a/arch/sh/kernel/early_printk.c b/arch/sh/kernel/early_printk.c
deleted file mode 100644
index f8bb50c..0000000
--- a/arch/sh/kernel/early_printk.c
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * arch/sh/kernel/early_printk.c
- *
- *  Copyright (C) 1999, 2000  Niibe Yutaka
- *  Copyright (C) 2002  M. R. Brown
- *  Copyright (C) 2004 - 2007  Paul Mundt
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/console.h>
-#include <linux/tty.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/delay.h>
-
-#include <asm/sh_bios.h>
-
-/*
- *	Print a string through the BIOS
- */
-static void sh_console_write(struct console *co, const char *s,
-				 unsigned count)
-{
-	sh_bios_console_write(s, count);
-}
-
-/*
- *	Setup initial baud/bits/parity. We do two things here:
- *	- construct a cflag setting for the first rs_open()
- *	- initialize the serial port
- *	Return non-zero if we didn't find a serial port.
- */
-static int __init sh_console_setup(struct console *co, char *options)
-{
-	int	cflag = CREAD | HUPCL | CLOCAL;
-
-	/*
-	 *	Now construct a cflag setting.
-	 *	TODO: this is a totally bogus cflag, as we have
-	 *	no idea what serial settings the BIOS is using, or
-	 *	even if its using the serial port at all.
-	 */
-	cflag |= B115200 | CS8 | /*no parity*/0;
-
-	co->cflag = cflag;
-
-	return 0;
-}
-
-static struct console bios_console = {
-	.name		= "bios",
-	.write		= sh_console_write,
-	.setup		= sh_console_setup,
-	.flags		= CON_PRINTBUFFER,
-	.index		= -1,
-};
-
-static struct console *early_console;
-
-static int __init setup_early_printk(char *buf)
-{
-	int keep_early = 0;
-
-	if (!buf)
-		return 0;
-
-	if (strstr(buf, "keep"))
-		keep_early = 1;
-
-	if (!strncmp(buf, "bios", 4))
-		early_console = &bios_console;
-
-	if (likely(early_console)) {
-		if (keep_early)
-			early_console->flags &= ~CON_BOOT;
-		else
-			early_console->flags |= CON_BOOT;
-		register_console(early_console);
-	}
-
-	return 0;
-}
-early_param("earlyprintk", setup_early_printk);
diff --git a/arch/sh/kernel/head_32.S b/arch/sh/kernel/head_32.S
index 1151ecd..fe0b743 100644
--- a/arch/sh/kernel/head_32.S
+++ b/arch/sh/kernel/head_32.S
@@ -3,6 +3,7 @@
  *  arch/sh/kernel/head.S
  *
  *  Copyright (C) 1999, 2000  Niibe Yutaka & Kaz Kojima
+ *  Copyright (C) 2010  Matt Fleming
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
@@ -13,6 +14,8 @@
 #include <linux/init.h>
 #include <linux/linkage.h>
 #include <asm/thread_info.h>
+#include <asm/mmu.h>
+#include <cpu/mmu_context.h>
 
 #ifdef CONFIG_CPU_SH4A
 #define SYNCO()		synco
@@ -33,7 +36,7 @@
 	.long	1		/* LOADER_TYPE */
 	.long	0x00000000	/* INITRD_START */
 	.long	0x00000000	/* INITRD_SIZE */
-#if defined(CONFIG_32BIT) && defined(CONFIG_PMB_FIXED)
+#ifdef CONFIG_32BIT
 	.long	0x53453f00 + 32	/* "SE?" = 32 bit */
 #else
 	.long	0x53453f00 + 29	/* "SE?" = 29 bit */
@@ -82,6 +85,209 @@
 	ldc	r0, r7_bank	! ... and initial thread_info
 #endif
 
+#ifdef CONFIG_PMB
+/*
+ * Reconfigure the initial PMB mappings setup by the hardware.
+ *
+ * When we boot in 32-bit MMU mode there are 2 PMB entries already
+ * set up for us.
+ *
+ * Entry       VPN	   PPN	    V	SZ	C	UB	WT
+ * ---------------------------------------------------------------
+ *   0	    0x80000000 0x00000000   1  512MB	1	0	1
+ *   1	    0xA0000000 0x00000000   1  512MB	0	0	0
+ *
+ * But we reprogram them here because we want complete control over
+ * our address space and the initial mappings may not map PAGE_OFFSET
+ * to __MEMORY_START (or even map all of our RAM).
+ *
+ * Once we've set up cached and uncached mappings, we clear the rest of the
+ * PMB entries. This clearing also deals with the fact that PMB entries
+ * can persist across reboots. The PMB could have been left in any state
+ * when the reboot occurred, so to be safe we clear all entries and start
+ * with a clean slate.
+ *
+ * The uncached mapping is constructed using the smallest possible
+ * mapping with a single unbufferable page. Only the kernel text needs to
+ * be covered via the uncached mapping so that certain functions can be
+ * run uncached.
+ *
+ * Drivers and the like that have previously abused the 1:1 identity
+ * mapping are unsupported in 32-bit mode and must specify their caching
+ * preference when page tables are constructed.
+ *
+ * This frees up the P2 space for more nefarious purposes.
+ *
+ * Register utilization is as follows:
+ *
+ *	r0 = PMB_DATA data field
+ *	r1 = PMB_DATA address field
+ *	r2 = PMB_ADDR data field
+ *	r3 = PMB_ADDR address field
+ *	r4 = PMB_E_SHIFT
+ *	r5 = remaining amount of RAM to map
+ *	r6 = PMB mapping size we're trying to use
+ *	r7 = cached_to_uncached
+ *	r8 = scratch register
+ *	r9 = scratch register
+ *	r10 = number of PMB entries we've setup
+ */
+
+	mov.l	.LMMUCR, r1	/* Flush the TLB */
+	mov.l	@r1, r0
+	or	#MMUCR_TI, r0
+	mov.l	r0, @r1
+
+	mov.l	.LMEMORY_SIZE, r5
+
+	mov	#PMB_E_SHIFT, r0
+	mov	#0x1, r4
+	shld	r0, r4
+
+	mov.l	.LFIRST_DATA_ENTRY, r0
+	mov.l	.LPMB_DATA, r1
+	mov.l	.LFIRST_ADDR_ENTRY, r2
+	mov.l	.LPMB_ADDR, r3
+
+	/*
+	 * First we need to walk the PMB and figure out if there are any
+	 * existing mappings that match the initial mappings' VPN/PPN.
+	 * If these have already been established by the bootloader, we
+	 * don't bother setting up new entries here, and let the late PMB
+	 * initialization take care of things instead.
+	 *
+	 * Note that we may need to coalesce and merge entries in order
+	 * to reclaim more available PMB slots, which is much more than
+	 * we want to do at this early stage.
+	 */
+	mov	#0, r10
+	mov	#NR_PMB_ENTRIES, r9
+
+	mov	r1, r7		/* temporary PMB_DATA iter */
+
+.Lvalidate_existing_mappings:
+
+	mov.l	@r7, r8
+	and	r0, r8
+	cmp/eq	r0, r8		/* Check for valid __MEMORY_START mappings */
+	bt	.Lpmb_done
+
+	add	#1, r10		/* Increment the loop counter */
+	cmp/eq	r9, r10
+	bf/s	.Lvalidate_existing_mappings
+	 add	r4, r7		/* Increment to the next PMB_DATA entry */
+
+	/*
+	 * If we've fallen through, continue with setting up the initial
+	 * mappings.
+	 */
+
+	mov	r5, r7		/* cached_to_uncached */
+	mov	#0, r10
+
+#ifdef CONFIG_UNCACHED_MAPPING
+	/*
+	 * Uncached mapping
+	 */
+	mov	#(PMB_SZ_16M >> 2), r9
+	shll2	r9
+
+	mov	#(PMB_UB >> 8), r8
+	shll8	r8
+
+	or	r0, r8
+	or	r9, r8
+	mov.l	r8, @r1
+	mov	r2, r8
+	add	r7, r8
+	mov.l	r8, @r3
+
+	add	r4, r1
+	add	r4, r3
+	add	#1, r10
+#endif
+
+/*
+ * Iterate over all of the available sizes from largest to
+ * smallest for constructing the cached mapping.
+ */
+#define __PMB_ITER_BY_SIZE(size)			\
+.L##size:						\
+	mov	#(size >> 4), r6;			\
+	shll16	r6;					\
+	shll8	r6;					\
+							\
+	cmp/hi	r5, r6;					\
+	bt	9999f;					\
+							\
+	mov	#(PMB_SZ_##size##M >> 2), r9;		\
+	shll2	r9;					\
+							\
+	/*						\
+	 * Cached mapping				\
+	 */						\
+	mov	#PMB_C, r8;				\
+	or	r0, r8;					\
+	or	r9, r8;					\
+	mov.l	r8, @r1;				\
+	mov.l	r2, @r3;				\
+							\
+	/* Increment to the next PMB_DATA entry */	\
+	add	r4, r1;					\
+	/* Increment to the next PMB_ADDR entry */	\
+	add	r4, r3;					\
+	/* Increment number of PMB entries */		\
+	add	#1, r10;				\
+							\
+	sub	r6, r5;					\
+	add	r6, r0;					\
+	add	r6, r2;					\
+							\
+	bra	.L##size;				\
+9999:
+
+	__PMB_ITER_BY_SIZE(512)
+	__PMB_ITER_BY_SIZE(128)
+	__PMB_ITER_BY_SIZE(64)
+	__PMB_ITER_BY_SIZE(16)
+
+#ifdef CONFIG_UNCACHED_MAPPING
+	/*
+	 * Now that we can access it, update cached_to_uncached and
+	 * uncached_size.
+	 */
+	mov.l	.Lcached_to_uncached, r0
+	mov.l	r7, @r0
+
+	mov.l	.Luncached_size, r0
+	mov	#1, r7
+	shll16	r7
+	shll8	r7
+	mov.l	r7, @r0
+#endif
+
+	/*
+	 * Clear the remaining PMB entries.
+	 *
+	 * r3 = entry to begin clearing from
+	 * r10 = number of entries we've setup so far
+	 */
+	mov	#0, r1
+	mov	#NR_PMB_ENTRIES, r0
+
+.Lagain:
+	mov.l	r1, @r3		/* Clear PMB_ADDR entry */
+	add	#1, r10		/* Increment the loop counter */
+	cmp/eq	r0, r10
+	bf/s	.Lagain
+	 add	r4, r3		/* Increment to the next PMB_ADDR entry */
+
+	mov.l	6f, r0
+	icbi	@r0
+
+.Lpmb_done:
+#endif /* CONFIG_PMB */
+
 #ifndef CONFIG_SH_NO_BSS_INIT
 	/*
 	 * Don't clear BSS if running on slow platforms such as an RTL simulation,
@@ -131,3 +337,16 @@
 5:	.long	start_kernel
 6:	.long	sh_cpu_init
 7:	.long	init_thread_union
+
+#ifdef CONFIG_PMB
+.LPMB_ADDR:		.long	PMB_ADDR
+.LPMB_DATA:		.long	PMB_DATA
+.LFIRST_ADDR_ENTRY:	.long	PAGE_OFFSET | PMB_V
+.LFIRST_DATA_ENTRY:	.long	__MEMORY_START | PMB_V
+.LMMUCR:		.long	MMUCR
+.LMEMORY_SIZE:		.long	__MEMORY_SIZE
+#ifdef CONFIG_UNCACHED_MAPPING
+.Lcached_to_uncached:	.long	cached_to_uncached
+.Luncached_size:	.long	uncached_size
+#endif
+#endif
diff --git a/arch/sh/kernel/head_64.S b/arch/sh/kernel/head_64.S
index 3ea7658..defd851 100644
--- a/arch/sh/kernel/head_64.S
+++ b/arch/sh/kernel/head_64.S
@@ -220,7 +220,6 @@
 	add.l	r22, r63, r22		/* Sign extend */
 	putcfg	r21, 0, r22		/* Set MMUDR[0].PTEH */
 
-#ifdef CONFIG_EARLY_PRINTK
 	/*
 	 * Setup a DTLB translation for SCIF phys.
 	 */
@@ -231,7 +230,6 @@
 	movi    0xfa03, r22	/* 0xfa030000, fixed SCIF virt */
 	shori   0x0003, r22
 	putcfg  r21, 0, r22	/* PTEH last */
-#endif
 
 	/*
 	 * Set cache behaviours.
diff --git a/arch/sh/kernel/hw_breakpoint.c b/arch/sh/kernel/hw_breakpoint.c
new file mode 100644
index 0000000..e2f1753
--- /dev/null
+++ b/arch/sh/kernel/hw_breakpoint.c
@@ -0,0 +1,464 @@
+/*
+ * arch/sh/kernel/hw_breakpoint.c
+ *
+ * Unified kernel/user-space hardware breakpoint facility for the on-chip UBC.
+ *
+ * Copyright (C) 2009 - 2010  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/perf_event.h>
+#include <linux/hw_breakpoint.h>
+#include <linux/percpu.h>
+#include <linux/kallsyms.h>
+#include <linux/notifier.h>
+#include <linux/kprobes.h>
+#include <linux/kdebug.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <asm/hw_breakpoint.h>
+#include <asm/mmu_context.h>
+#include <asm/ptrace.h>
+
+/*
+ * Stores the breakpoints currently in use on each breakpoint address
+ * register for each cpus
+ */
+static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM]);
+
+/*
+ * A dummy placeholder for early accesses until the CPUs get a chance to
+ * register their UBCs later in the boot process.
+ */
+static struct sh_ubc ubc_dummy = { .num_events = 0 };
+
+static struct sh_ubc *sh_ubc __read_mostly = &ubc_dummy;
+
+/*
+ * Install a perf counter breakpoint.
+ *
+ * We seek a free UBC channel and use it for this breakpoint.
+ *
+ * Atomic: we hold the counter->ctx->lock and we only handle variables
+ * and registers local to this cpu.
+ */
+int arch_install_hw_breakpoint(struct perf_event *bp)
+{
+	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+	int i;
+
+	for (i = 0; i < sh_ubc->num_events; i++) {
+		struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);
+
+		if (!*slot) {
+			*slot = bp;
+			break;
+		}
+	}
+
+	if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot"))
+		return -EBUSY;
+
+	clk_enable(sh_ubc->clk);
+	sh_ubc->enable(info, i);
+
+	return 0;
+}
+
+/*
+ * Uninstall the breakpoint contained in the given counter.
+ *
+ * First we search the debug address register it uses and then we disable
+ * it.
+ *
+ * Atomic: we hold the counter->ctx->lock and we only handle variables
+ * and registers local to this cpu.
+ */
+void arch_uninstall_hw_breakpoint(struct perf_event *bp)
+{
+	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+	int i;
+
+	for (i = 0; i < sh_ubc->num_events; i++) {
+		struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);
+
+		if (*slot == bp) {
+			*slot = NULL;
+			break;
+		}
+	}
+
+	if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot"))
+		return;
+
+	sh_ubc->disable(info, i);
+	clk_disable(sh_ubc->clk);
+}
+
+static int get_hbp_len(u16 hbp_len)
+{
+	unsigned int len_in_bytes = 0;
+
+	switch (hbp_len) {
+	case SH_BREAKPOINT_LEN_1:
+		len_in_bytes = 1;
+		break;
+	case SH_BREAKPOINT_LEN_2:
+		len_in_bytes = 2;
+		break;
+	case SH_BREAKPOINT_LEN_4:
+		len_in_bytes = 4;
+		break;
+	case SH_BREAKPOINT_LEN_8:
+		len_in_bytes = 8;
+		break;
+	}
+	return len_in_bytes;
+}
+
+/*
+ * Check for virtual address in user space.
+ */
+int arch_check_va_in_userspace(unsigned long va, u16 hbp_len)
+{
+	unsigned int len;
+
+	len = get_hbp_len(hbp_len);
+
+	return (va <= TASK_SIZE - len);
+}
+
+/*
+ * Check for virtual address in kernel space.
+ */
+static int arch_check_va_in_kernelspace(unsigned long va, u8 hbp_len)
+{
+	unsigned int len;
+
+	len = get_hbp_len(hbp_len);
+
+	return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
+}
+
+/*
+ * Store a breakpoint's encoded address, length, and type.
+ */
+static int arch_store_info(struct perf_event *bp)
+{
+	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+
+	/*
+	 * User-space requests will always have the address field populated.
+	 * For kernel-addresses, either the address or symbol name can be
+	 * specified.
+	 */
+	if (info->name)
+		info->address = (unsigned long)kallsyms_lookup_name(info->name);
+	if (info->address)
+		return 0;
+
+	return -EINVAL;
+}
+
+int arch_bp_generic_fields(int sh_len, int sh_type,
+			   int *gen_len, int *gen_type)
+{
+	/* Len */
+	switch (sh_len) {
+	case SH_BREAKPOINT_LEN_1:
+		*gen_len = HW_BREAKPOINT_LEN_1;
+		break;
+	case SH_BREAKPOINT_LEN_2:
+		*gen_len = HW_BREAKPOINT_LEN_2;
+		break;
+	case SH_BREAKPOINT_LEN_4:
+		*gen_len = HW_BREAKPOINT_LEN_4;
+		break;
+	case SH_BREAKPOINT_LEN_8:
+		*gen_len = HW_BREAKPOINT_LEN_8;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* Type */
+	switch (sh_type) {
+	case SH_BREAKPOINT_READ:
+		*gen_type = HW_BREAKPOINT_R;
+		break;
+	case SH_BREAKPOINT_WRITE:
+		*gen_type = HW_BREAKPOINT_W;
+		break;
+	case SH_BREAKPOINT_RW:
+		*gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int arch_build_bp_info(struct perf_event *bp)
+{
+	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+
+	info->address = bp->attr.bp_addr;
+
+	/* Len */
+	switch (bp->attr.bp_len) {
+	case HW_BREAKPOINT_LEN_1:
+		info->len = SH_BREAKPOINT_LEN_1;
+		break;
+	case HW_BREAKPOINT_LEN_2:
+		info->len = SH_BREAKPOINT_LEN_2;
+		break;
+	case HW_BREAKPOINT_LEN_4:
+		info->len = SH_BREAKPOINT_LEN_4;
+		break;
+	case HW_BREAKPOINT_LEN_8:
+		info->len = SH_BREAKPOINT_LEN_8;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* Type */
+	switch (bp->attr.bp_type) {
+	case HW_BREAKPOINT_R:
+		info->type = SH_BREAKPOINT_READ;
+		break;
+	case HW_BREAKPOINT_W:
+		info->type = SH_BREAKPOINT_WRITE;
+		break;
+	case HW_BREAKPOINT_W | HW_BREAKPOINT_R:
+		info->type = SH_BREAKPOINT_RW;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Validate the arch-specific HW Breakpoint register settings
+ */
+int arch_validate_hwbkpt_settings(struct perf_event *bp,
+				  struct task_struct *tsk)
+{
+	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+	unsigned int align;
+	int ret;
+
+	ret = arch_build_bp_info(bp);
+	if (ret)
+		return ret;
+
+	ret = -EINVAL;
+
+	switch (info->len) {
+	case SH_BREAKPOINT_LEN_1:
+		align = 0;
+		break;
+	case SH_BREAKPOINT_LEN_2:
+		align = 1;
+		break;
+	case SH_BREAKPOINT_LEN_4:
+		align = 3;
+		break;
+	case SH_BREAKPOINT_LEN_8:
+		align = 7;
+		break;
+	default:
+		return ret;
+	}
+
+	ret = arch_store_info(bp);
+
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * Check that the low-order bits of the address are appropriate
+	 * for the alignment implied by len.
+	 */
+	if (info->address & align)
+		return -EINVAL;
+
+	/* Check that the virtual address is in the proper range */
+	if (tsk) {
+		if (!arch_check_va_in_userspace(info->address, info->len))
+			return -EFAULT;
+	} else {
+		if (!arch_check_va_in_kernelspace(info->address, info->len))
+			return -EFAULT;
+	}
+
+	return 0;
+}
+
+/*
+ * Release the user breakpoints used by ptrace
+ */
+void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
+{
+	int i;
+	struct thread_struct *t = &tsk->thread;
+
+	for (i = 0; i < sh_ubc->num_events; i++) {
+		unregister_hw_breakpoint(t->ptrace_bps[i]);
+		t->ptrace_bps[i] = NULL;
+	}
+}
+
+static int __kprobes hw_breakpoint_handler(struct die_args *args)
+{
+	int cpu, i, rc = NOTIFY_STOP;
+	struct perf_event *bp;
+	unsigned int cmf, resume_mask;
+
+	/*
+	 * Do an early return if none of the channels triggered.
+	 */
+	cmf = sh_ubc->triggered_mask();
+	if (unlikely(!cmf))
+		return NOTIFY_DONE;
+
+	/*
+	 * By default, resume all of the active channels.
+	 */
+	resume_mask = sh_ubc->active_mask();
+
+	/*
+	 * Disable breakpoints during exception handling.
+	 */
+	sh_ubc->disable_all();
+
+	cpu = get_cpu();
+	for (i = 0; i < sh_ubc->num_events; i++) {
+		unsigned long event_mask = (1 << i);
+
+		if (likely(!(cmf & event_mask)))
+			continue;
+
+		/*
+		 * The counter may be concurrently released but that can only
+		 * occur from a call_rcu() path. We can then safely fetch
+		 * the breakpoint, use its callback, touch its counter
+		 * while we are in an rcu_read_lock() path.
+		 */
+		rcu_read_lock();
+
+		bp = per_cpu(bp_per_reg[i], cpu);
+		if (bp)
+			rc = NOTIFY_DONE;
+
+		/*
+		 * Reset the condition match flag to denote completion of
+		 * exception handling.
+		 */
+		sh_ubc->clear_triggered_mask(event_mask);
+
+		/*
+		 * bp can be NULL due to concurrent perf counter
+		 * removing.
+		 */
+		if (!bp) {
+			rcu_read_unlock();
+			break;
+		}
+
+		/*
+		 * Don't restore the channel if the breakpoint is from
+		 * ptrace, as it always operates in one-shot mode.
+		 */
+		if (bp->overflow_handler == ptrace_triggered)
+			resume_mask &= ~(1 << i);
+
+		perf_bp_event(bp, args->regs);
+
+		/* Deliver the signal to userspace */
+		if (arch_check_va_in_userspace(bp->attr.bp_addr,
+					       bp->attr.bp_len)) {
+			siginfo_t info;
+
+			info.si_signo = args->signr;
+			info.si_errno = notifier_to_errno(rc);
+			info.si_code = TRAP_HWBKPT;
+
+			force_sig_info(args->signr, &info, current);
+		}
+
+		rcu_read_unlock();
+	}
+
+	if (cmf == 0)
+		rc = NOTIFY_DONE;
+
+	sh_ubc->enable_all(resume_mask);
+
+	put_cpu();
+
+	return rc;
+}
+
+BUILD_TRAP_HANDLER(breakpoint)
+{
+	unsigned long ex = lookup_exception_vector();
+	TRAP_HANDLER_DECL;
+
+	notify_die(DIE_BREAKPOINT, "breakpoint", regs, 0, ex, SIGTRAP);
+}
+
+/*
+ * Handle debug exception notifications.
+ */
+int __kprobes hw_breakpoint_exceptions_notify(struct notifier_block *unused,
+				    unsigned long val, void *data)
+{
+	struct die_args *args = data;
+
+	if (val != DIE_BREAKPOINT)
+		return NOTIFY_DONE;
+
+	/*
+	 * If the breakpoint hasn't been triggered by the UBC, it's
+	 * probably from a debugger, so don't do anything more here.
+	 *
+	 * This also permits the UBC interface clock to remain off for
+	 * non-UBC breakpoints, as we don't need to check the triggered
+	 * or active channel masks.
+	 */
+	if (args->trapnr != sh_ubc->trap_nr)
+		return NOTIFY_DONE;
+
+	return hw_breakpoint_handler(data);
+}
+
+void hw_breakpoint_pmu_read(struct perf_event *bp)
+{
+	/* TODO */
+}
+
+void hw_breakpoint_pmu_unthrottle(struct perf_event *bp)
+{
+	/* TODO */
+}
+
+int register_sh_ubc(struct sh_ubc *ubc)
+{
+	/* Bail if it's already assigned */
+	if (sh_ubc != &ubc_dummy)
+		return -EBUSY;
+	sh_ubc = ubc;
+
+	pr_info("HW Breakpoints: %s UBC support registered\n", ubc->name);
+
+	WARN_ON(ubc->num_events > HBP_NUM);
+
+	return 0;
+}
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c
index 6b3d706..0fd7b41 100644
--- a/arch/sh/kernel/idle.c
+++ b/arch/sh/kernel/idle.c
@@ -20,10 +20,9 @@
 #include <asm/system.h>
 #include <asm/atomic.h>
 
-static int hlt_counter;
 void (*pm_idle)(void) = NULL;
-void (*pm_power_off)(void);
-EXPORT_SYMBOL(pm_power_off);
+
+static int hlt_counter;
 
 static int __init nohlt_setup(char *__unused)
 {
@@ -131,6 +130,15 @@
 {
 }
 
+void stop_this_cpu(void *unused)
+{
+	local_irq_disable();
+	cpu_clear(smp_processor_id(), cpu_online_map);
+
+	for (;;)
+		cpu_sleep();
+}
+
 /*
  * cpu_idle_wait - Used to ensure that all the CPUs discard old value of
  * pm_idle and update to new pm_idle value. Required while changing pm_idle
diff --git a/arch/sh/kernel/io_trapped.c b/arch/sh/kernel/io_trapped.c
index 69be603..4a8bb4e 100644
--- a/arch/sh/kernel/io_trapped.c
+++ b/arch/sh/kernel/io_trapped.c
@@ -184,31 +184,31 @@
 
 	switch (src_len) {
 	case 1:
-		tmp = ctrl_inb(src_addr);
+		tmp = __raw_readb(src_addr);
 		break;
 	case 2:
-		tmp = ctrl_inw(src_addr);
+		tmp = __raw_readw(src_addr);
 		break;
 	case 4:
-		tmp = ctrl_inl(src_addr);
+		tmp = __raw_readl(src_addr);
 		break;
 	case 8:
-		tmp = ctrl_inq(src_addr);
+		tmp = __raw_readq(src_addr);
 		break;
 	}
 
 	switch (dst_len) {
 	case 1:
-		ctrl_outb(tmp, dst_addr);
+		__raw_writeb(tmp, dst_addr);
 		break;
 	case 2:
-		ctrl_outw(tmp, dst_addr);
+		__raw_writew(tmp, dst_addr);
 		break;
 	case 4:
-		ctrl_outl(tmp, dst_addr);
+		__raw_writel(tmp, dst_addr);
 		break;
 	case 8:
-		ctrl_outq(tmp, dst_addr);
+		__raw_writeq(tmp, dst_addr);
 		break;
 	}
 
@@ -271,6 +271,8 @@
 	insn_size_t instruction;
 	int tmp;
 
+	if (trapped_io_disable)
+		return 0;
 	if (!lookup_tiop(address))
 		return 0;
 
diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c
index 3e532d0..70c6965 100644
--- a/arch/sh/kernel/kgdb.c
+++ b/arch/sh/kernel/kgdb.c
@@ -1,7 +1,7 @@
 /*
  * SuperH KGDB support
  *
- * Copyright (C) 2008  Paul Mundt
+ * Copyright (C) 2008 - 2009  Paul Mundt
  *
  * Single stepping taken from the old stub by Henry Bell and Jeremy Siegel.
  *
@@ -251,24 +251,60 @@
 	local_irq_restore(flags);
 }
 
+static int __kgdb_notify(struct die_args *args, unsigned long cmd)
+{
+	int ret;
 
-BUILD_TRAP_HANDLER(breakpoint)
+	switch (cmd) {
+	case DIE_BREAKPOINT:
+		/*
+		 * This means a user thread is single stepping
+		 * a system call which should be ignored
+		 */
+		if (test_thread_flag(TIF_SINGLESTEP))
+			return NOTIFY_DONE;
+
+		ret = kgdb_handle_exception(args->trapnr & 0xff, args->signr,
+					    args->err, args->regs);
+		if (ret)
+			return NOTIFY_DONE;
+
+		break;
+	}
+
+	return NOTIFY_STOP;
+}
+
+static int
+kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
 {
 	unsigned long flags;
-	TRAP_HANDLER_DECL;
+	int ret;
 
 	local_irq_save(flags);
-	kgdb_handle_exception(vec >> 2, SIGTRAP, 0, regs);
+	ret = __kgdb_notify(ptr, cmd);
 	local_irq_restore(flags);
+
+	return ret;
 }
 
+static struct notifier_block kgdb_notifier = {
+	.notifier_call	= kgdb_notify,
+
+	/*
+	 * Lowest-prio notifier priority, we want to be notified last:
+	 */
+	.priority	= -INT_MAX,
+};
+
 int kgdb_arch_init(void)
 {
-	return 0;
+	return register_die_notifier(&kgdb_notifier);
 }
 
 void kgdb_arch_exit(void)
 {
+	unregister_die_notifier(&kgdb_notifier);
 }
 
 struct kgdb_arch arch_kgdb_ops = {
diff --git a/arch/sh/kernel/machine_kexec.c b/arch/sh/kernel/machine_kexec.c
index 76f2802..7672141 100644
--- a/arch/sh/kernel/machine_kexec.c
+++ b/arch/sh/kernel/machine_kexec.c
@@ -21,6 +21,8 @@
 #include <asm/mmu_context.h>
 #include <asm/io.h>
 #include <asm/cacheflush.h>
+#include <asm/sh_bios.h>
+#include <asm/reboot.h>
 
 typedef void (*relocate_new_kernel_t)(unsigned long indirection_page,
 				      unsigned long reboot_code_buffer,
@@ -28,15 +30,11 @@
 
 extern const unsigned char relocate_new_kernel[];
 extern const unsigned int relocate_new_kernel_size;
-extern void *gdb_vbr_vector;
 extern void *vbr_base;
 
-void machine_shutdown(void)
+void native_machine_crash_shutdown(struct pt_regs *regs)
 {
-}
-
-void machine_crash_shutdown(struct pt_regs *regs)
-{
+	/* Nothing to do for UP, but definitely broken for SMP.. */
 }
 
 /*
@@ -117,11 +115,7 @@
 	kexec_info(image);
 	flush_cache_all();
 
-#if defined(CONFIG_SH_STANDARD_BIOS)
-	asm volatile("ldc %0, vbr" :
-		     : "r" (((unsigned long) gdb_vbr_vector) - 0x100)
-		     : "memory");
-#endif
+	sh_bios_vbr_reload();
 
 	/* now call it */
 	rnk = (relocate_new_kernel_t) reboot_code_buffer;
diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c
new file mode 100644
index 0000000..81add9b
--- /dev/null
+++ b/arch/sh/kernel/process.c
@@ -0,0 +1,100 @@
+#include <linux/mm.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+
+struct kmem_cache *task_xstate_cachep = NULL;
+unsigned int xstate_size;
+
+int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
+{
+	*dst = *src;
+
+	if (src->thread.xstate) {
+		dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
+						      GFP_KERNEL);
+		if (!dst->thread.xstate)
+			return -ENOMEM;
+		memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
+	}
+
+	return 0;
+}
+
+void free_thread_xstate(struct task_struct *tsk)
+{
+	if (tsk->thread.xstate) {
+		kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
+		tsk->thread.xstate = NULL;
+	}
+}
+
+#if THREAD_SHIFT < PAGE_SHIFT
+static struct kmem_cache *thread_info_cache;
+
+struct thread_info *alloc_thread_info(struct task_struct *tsk)
+{
+	struct thread_info *ti;
+
+	ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL);
+	if (unlikely(ti == NULL))
+		return NULL;
+#ifdef CONFIG_DEBUG_STACK_USAGE
+	memset(ti, 0, THREAD_SIZE);
+#endif
+	return ti;
+}
+
+void free_thread_info(struct thread_info *ti)
+{
+	free_thread_xstate(ti->task);
+	kmem_cache_free(thread_info_cache, ti);
+}
+
+void thread_info_cache_init(void)
+{
+	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
+					      THREAD_SIZE, SLAB_PANIC, NULL);
+}
+#else
+struct thread_info *alloc_thread_info(struct task_struct *tsk)
+{
+#ifdef CONFIG_DEBUG_STACK_USAGE
+	gfp_t mask = GFP_KERNEL | __GFP_ZERO;
+#else
+	gfp_t mask = GFP_KERNEL;
+#endif
+	return (struct thread_info *)__get_free_pages(mask, THREAD_SIZE_ORDER);
+}
+
+void free_thread_info(struct thread_info *ti)
+{
+	free_thread_xstate(ti->task);
+	free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
+}
+#endif /* THREAD_SHIFT < PAGE_SHIFT */
+
+void arch_task_cache_init(void)
+{
+	if (!xstate_size)
+		return;
+
+	task_xstate_cachep = kmem_cache_create("task_xstate", xstate_size,
+					       __alignof__(union thread_xstate),
+					       SLAB_PANIC | SLAB_NOTRACK, NULL);
+}
+
+#ifdef CONFIG_SH_FPU_EMU
+# define HAVE_SOFTFP	1
+#else
+# define HAVE_SOFTFP	0
+#endif
+
+void init_thread_xstate(void)
+{
+	if (boot_cpu_data.flags & CPU_HAS_FPU)
+		xstate_size = sizeof(struct sh_fpu_hard_struct);
+	else if (HAVE_SOFTFP)
+		xstate_size = sizeof(struct sh_fpu_soft_struct);
+	else
+		xstate_size = 0;
+}
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index d8af889..3cb88f1 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -16,65 +16,15 @@
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/elfcore.h>
-#include <linux/pm.h>
 #include <linux/kallsyms.h>
-#include <linux/kexec.h>
-#include <linux/kdebug.h>
-#include <linux/tick.h>
-#include <linux/reboot.h>
 #include <linux/fs.h>
 #include <linux/ftrace.h>
-#include <linux/preempt.h>
+#include <linux/hw_breakpoint.h>
 #include <asm/uaccess.h>
 #include <asm/mmu_context.h>
-#include <asm/pgalloc.h>
 #include <asm/system.h>
-#include <asm/ubc.h>
 #include <asm/fpu.h>
 #include <asm/syscalls.h>
-#include <asm/watchdog.h>
-
-int ubc_usercnt = 0;
-
-#ifdef CONFIG_32BIT
-static void watchdog_trigger_immediate(void)
-{
-	sh_wdt_write_cnt(0xFF);
-	sh_wdt_write_csr(0xC2);
-}
-
-void machine_restart(char * __unused)
-{
-	local_irq_disable();
-
-	/* Use watchdog timer to trigger reset */
-	watchdog_trigger_immediate();
-
-	while (1)
-		cpu_sleep();
-}
-#else
-void machine_restart(char * __unused)
-{
-	/* SR.BL=1 and invoke address error to let CPU reset (manual reset) */
-	asm volatile("ldc %0, sr\n\t"
-		     "mov.l @%1, %0" : : "r" (0x10000000), "r" (0x80000001));
-}
-#endif
-
-void machine_halt(void)
-{
-	local_irq_disable();
-
-	while (1)
-		cpu_sleep();
-}
-
-void machine_power_off(void)
-{
-	if (pm_power_off)
-		pm_power_off();
-}
 
 void show_regs(struct pt_regs * regs)
 {
@@ -91,7 +41,7 @@
 	printk("PC  : %08lx SP  : %08lx SR  : %08lx ",
 	       regs->pc, regs->regs[15], regs->sr);
 #ifdef CONFIG_MMU
-	printk("TEA : %08x\n", ctrl_inl(MMU_TEA));
+	printk("TEA : %08x\n", __raw_readl(MMU_TEA));
 #else
 	printk("\n");
 #endif
@@ -147,21 +97,34 @@
 }
 EXPORT_SYMBOL(kernel_thread);
 
+void start_thread(struct pt_regs *regs, unsigned long new_pc,
+		  unsigned long new_sp)
+{
+	set_fs(USER_DS);
+
+	regs->pr = 0;
+	regs->sr = SR_FD;
+	regs->pc = new_pc;
+	regs->regs[15] = new_sp;
+
+	free_thread_xstate(current);
+}
+EXPORT_SYMBOL(start_thread);
+
 /*
  * Free current thread data structures etc..
  */
 void exit_thread(void)
 {
-	if (current->thread.ubc_pc) {
-		current->thread.ubc_pc = 0;
-		ubc_usercnt -= 1;
-	}
 }
 
 void flush_thread(void)
 {
-#if defined(CONFIG_SH_FPU)
 	struct task_struct *tsk = current;
+
+	flush_ptrace_hw_breakpoint(tsk);
+
+#if defined(CONFIG_SH_FPU)
 	/* Forget lazy FPU state */
 	clear_fpu(tsk, task_pt_regs(tsk));
 	clear_used_math();
@@ -209,11 +172,10 @@
 {
 	struct thread_info *ti = task_thread_info(p);
 	struct pt_regs *childregs;
-#if defined(CONFIG_SH_DSP)
-	struct task_struct *tsk = current;
-#endif
 
 #if defined(CONFIG_SH_DSP)
+	struct task_struct *tsk = current;
+
 	if (is_dsp_enabled(tsk)) {
 		/* We can use the __save_dsp or just copy the struct:
 		 * __save_dsp(p);
@@ -244,53 +206,11 @@
 	p->thread.sp = (unsigned long) childregs;
 	p->thread.pc = (unsigned long) ret_from_fork;
 
-	p->thread.ubc_pc = 0;
+	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
 
 	return 0;
 }
 
-/* Tracing by user break controller.  */
-static void ubc_set_tracing(int asid, unsigned long pc)
-{
-#if defined(CONFIG_CPU_SH4A)
-	unsigned long val;
-
-	val = (UBC_CBR_ID_INST | UBC_CBR_RW_READ | UBC_CBR_CE);
-	val |= (UBC_CBR_AIE | UBC_CBR_AIV_SET(asid));
-
-	ctrl_outl(val, UBC_CBR0);
-	ctrl_outl(pc,  UBC_CAR0);
-	ctrl_outl(0x0, UBC_CAMR0);
-	ctrl_outl(0x0, UBC_CBCR);
-
-	val = (UBC_CRR_RES | UBC_CRR_PCB | UBC_CRR_BIE);
-	ctrl_outl(val, UBC_CRR0);
-
-	/* Read UBC register that we wrote last, for checking update */
-	val = ctrl_inl(UBC_CRR0);
-
-#else	/* CONFIG_CPU_SH4A */
-	ctrl_outl(pc, UBC_BARA);
-
-#ifdef CONFIG_MMU
-	ctrl_outb(asid, UBC_BASRA);
-#endif
-
-	ctrl_outl(0, UBC_BAMRA);
-
-	if (current_cpu_data.type == CPU_SH7729 ||
-	    current_cpu_data.type == CPU_SH7710 ||
-	    current_cpu_data.type == CPU_SH7712 ||
-	    current_cpu_data.type == CPU_SH7203){
-		ctrl_outw(BBR_INST | BBR_READ | BBR_CPU, UBC_BBRA);
-		ctrl_outl(BRCR_PCBA | BRCR_PCTE, UBC_BRCR);
-	} else {
-		ctrl_outw(BBR_INST | BBR_READ, UBC_BBRA);
-		ctrl_outw(BRCR_PCBA, UBC_BRCR);
-	}
-#endif	/* CONFIG_CPU_SH4A */
-}
-
 /*
  *	switch_to(x,y) should switch tasks from x to y.
  *
@@ -304,7 +224,7 @@
 
 	/* we're going to use this soon, after a few expensive things */
 	if (next->fpu_counter > 5)
-		prefetch(&next_t->fpu.hard);
+		prefetch(next_t->xstate);
 
 #ifdef CONFIG_MMU
 	/*
@@ -316,32 +236,13 @@
 		     : "r" (task_thread_info(next)));
 #endif
 
-	/* If no tasks are using the UBC, we're done */
-	if (ubc_usercnt == 0)
-		/* If no tasks are using the UBC, we're done */;
-	else if (next->thread.ubc_pc && next->mm) {
-		int asid = 0;
-#ifdef CONFIG_MMU
-		asid |= cpu_asid(smp_processor_id(), next->mm);
-#endif
-		ubc_set_tracing(asid, next->thread.ubc_pc);
-	} else {
-#if defined(CONFIG_CPU_SH4A)
-		ctrl_outl(UBC_CBR_INIT, UBC_CBR0);
-		ctrl_outl(UBC_CRR_INIT, UBC_CRR0);
-#else
-		ctrl_outw(0, UBC_BBRA);
-		ctrl_outw(0, UBC_BBRB);
-#endif
-	}
-
 	/*
 	 * If the task has used fpu the last 5 timeslices, just do a full
 	 * restore of the math state immediately to avoid the trap; the
 	 * chances of needing FPU soon are obviously high now
 	 */
 	if (next->fpu_counter > 5)
-		fpu_state_restore(task_pt_regs(next));
+		__fpu_state_restore();
 
 	return prev;
 }
@@ -434,20 +335,3 @@
 
 	return pc;
 }
-
-asmlinkage void break_point_trap(void)
-{
-	/* Clear tracing.  */
-#if defined(CONFIG_CPU_SH4A)
-	ctrl_outl(UBC_CBR_INIT, UBC_CBR0);
-	ctrl_outl(UBC_CRR_INIT, UBC_CRR0);
-#else
-	ctrl_outw(0, UBC_BBRA);
-	ctrl_outw(0, UBC_BBRB);
-	ctrl_outl(0, UBC_BRCR);
-#endif
-	current->thread.ubc_pc = 0;
-	ubc_usercnt -= 1;
-
-	force_sig(SIGTRAP, current);
-}
diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c
index ec79faf..c90957a 100644
--- a/arch/sh/kernel/process_64.c
+++ b/arch/sh/kernel/process_64.c
@@ -32,30 +32,7 @@
 
 struct task_struct *last_task_used_math = NULL;
 
-void machine_restart(char * __unused)
-{
-	extern void phys_stext(void);
-
-	phys_stext();
-}
-
-void machine_halt(void)
-{
-	for (;;);
-}
-
-void machine_power_off(void)
-{
-	__asm__ __volatile__ (
-		"sleep\n\t"
-		"synci\n\t"
-		"nop;nop;nop;nop\n\t"
-	);
-
-	panic("Unexpected wakeup!\n");
-}
-
-void show_regs(struct pt_regs * regs)
+void show_regs(struct pt_regs *regs)
 {
 	unsigned long long ah, al, bh, bl, ch, cl;
 
@@ -410,7 +387,7 @@
 			regs->sr |= SR_FD;
 		}
 
-		memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
+		memcpy(fpu, &tsk->thread.xstate->hardfpu, sizeof(*fpu));
 	}
 
 	return fpvalid;
diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c
index 9be35f3..c625cda 100644
--- a/arch/sh/kernel/ptrace_32.c
+++ b/arch/sh/kernel/ptrace_32.c
@@ -2,7 +2,7 @@
  * SuperH process tracing
  *
  * Copyright (C) 1999, 2000  Kaz Kojima & Niibe Yutaka
- * Copyright (C) 2002 - 2008  Paul Mundt
+ * Copyright (C) 2002 - 2009  Paul Mundt
  *
  * Audit support by Yuichi Nakamura <ynakam@hitachisoft.jp>
  *
@@ -26,6 +26,7 @@
 #include <linux/tracehook.h>
 #include <linux/elf.h>
 #include <linux/regset.h>
+#include <linux/hw_breakpoint.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/system.h>
@@ -63,33 +64,64 @@
 	return 0;
 }
 
+void ptrace_triggered(struct perf_event *bp, int nmi,
+		      struct perf_sample_data *data, struct pt_regs *regs)
+{
+	struct perf_event_attr attr;
+
+	/*
+	 * Disable the breakpoint request here since ptrace has defined a
+	 * one-shot behaviour for breakpoint exceptions.
+	 */
+	attr = bp->attr;
+	attr.disabled = true;
+	modify_user_hw_breakpoint(bp, &attr);
+}
+
+static int set_single_step(struct task_struct *tsk, unsigned long addr)
+{
+	struct thread_struct *thread = &tsk->thread;
+	struct perf_event *bp;
+	struct perf_event_attr attr;
+
+	bp = thread->ptrace_bps[0];
+	if (!bp) {
+		hw_breakpoint_init(&attr);
+
+		attr.bp_addr = addr;
+		attr.bp_len = HW_BREAKPOINT_LEN_2;
+		attr.bp_type = HW_BREAKPOINT_R;
+
+		bp = register_user_hw_breakpoint(&attr, ptrace_triggered, tsk);
+		if (IS_ERR(bp))
+			return PTR_ERR(bp);
+
+		thread->ptrace_bps[0] = bp;
+	} else {
+		int err;
+
+		attr = bp->attr;
+		attr.bp_addr = addr;
+		err = modify_user_hw_breakpoint(bp, &attr);
+		if (unlikely(err))
+			return err;
+	}
+
+	return 0;
+}
+
 void user_enable_single_step(struct task_struct *child)
 {
-	/* Next scheduling will set up UBC */
-	if (child->thread.ubc_pc == 0)
-		ubc_usercnt += 1;
-
-	child->thread.ubc_pc = get_stack_long(child,
-				offsetof(struct pt_regs, pc));
+	unsigned long pc = get_stack_long(child, offsetof(struct pt_regs, pc));
 
 	set_tsk_thread_flag(child, TIF_SINGLESTEP);
+
+	set_single_step(child, pc);
 }
 
 void user_disable_single_step(struct task_struct *child)
 {
 	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
-
-	/*
-	 * Ensure the UBC is not programmed at the next context switch.
-	 *
-	 * Normally this is not needed but there are sequences such as
-	 * singlestep, signal delivery, and continue that leave the
-	 * ubc_pc non-zero leading to spurious SIGTRAPs.
-	 */
-	if (child->thread.ubc_pc != 0) {
-		ubc_usercnt -= 1;
-		child->thread.ubc_pc = 0;
-	}
 }
 
 /*
@@ -163,10 +195,10 @@
 
 	if ((boot_cpu_data.flags & CPU_HAS_FPU))
 		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-					   &target->thread.fpu.hard, 0, -1);
+					   &target->thread.xstate->hardfpu, 0, -1);
 
 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-				   &target->thread.fpu.soft, 0, -1);
+				   &target->thread.xstate->softfpu, 0, -1);
 }
 
 static int fpregs_set(struct task_struct *target,
@@ -184,10 +216,10 @@
 
 	if ((boot_cpu_data.flags & CPU_HAS_FPU))
 		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-					  &target->thread.fpu.hard, 0, -1);
+					  &target->thread.xstate->hardfpu, 0, -1);
 
 	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-				  &target->thread.fpu.soft, 0, -1);
+				  &target->thread.xstate->softfpu, 0, -1);
 }
 
 static int fpregs_active(struct task_struct *target,
@@ -333,7 +365,7 @@
 				else
 					tmp = 0;
 			} else
-				tmp = ((long *)&child->thread.fpu)
+				tmp = ((long *)child->thread.xstate)
 					[(addr - (long)&dummy->fpu) >> 2];
 		} else if (addr == (long) &dummy->u_fpvalid)
 			tmp = !!tsk_used_math(child);
@@ -362,7 +394,7 @@
 		else if (addr >= (long) &dummy->fpu &&
 			 addr < (long) &dummy->u_fpvalid) {
 			set_stopped_child_used_math(child);
-			((long *)&child->thread.fpu)
+			((long *)child->thread.xstate)
 				[(addr - (long)&dummy->fpu) >> 2] = data;
 			ret = 0;
 		} else if (addr == (long) &dummy->u_fpvalid) {
diff --git a/arch/sh/kernel/ptrace_64.c b/arch/sh/kernel/ptrace_64.c
index b063eb8..5fd644d 100644
--- a/arch/sh/kernel/ptrace_64.c
+++ b/arch/sh/kernel/ptrace_64.c
@@ -88,7 +88,7 @@
 		regs->sr |= SR_FD;
 	}
 
-	tmp = ((long *)&task->thread.fpu)[addr / sizeof(unsigned long)];
+	tmp = ((long *)task->thread.xstate)[addr / sizeof(unsigned long)];
 	return tmp;
 }
 
@@ -114,8 +114,7 @@
 	regs = (struct pt_regs*)((unsigned char *)task + THREAD_SIZE) - 1;
 
 	if (!tsk_used_math(task)) {
-		fpinit(&task->thread.fpu.hard);
-		set_stopped_child_used_math(task);
+		init_fpu(task);
 	} else if (last_task_used_math == task) {
 		enable_fpu();
 		save_fpu(task);
@@ -124,7 +123,7 @@
 		regs->sr |= SR_FD;
 	}
 
-	((long *)&task->thread.fpu)[addr / sizeof(unsigned long)] = data;
+	((long *)task->thread.xstate)[addr / sizeof(unsigned long)] = data;
 	return 0;
 }
 
@@ -226,7 +225,7 @@
 		return ret;
 
 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-				   &target->thread.fpu.hard, 0, -1);
+				   &target->thread.xstate->hardfpu, 0, -1);
 }
 
 static int fpregs_set(struct task_struct *target,
@@ -243,7 +242,7 @@
 	set_stopped_child_used_math(target);
 
 	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-				  &target->thread.fpu.hard, 0, -1);
+				  &target->thread.xstate->hardfpu, 0, -1);
 }
 
 static int fpregs_active(struct task_struct *target,
@@ -486,9 +485,10 @@
 }
 
 /* Called with interrupts disabled */
-asmlinkage void do_software_break_point(unsigned long long vec,
-					struct pt_regs *regs)
+BUILD_TRAP_HANDLER(breakpoint)
 {
+	TRAP_HANDLER_DECL;
+
 	/* We need to forward step the PC, to counteract the backstep done
 	   in signal.c. */
 	local_irq_enable();
diff --git a/arch/sh/kernel/reboot.c b/arch/sh/kernel/reboot.c
new file mode 100644
index 0000000..b1fca66
--- /dev/null
+++ b/arch/sh/kernel/reboot.c
@@ -0,0 +1,98 @@
+#include <linux/pm.h>
+#include <linux/kexec.h>
+#include <linux/kernel.h>
+#include <linux/reboot.h>
+#include <linux/module.h>
+#ifdef CONFIG_SUPERH32
+#include <asm/watchdog.h>
+#endif
+#include <asm/addrspace.h>
+#include <asm/reboot.h>
+#include <asm/system.h>
+
+void (*pm_power_off)(void);
+EXPORT_SYMBOL(pm_power_off);
+
+#ifdef CONFIG_SUPERH32
+static void watchdog_trigger_immediate(void)
+{
+	sh_wdt_write_cnt(0xFF);
+	sh_wdt_write_csr(0xC2);
+}
+#endif
+
+static void native_machine_restart(char * __unused)
+{
+	local_irq_disable();
+
+	/* Address error with SR.BL=1 first. */
+	trigger_address_error();
+
+#ifdef CONFIG_SUPERH32
+	/* If that fails or is unsupported, go for the watchdog next. */
+	watchdog_trigger_immediate();
+#endif
+
+	/*
+	 * Give up and sleep.
+	 */
+	while (1)
+		cpu_sleep();
+}
+
+static void native_machine_shutdown(void)
+{
+	smp_send_stop();
+}
+
+static void native_machine_power_off(void)
+{
+	if (pm_power_off)
+		pm_power_off();
+}
+
+static void native_machine_halt(void)
+{
+	/* stop other cpus */
+	machine_shutdown();
+
+	/* stop this cpu */
+	stop_this_cpu(NULL);
+}
+
+struct machine_ops machine_ops = {
+	.power_off	= native_machine_power_off,
+	.shutdown	= native_machine_shutdown,
+	.restart	= native_machine_restart,
+	.halt		= native_machine_halt,
+#ifdef CONFIG_KEXEC
+	.crash_shutdown = native_machine_crash_shutdown,
+#endif
+};
+
+void machine_power_off(void)
+{
+	machine_ops.power_off();
+}
+
+void machine_shutdown(void)
+{
+	machine_ops.shutdown();
+}
+
+void machine_restart(char *cmd)
+{
+	machine_ops.restart(cmd);
+}
+
+void machine_halt(void)
+{
+	machine_ops.halt();
+}
+
+#ifdef CONFIG_KEXEC
+void machine_crash_shutdown(struct pt_regs *regs)
+{
+	machine_ops.crash_shutdown(regs);
+}
+#endif
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index 8b0e697..3459e70 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -421,6 +421,8 @@
 
 	parse_early_param();
 
+	uncached_init();
+
 	plat_early_device_setup();
 
 	/* Let earlyprintk output early console messages */
@@ -449,17 +451,15 @@
 #ifdef CONFIG_DUMMY_CONSOLE
 	conswitchp = &dummy_con;
 #endif
+	paging_init();
+	pmb_init();
+
+	ioremap_fixed_init();
 
 	/* Perform the machine specific initialisation */
 	if (likely(sh_mv.mv_setup))
 		sh_mv.mv_setup(cmdline_p);
 
-	paging_init();
-
-#ifdef CONFIG_PMB_ENABLE
-	pmb_init();
-#endif
-
 #ifdef CONFIG_SMP
 	plat_smp_setup();
 #endif
diff --git a/arch/sh/kernel/sh_bios.c b/arch/sh/kernel/sh_bios.c
index c852f78..47475cc 100644
--- a/arch/sh/kernel/sh_bios.c
+++ b/arch/sh/kernel/sh_bios.c
@@ -1,19 +1,30 @@
 /*
- *  linux/arch/sh/kernel/sh_bios.c
  *  C interface for trapping into the standard LinuxSH BIOS.
  *
  *  Copyright (C) 2000 Greg Banks, Mitch Davis
+ *  Copyright (C) 1999, 2000  Niibe Yutaka
+ *  Copyright (C) 2002  M. R. Brown
+ *  Copyright (C) 2004 - 2010  Paul Mundt
  *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
  */
 #include <linux/module.h>
+#include <linux/console.h>
+#include <linux/tty.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
 #include <asm/sh_bios.h>
 
 #define BIOS_CALL_CONSOLE_WRITE		0
 #define BIOS_CALL_ETH_NODE_ADDR		10
 #define BIOS_CALL_SHUTDOWN		11
-#define BIOS_CALL_CHAR_OUT		0x1f	/* TODO: hack */
 #define BIOS_CALL_GDB_DETACH		0xff
 
+void *gdb_vbr_vector = NULL;
+
 static inline long sh_bios_call(long func, long arg0, long arg1, long arg2,
 				    long arg3)
 {
@@ -23,6 +34,9 @@
 	register long r6 __asm__("r6") = arg2;
 	register long r7 __asm__("r7") = arg3;
 
+	if (!gdb_vbr_vector)
+		return -ENOSYS;
+
 	__asm__ __volatile__("trapa	#0x3f":"=z"(r0)
 			     :"0"(r0), "r"(r4), "r"(r5), "r"(r6), "r"(r7)
 			     :"memory");
@@ -34,11 +48,6 @@
 	sh_bios_call(BIOS_CALL_CONSOLE_WRITE, (long)buf, (long)len, 0, 0);
 }
 
-void sh_bios_char_out(char ch)
-{
-	sh_bios_call(BIOS_CALL_CHAR_OUT, ch, 0, 0, 0);
-}
-
 void sh_bios_gdb_detach(void)
 {
 	sh_bios_call(BIOS_CALL_GDB_DETACH, 0, 0, 0, 0);
@@ -55,3 +64,109 @@
 {
 	sh_bios_call(BIOS_CALL_SHUTDOWN, how, 0, 0, 0);
 }
+
+/*
+ * Read the old value of the VBR register to initialise the vector
+ * through which debug and BIOS traps are delegated by the Linux trap
+ * handler.
+ */
+void sh_bios_vbr_init(void)
+{
+	unsigned long vbr;
+
+	if (unlikely(gdb_vbr_vector))
+		return;
+
+	__asm__ __volatile__ ("stc vbr, %0" : "=r" (vbr));
+
+	if (vbr) {
+		gdb_vbr_vector = (void *)(vbr + 0x100);
+		printk(KERN_NOTICE "Setting GDB trap vector to %p\n",
+		       gdb_vbr_vector);
+	} else
+		printk(KERN_NOTICE "SH-BIOS not detected\n");
+}
+
+/**
+ * sh_bios_vbr_reload - Re-load the system VBR from the BIOS vector.
+ *
+ * This can be used by save/restore code to reinitialize the system VBR
+ * from the fixed BIOS VBR. A no-op if no BIOS VBR is known.
+ */
+void sh_bios_vbr_reload(void)
+{
+	if (gdb_vbr_vector)
+		__asm__ __volatile__ (
+			"ldc %0, vbr"
+			:
+			: "r" (((unsigned long) gdb_vbr_vector) - 0x100)
+			: "memory"
+		);
+}
+
+/*
+ *	Print a string through the BIOS
+ */
+static void sh_console_write(struct console *co, const char *s,
+				 unsigned count)
+{
+	sh_bios_console_write(s, count);
+}
+
+/*
+ *	Setup initial baud/bits/parity. We do two things here:
+ *	- construct a cflag setting for the first rs_open()
+ *	- initialize the serial port
+ *	Return non-zero if we didn't find a serial port.
+ */
+static int __init sh_console_setup(struct console *co, char *options)
+{
+	int	cflag = CREAD | HUPCL | CLOCAL;
+
+	/*
+	 *	Now construct a cflag setting.
+	 *	TODO: this is a totally bogus cflag, as we have
+	 *	no idea what serial settings the BIOS is using, or
+	 *	even if its using the serial port at all.
+	 */
+	cflag |= B115200 | CS8 | /*no parity*/0;
+
+	co->cflag = cflag;
+
+	return 0;
+}
+
+static struct console bios_console = {
+	.name		= "bios",
+	.write		= sh_console_write,
+	.setup		= sh_console_setup,
+	.flags		= CON_PRINTBUFFER,
+	.index		= -1,
+};
+
+static struct console *early_console;
+
+static int __init setup_early_printk(char *buf)
+{
+	int keep_early = 0;
+
+	if (!buf)
+		return 0;
+
+	if (strstr(buf, "keep"))
+		keep_early = 1;
+
+	if (!strncmp(buf, "bios", 4))
+		early_console = &bios_console;
+
+	if (likely(early_console)) {
+		if (keep_early)
+			early_console->flags &= ~CON_BOOT;
+		else
+			early_console->flags |= CON_BOOT;
+		register_console(early_console);
+	}
+
+	return 0;
+}
+early_param("earlyprintk", setup_early_printk);
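
With this change sh_bios.c also registers a "bios" early console, selected
through the usual boot parameter, e.g.:

	earlyprintk=bios,keep

The "bios" token picks bios_console, and "keep" clears CON_BOOT so the
console stays registered after a real console driver comes up; without it
the console is flagged CON_BOOT and is unregistered automatically at that
point.
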
diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c
index 12815ce..579cd2c 100644
--- a/arch/sh/kernel/signal_32.c
+++ b/arch/sh/kernel/signal_32.c
@@ -150,7 +150,7 @@
 		return 0;
 
 	set_used_math();
-	return __copy_from_user(&tsk->thread.fpu.hard, &sc->sc_fpregs[0],
+	return __copy_from_user(&tsk->thread.xstate->hardfpu, &sc->sc_fpregs[0],
 				sizeof(long)*(16*2+2));
 }
 
@@ -175,7 +175,7 @@
 	clear_used_math();
 
 	unlazy_fpu(tsk, regs);
-	return __copy_to_user(&sc->sc_fpregs[0], &tsk->thread.fpu.hard,
+	return __copy_to_user(&sc->sc_fpregs[0], &tsk->thread.xstate->hardfpu,
 			      sizeof(long)*(16*2+2));
 }
 #endif /* CONFIG_SH_FPU */
@@ -528,7 +528,7 @@
 		/* fallthrough */
 		case -ERESTARTNOINTR:
 			regs->regs[0] = save_r0;
-			regs->pc -= instruction_size(ctrl_inw(regs->pc - 4));
+			regs->pc -= instruction_size(__raw_readw(regs->pc - 4));
 			break;
 	}
 }
@@ -626,9 +626,9 @@
 		    regs->regs[0] == -ERESTARTSYS ||
 		    regs->regs[0] == -ERESTARTNOINTR) {
 			regs->regs[0] = save_r0;
-			regs->pc -= instruction_size(ctrl_inw(regs->pc - 4));
+			regs->pc -= instruction_size(__raw_readw(regs->pc - 4));
 		} else if (regs->regs[0] == -ERESTART_RESTARTBLOCK) {
-			regs->pc -= instruction_size(ctrl_inw(regs->pc - 4));
+			regs->pc -= instruction_size(__raw_readw(regs->pc - 4));
 			regs->regs[3] = __NR_restart_syscall;
 		}
 	}
diff --git a/arch/sh/kernel/signal_64.c b/arch/sh/kernel/signal_64.c
index 580e97d..5a9f1f1 100644
--- a/arch/sh/kernel/signal_64.c
+++ b/arch/sh/kernel/signal_64.c
@@ -297,7 +297,7 @@
 		regs->sr |= SR_FD;
 	}
 
-	err |= __copy_from_user(&current->thread.fpu.hard, &sc->sc_fpregs[0],
+	err |= __copy_from_user(&current->thread.xstate->hardfpu, &sc->sc_fpregs[0],
 				(sizeof(long long) * 32) + (sizeof(int) * 1));
 
 	return err;
@@ -322,7 +322,7 @@
 		regs->sr |= SR_FD;
 	}
 
-	err |= __copy_to_user(&sc->sc_fpregs[0], &current->thread.fpu.hard,
+	err |= __copy_to_user(&sc->sc_fpregs[0], &current->thread.xstate->hardfpu,
 			      (sizeof(long long) * 32) + (sizeof(int) * 1));
 	clear_used_math();
 
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index 983e079..e124cf7 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -161,15 +161,6 @@
 	plat_send_ipi(cpu, SMP_MSG_RESCHEDULE);
 }
 
-static void stop_this_cpu(void *unused)
-{
-	cpu_clear(smp_processor_id(), cpu_online_map);
-	local_irq_disable();
-
-	for (;;)
-		cpu_relax();
-}
-
 void smp_send_stop(void)
 {
 	smp_call_function(stop_this_cpu, 0, 0);
diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
index 7b03633..0830c2a 100644
--- a/arch/sh/kernel/traps.c
+++ b/arch/sh/kernel/traps.c
@@ -58,7 +58,7 @@
 	TRAP_HANDLER_DECL;
 
 	/* Rewind */
-	regs->pc -= instruction_size(ctrl_inw(regs->pc - 4));
+	regs->pc -= instruction_size(__raw_readw(regs->pc - 4));
 
 	if (notify_die(DIE_TRAP, "debug trap", regs, 0, vec & 0xff,
 		       SIGTRAP) == NOTIFY_STOP)
@@ -75,7 +75,7 @@
 	TRAP_HANDLER_DECL;
 
 	/* Rewind */
-	regs->pc -= instruction_size(ctrl_inw(regs->pc - 4));
+	regs->pc -= instruction_size(__raw_readw(regs->pc - 4));
 
 	if (notify_die(DIE_TRAP, "bug trap", regs, 0, TRAPA_BUG_OPCODE & 0xff,
 		       SIGTRAP) == NOTIFY_STOP)
diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c
index 86639be..c3d86fa 100644
--- a/arch/sh/kernel/traps_32.c
+++ b/arch/sh/kernel/traps_32.c
@@ -24,11 +24,10 @@
 #include <linux/kdebug.h>
 #include <linux/kexec.h>
 #include <linux/limits.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
 #include <linux/sysfs.h>
+#include <linux/uaccess.h>
 #include <asm/system.h>
-#include <asm/uaccess.h>
+#include <asm/alignment.h>
 #include <asm/fpu.h>
 #include <asm/kprobes.h>
 
@@ -47,73 +46,6 @@
 #define TRAP_ILLEGAL_SLOT_INST	13
 #endif
 
-static unsigned long se_user;
-static unsigned long se_sys;
-static unsigned long se_half;
-static unsigned long se_word;
-static unsigned long se_dword;
-static unsigned long se_multi;
-/* bitfield: 1: warn 2: fixup 4: signal -> combinations 2|4 && 1|2|4 are not
-   valid! */
-static int se_usermode = 3;
-/* 0: no warning 1: print a warning message, disabled by default */
-static int se_kernmode_warn;
-
-#ifdef CONFIG_PROC_FS
-static const char *se_usermode_action[] = {
-	"ignored",
-	"warn",
-	"fixup",
-	"fixup+warn",
-	"signal",
-	"signal+warn"
-};
-
-static int alignment_proc_show(struct seq_file *m, void *v)
-{
-	seq_printf(m, "User:\t\t%lu\n", se_user);
-	seq_printf(m, "System:\t\t%lu\n", se_sys);
-	seq_printf(m, "Half:\t\t%lu\n", se_half);
-	seq_printf(m, "Word:\t\t%lu\n", se_word);
-	seq_printf(m, "DWord:\t\t%lu\n", se_dword);
-	seq_printf(m, "Multi:\t\t%lu\n", se_multi);
-	seq_printf(m, "User faults:\t%i (%s)\n", se_usermode,
-			se_usermode_action[se_usermode]);
-	seq_printf(m, "Kernel faults:\t%i (fixup%s)\n", se_kernmode_warn,
-			se_kernmode_warn ? "+warn" : "");
-	return 0;
-}
-
-static int alignment_proc_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, alignment_proc_show, NULL);
-}
-
-static ssize_t alignment_proc_write(struct file *file,
-		const char __user *buffer, size_t count, loff_t *pos)
-{
-	int *data = PDE(file->f_path.dentry->d_inode)->data;
-	char mode;
-
-	if (count > 0) {
-		if (get_user(mode, buffer))
-			return -EFAULT;
-		if (mode >= '0' && mode <= '5')
-			*data = mode - '0';
-	}
-	return count;
-}
-
-static const struct file_operations alignment_proc_fops = {
-	.owner		= THIS_MODULE,
-	.open		= alignment_proc_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-	.write		= alignment_proc_write,
-};
-#endif
-
 static void dump_mem(const char *str, unsigned long bottom, unsigned long top)
 {
 	unsigned long p;
@@ -265,10 +197,10 @@
 	count = 1<<(instruction&3);
 
 	switch (count) {
-	case 1: se_half  += 1; break;
-	case 2: se_word  += 1; break;
-	case 4: se_dword += 1; break;
-	case 8: se_multi += 1; break; /* ??? */
+	case 1: inc_unaligned_byte_access(); break;
+	case 2: inc_unaligned_word_access(); break;
+	case 4: inc_unaligned_dword_access(); break;
+	case 8: inc_unaligned_multi_access(); break;
 	}
 
 	ret = -EFAULT;
@@ -452,18 +384,8 @@
 	rm = regs->regs[index];
 
 	/* shout about fixups */
-	if (!expected) {
-		if (user_mode(regs) && (se_usermode & 1) && printk_ratelimit())
-			pr_notice("Fixing up unaligned userspace access "
-				  "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
-				  current->comm, task_pid_nr(current),
-				  (void *)regs->pc, instruction);
-		else if (se_kernmode_warn && printk_ratelimit())
-			pr_notice("Fixing up unaligned kernel access "
-				  "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
-				  current->comm, task_pid_nr(current),
-				  (void *)regs->pc, instruction);
-	}
+	if (!expected)
+		unaligned_fixups_notify(current, instruction, regs);
 
 	ret = -EFAULT;
 	switch (instruction&0xF000) {
@@ -616,10 +538,10 @@
 
 	if (user_mode(regs)) {
 		int si_code = BUS_ADRERR;
+		unsigned int user_action;
 
 		local_irq_enable();
-
-		se_user += 1;
+		inc_unaligned_user_access();
 
 		set_fs(USER_DS);
 		if (copy_from_user(&instruction, (insn_size_t *)(regs->pc & ~1),
@@ -630,16 +552,12 @@
 		set_fs(oldfs);
 
 		/* shout about userspace fixups */
-		if (se_usermode & 1)
-			printk(KERN_NOTICE "Unaligned userspace access "
-			       "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
-			       current->comm, current->pid, (void *)regs->pc,
-			       instruction);
+		unaligned_fixups_notify(current, instruction, regs);
 
-		if (se_usermode & 2)
+		user_action = unaligned_user_action();
+		if (user_action & UM_FIXUP)
 			goto fixup;
-
-		if (se_usermode & 4)
+		if (user_action & UM_SIGNAL)
 			goto uspace_segv;
 		else {
 			/* ignore */
@@ -659,7 +577,7 @@
 					      &user_mem_access, 0);
 		set_fs(oldfs);
 
-		if (tmp==0)
+		if (tmp == 0)
 			return; /* sorted */
 uspace_segv:
 		printk(KERN_NOTICE "Sending SIGBUS to \"%s\" due to unaligned "
@@ -672,7 +590,7 @@
 		info.si_addr = (void __user *)address;
 		force_sig_info(SIGBUS, &info, current);
 	} else {
-		se_sys += 1;
+		inc_unaligned_kernel_access();
 
 		if (regs->pc & 1)
 			die("unaligned program counter", regs, error_code);
@@ -687,11 +605,7 @@
 			die("insn faulting in do_address_error", regs, 0);
 		}
 
-		if (se_kernmode_warn)
-			printk(KERN_NOTICE "Unaligned kernel access "
-			       "on behalf of \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
-			       current->comm, current->pid, (void *)regs->pc,
-			       instruction);
+		unaligned_fixups_notify(current, instruction, regs);
 
 		handle_unaligned_access(instruction, regs,
 					&user_mem_access, 0);
@@ -876,35 +790,10 @@
 	die_if_kernel("exception", regs, ex);
 }
 
-#if defined(CONFIG_SH_STANDARD_BIOS)
-void *gdb_vbr_vector;
-
-static inline void __init gdb_vbr_init(void)
-{
-	register unsigned long vbr;
-
-	/*
-	 * Read the old value of the VBR register to initialise
-	 * the vector through which debug and BIOS traps are
-	 * delegated by the Linux trap handler.
-	 */
-	asm volatile("stc vbr, %0" : "=r" (vbr));
-
-	gdb_vbr_vector = (void *)(vbr + 0x100);
-	printk("Setting GDB trap vector to 0x%08lx\n",
-	       (unsigned long)gdb_vbr_vector);
-}
-#endif
-
 void __cpuinit per_cpu_trap_init(void)
 {
 	extern void *vbr_base;
 
-#ifdef CONFIG_SH_STANDARD_BIOS
-	if (raw_smp_processor_id() == 0)
-		gdb_vbr_init();
-#endif
-
 	/* NOTE: The VBR value should be at P1
 	   (or P2, virtual "fixed" address space).
 	   It definitely should not be a physical address.  */
@@ -956,11 +845,8 @@
 #endif
 
 #ifdef TRAP_UBC
-	set_exception_table_vec(TRAP_UBC, break_point_trap);
+	set_exception_table_vec(TRAP_UBC, breakpoint_trap_handler);
 #endif
-
-	/* Setup VBR for boot cpu */
-	per_cpu_trap_init();
 }
 
 void show_stack(struct task_struct *tsk, unsigned long *sp)
@@ -985,34 +871,3 @@
 	show_stack(NULL, NULL);
 }
 EXPORT_SYMBOL(dump_stack);
-
-#ifdef CONFIG_PROC_FS
-/*
- * This needs to be done after sysctl_init, otherwise sys/ will be
- * overwritten.  Actually, this shouldn't be in sys/ at all since
- * it isn't a sysctl, and it doesn't contain sysctl information.
- * We now locate it in /proc/cpu/alignment instead.
- */
-static int __init alignment_init(void)
-{
-	struct proc_dir_entry *dir, *res;
-
-	dir = proc_mkdir("cpu", NULL);
-	if (!dir)
-		return -ENOMEM;
-
-	res = proc_create_data("alignment", S_IWUSR | S_IRUGO, dir,
-			       &alignment_proc_fops, &se_usermode);
-	if (!res)
-		return -ENOMEM;
-
-        res = proc_create_data("kernel_alignment", S_IWUSR | S_IRUGO, dir,
-			       &alignment_proc_fops, &se_kernmode_warn);
-        if (!res)
-                return -ENOMEM;
-
-	return 0;
-}
-
-fs_initcall(alignment_init);
-#endif
diff --git a/arch/sh/kernel/traps_64.c b/arch/sh/kernel/traps_64.c
index d86f531..e3f92eb 100644
--- a/arch/sh/kernel/traps_64.c
+++ b/arch/sh/kernel/traps_64.c
@@ -611,19 +611,19 @@
 
 		switch (width_shift) {
 		case 2:
-			current->thread.fpu.hard.fp_regs[destreg] = buflo;
+			current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
 			break;
 		case 3:
 			if (do_paired_load) {
-				current->thread.fpu.hard.fp_regs[destreg] = buflo;
-				current->thread.fpu.hard.fp_regs[destreg+1] = bufhi;
+				current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
+				current->thread.xstate->hardfpu.fp_regs[destreg+1] = bufhi;
 			} else {
 #if defined(CONFIG_CPU_LITTLE_ENDIAN)
-				current->thread.fpu.hard.fp_regs[destreg] = bufhi;
-				current->thread.fpu.hard.fp_regs[destreg+1] = buflo;
+				current->thread.xstate->hardfpu.fp_regs[destreg] = bufhi;
+				current->thread.xstate->hardfpu.fp_regs[destreg+1] = buflo;
 #else
-				current->thread.fpu.hard.fp_regs[destreg] = buflo;
-				current->thread.fpu.hard.fp_regs[destreg+1] = bufhi;
+				current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
+				current->thread.xstate->hardfpu.fp_regs[destreg+1] = bufhi;
 #endif
 			}
 			break;
@@ -681,19 +681,19 @@
 
 		switch (width_shift) {
 		case 2:
-			buflo = current->thread.fpu.hard.fp_regs[srcreg];
+			buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
 			break;
 		case 3:
 			if (do_paired_load) {
-				buflo = current->thread.fpu.hard.fp_regs[srcreg];
-				bufhi = current->thread.fpu.hard.fp_regs[srcreg+1];
+				buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
+				bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
 			} else {
 #if defined(CONFIG_CPU_LITTLE_ENDIAN)
-				bufhi = current->thread.fpu.hard.fp_regs[srcreg];
-				buflo = current->thread.fpu.hard.fp_regs[srcreg+1];
+				bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg];
+				buflo = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
 #else
-				buflo = current->thread.fpu.hard.fp_regs[srcreg];
-				bufhi = current->thread.fpu.hard.fp_regs[srcreg+1];
+				buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
+				bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
 #endif
 			}
 			break;
diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S
index a1e4ec2..7f8a709 100644
--- a/arch/sh/kernel/vmlinux.lds.S
+++ b/arch/sh/kernel/vmlinux.lds.S
@@ -3,7 +3,7 @@
  * Written by Niibe Yutaka and Paul Mundt
  */
 #ifdef CONFIG_SUPERH64
-#define LOAD_OFFSET	CONFIG_PAGE_OFFSET
+#define LOAD_OFFSET	PAGE_OFFSET
 OUTPUT_ARCH(sh:sh5)
 #else
 #define LOAD_OFFSET	0
@@ -14,17 +14,16 @@
 #include <asm/cache.h>
 #include <asm/vmlinux.lds.h>
 
+#ifdef CONFIG_PMB
+ #define MEMORY_OFFSET	0
+#else
+ #define MEMORY_OFFSET	__MEMORY_START
+#endif
+
 ENTRY(_start)
 SECTIONS
 {
-#ifdef CONFIG_PMB_FIXED
-	. = CONFIG_PAGE_OFFSET + (CONFIG_MEMORY_START & 0x1fffffff) +
-	    CONFIG_ZERO_PAGE_OFFSET;
-#elif defined(CONFIG_32BIT)
-	. = CONFIG_PAGE_OFFSET + CONFIG_ZERO_PAGE_OFFSET;
-#else
-	. = CONFIG_PAGE_OFFSET + CONFIG_MEMORY_START + CONFIG_ZERO_PAGE_OFFSET;
-#endif
+	. = PAGE_OFFSET + MEMORY_OFFSET + CONFIG_ZERO_PAGE_OFFSET;
 
 	_text = .;		/* Text and read-only data */
 
@@ -35,12 +34,7 @@
 	.text : AT(ADDR(.text) - LOAD_OFFSET) {
 		HEAD_TEXT
 		TEXT_TEXT
-
-#ifdef CONFIG_SUPERH64
-		*(.text64)
-		*(.text..SHmedia32)
-#endif
-
+		EXTRA_TEXT
 		SCHED_TEXT
 		LOCK_TEXT
 		KPROBES_TEXT
@@ -51,24 +45,12 @@
 	} = 0x0009
 
 	EXCEPTION_TABLE(16)
-
 	NOTES
+
+	_sdata = .;
 	RO_DATA(PAGE_SIZE)
-
-	/*
-	 * Code which must be executed uncached and the associated data
-	 */
-	. = ALIGN(PAGE_SIZE);
-	.uncached : AT(ADDR(.uncached) - LOAD_OFFSET) {
-		__uncached_start = .;
-		*(.uncached.text)
-		*(.uncached.data)
-		__uncached_end = .;
-	}
-
 	RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
-
-	_edata = .;			/* End of data section */
+	_edata = .;
 
 	DWARF_EH_FRAME
 
diff --git a/arch/sh/math-emu/math.c b/arch/sh/math-emu/math.c
index d6c15ca..1fcdb12 100644
--- a/arch/sh/math-emu/math.c
+++ b/arch/sh/math-emu/math.c
@@ -471,10 +471,10 @@
  *	denormal_to_double - Given denormalized float number,
  *	                     store double float
  *
- *	@fpu: Pointer to sh_fpu_hard structure
+ *	@fpu: Pointer to sh_fpu_soft structure
  *	@n: Index to FP register
  */
-static void denormal_to_double(struct sh_fpu_hard_struct *fpu, int n)
+static void denormal_to_double(struct sh_fpu_soft_struct *fpu, int n)
 {
 	unsigned long du, dl;
 	unsigned long x = fpu->fpul;
@@ -552,11 +552,11 @@
 	if ((finsn & 0xf1ff) == 0xf0ad) { /* fcnvsd */
 		struct task_struct *tsk = current;
 
-		if ((tsk->thread.fpu.hard.fpscr & (1 << 17))) {
+		if ((tsk->thread.xstate->softfpu.fpscr & (1 << 17))) {
 			/* FPU error */
-			denormal_to_double (&tsk->thread.fpu.hard,
+			denormal_to_double (&tsk->thread.xstate->softfpu,
 					    (finsn >> 8) & 0xf);
-			tsk->thread.fpu.hard.fpscr &=
+			tsk->thread.xstate->softfpu.fpscr &=
 				~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK);
 			task_thread_info(tsk)->status |= TS_USEDFPU;
 		} else {
@@ -617,7 +617,7 @@
 int do_fpu_inst(unsigned short inst, struct pt_regs *regs)
 {
 	struct task_struct *tsk = current;
-	struct sh_fpu_soft_struct *fpu = &(tsk->thread.fpu.soft);
+	struct sh_fpu_soft_struct *fpu = &(tsk->thread.xstate->softfpu);
 
 	if (!(task_thread_info(tsk)->status & TS_USEDFPU)) {
 		/* initialize once. */
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 986a71b..1445ca6 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -75,52 +75,25 @@
 config 29BIT
 	def_bool !32BIT
 	depends on SUPERH32
+	select UNCACHED_MAPPING
 
 config 32BIT
 	bool
 	default y if CPU_SH5
 
-config PMB_ENABLE
+config PMB
 	bool "Support 32-bit physical addressing through PMB"
 	depends on MMU && EXPERIMENTAL && CPU_SH4A && !CPU_SH4AL_DSP
-	help
-	  If you say Y here, physical addressing will be extended to
-	  32-bits through the SH-4A PMB. If this is not set, legacy
-	  29-bit physical addressing will be used.
-
-choice
-	prompt "PMB handling type"
-	depends on PMB_ENABLE
-	default PMB_FIXED
-
-config PMB
-	bool "PMB"
-	depends on MMU && EXPERIMENTAL && CPU_SH4A && !CPU_SH4AL_DSP
-	help
-	  If you say Y here, physical addressing will be extended to
-	  32-bits through the SH-4A PMB. If this is not set, legacy
-	  29-bit physical addressing will be used.
-
-config PMB_FIXED
-	bool "fixed PMB"
-	depends on MMU && EXPERIMENTAL && CPU_SH4A && !CPU_SH4AL_DSP
 	select 32BIT
+	select UNCACHED_MAPPING
 	help
-	  If this option is enabled, fixed PMB mappings are inherited
-	  from the boot loader, and the kernel does not attempt dynamic
-	  management. This is the closest to legacy 29-bit physical mode,
-	  and allows systems to support up to 512MiB of system memory.
-
-endchoice
+	  If you say Y here, physical addressing will be extended to
+	  32-bits through the SH-4A PMB. If this is not set, legacy
+	  29-bit physical addressing will be used.
 
 config X2TLB
-	bool "Enable extended TLB mode"
-	depends on (CPU_SHX2 || CPU_SHX3) && MMU && EXPERIMENTAL
-	help
-	  Selecting this option will enable the extended mode of the SH-X2
-	  TLB. For legacy SH-X behaviour and interoperability, say N. For
-	  all of the fun new features and a willingless to submit bug reports,
-	  say Y.
+	def_bool y
+	depends on (CPU_SHX2 || CPU_SHX3) && MMU
 
 config VSYSCALL
 	bool "Support vsyscall page"
@@ -188,14 +161,19 @@
 	def_bool y
 	depends on MEMORY_HOTPLUG
 
+config IOREMAP_FIXED
+       def_bool y
+       depends on X2TLB || SUPERH64
+
+config UNCACHED_MAPPING
+	bool
+
 choice
 	prompt "Kernel page size"
-	default PAGE_SIZE_8KB if X2TLB
 	default PAGE_SIZE_4KB
 
 config PAGE_SIZE_4KB
 	bool "4kB"
-	depends on !MMU || !X2TLB
 	help
 	  This is the default page size used by all SuperH CPUs.
 
diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile
index 8a70535..3dc8a8a 100644
--- a/arch/sh/mm/Makefile
+++ b/arch/sh/mm/Makefile
@@ -2,7 +2,7 @@
 # Makefile for the Linux SuperH-specific parts of the memory manager.
 #
 
-obj-y			:= cache.o init.o consistent.o mmap.o
+obj-y			:= alignment.o cache.o init.o consistent.o mmap.o
 
 cacheops-$(CONFIG_CPU_SH2)		:= cache-sh2.o
 cacheops-$(CONFIG_CPU_SH2A)		:= cache-sh2a.o
@@ -15,7 +15,7 @@
 
 mmu-y			:= nommu.o extable_32.o
 mmu-$(CONFIG_MMU)	:= extable_$(BITS).o fault_$(BITS).o \
-			   ioremap_$(BITS).o kmap.o tlbflush_$(BITS).o
+			   ioremap.o kmap.o pgtable.o tlbflush_$(BITS).o
 
 obj-y			+= $(mmu-y)
 obj-$(CONFIG_DEBUG_FS)	+= asids-debugfs.o
@@ -26,15 +26,17 @@
 
 ifdef CONFIG_MMU
 tlb-$(CONFIG_CPU_SH3)		:= tlb-sh3.o
-tlb-$(CONFIG_CPU_SH4)		:= tlb-sh4.o
+tlb-$(CONFIG_CPU_SH4)		:= tlb-sh4.o tlb-urb.o
 tlb-$(CONFIG_CPU_SH5)		:= tlb-sh5.o
-tlb-$(CONFIG_CPU_HAS_PTEAEX)	:= tlb-pteaex.o
+tlb-$(CONFIG_CPU_HAS_PTEAEX)	:= tlb-pteaex.o tlb-urb.o
 obj-y				+= $(tlb-y)
 endif
 
 obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
-obj-$(CONFIG_PMB_ENABLE)	+= pmb.o
+obj-$(CONFIG_PMB)		+= pmb.o
 obj-$(CONFIG_NUMA)		+= numa.o
+obj-$(CONFIG_IOREMAP_FIXED)	+= ioremap_fixed.o
+obj-$(CONFIG_UNCACHED_MAPPING)	+= uncached.o
 
 # Special flags for fault_64.o.  This puts restrictions on the number of
 # caller-save registers that the compiler can target when building this file.
diff --git a/arch/sh/mm/alignment.c b/arch/sh/mm/alignment.c
new file mode 100644
index 0000000..b2595b8
--- /dev/null
+++ b/arch/sh/mm/alignment.c
@@ -0,0 +1,189 @@
+/*
+ * Alignment access counters and corresponding user-space interfaces.
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Copyright (C) 2009 - 2010 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/seq_file.h>
+#include <linux/proc_fs.h>
+#include <linux/uaccess.h>
+#include <asm/alignment.h>
+#include <asm/processor.h>
+
+static unsigned long se_user;
+static unsigned long se_sys;
+static unsigned long se_half;
+static unsigned long se_word;
+static unsigned long se_dword;
+static unsigned long se_multi;
+/* bitfield: 1: warn 2: fixup 4: signal -> combinations 2|4 && 1|2|4 are not
+   valid! */
+static int se_usermode = UM_WARN | UM_FIXUP;
+/* 0: no warning 1: print a warning message, disabled by default */
+static int se_kernmode_warn;
+
+core_param(alignment, se_usermode, int, 0600);
+
+void inc_unaligned_byte_access(void)
+{
+	se_half++;
+}
+
+void inc_unaligned_word_access(void)
+{
+	se_word++;
+}
+
+void inc_unaligned_dword_access(void)
+{
+	se_dword++;
+}
+
+void inc_unaligned_multi_access(void)
+{
+	se_multi++;
+}
+
+void inc_unaligned_user_access(void)
+{
+	se_user++;
+}
+
+void inc_unaligned_kernel_access(void)
+{
+	se_sys++;
+}
+
+/*
+ * This defaults to the global policy which can be set from the command
+ * line, while processes can overload their preferences via prctl().
+ */
+unsigned int unaligned_user_action(void)
+{
+	unsigned int action = se_usermode;
+
+	if (current->thread.flags & SH_THREAD_UAC_SIGBUS) {
+		action &= ~UM_FIXUP;
+		action |= UM_SIGNAL;
+	}
+
+	if (current->thread.flags & SH_THREAD_UAC_NOPRINT)
+		action &= ~UM_WARN;
+
+	return action;
+}
+
+int get_unalign_ctl(struct task_struct *tsk, unsigned long addr)
+{
+	return put_user(tsk->thread.flags & SH_THREAD_UAC_MASK,
+			(unsigned int __user *)addr);
+}
+
+int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
+{
+	tsk->thread.flags = (tsk->thread.flags & ~SH_THREAD_UAC_MASK) |
+			    (val & SH_THREAD_UAC_MASK);
+	return 0;
+}
+
+void unaligned_fixups_notify(struct task_struct *tsk, insn_size_t insn,
+			     struct pt_regs *regs)
+{
+	if (user_mode(regs) && (se_usermode & UM_WARN) && printk_ratelimit())
+		pr_notice("Fixing up unaligned userspace access "
+			  "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
+			  tsk->comm, task_pid_nr(tsk),
+			  (void *)instruction_pointer(regs), insn);
+	else if (se_kernmode_warn && printk_ratelimit())
+		pr_notice("Fixing up unaligned kernel access "
+			  "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
+			  tsk->comm, task_pid_nr(tsk),
+			  (void *)instruction_pointer(regs), insn);
+}
+
+static const char *se_usermode_action[] = {
+	"ignored",
+	"warn",
+	"fixup",
+	"fixup+warn",
+	"signal",
+	"signal+warn"
+};
+
+static int alignment_proc_show(struct seq_file *m, void *v)
+{
+	seq_printf(m, "User:\t\t%lu\n", se_user);
+	seq_printf(m, "System:\t\t%lu\n", se_sys);
+	seq_printf(m, "Half:\t\t%lu\n", se_half);
+	seq_printf(m, "Word:\t\t%lu\n", se_word);
+	seq_printf(m, "DWord:\t\t%lu\n", se_dword);
+	seq_printf(m, "Multi:\t\t%lu\n", se_multi);
+	seq_printf(m, "User faults:\t%i (%s)\n", se_usermode,
+			se_usermode_action[se_usermode]);
+	seq_printf(m, "Kernel faults:\t%i (fixup%s)\n", se_kernmode_warn,
+			se_kernmode_warn ? "+warn" : "");
+	return 0;
+}
+
+static int alignment_proc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, alignment_proc_show, NULL);
+}
+
+static ssize_t alignment_proc_write(struct file *file,
+		const char __user *buffer, size_t count, loff_t *pos)
+{
+	int *data = PDE(file->f_path.dentry->d_inode)->data;
+	char mode;
+
+	if (count > 0) {
+		if (get_user(mode, buffer))
+			return -EFAULT;
+		if (mode >= '0' && mode <= '5')
+			*data = mode - '0';
+	}
+	return count;
+}
+
+static const struct file_operations alignment_proc_fops = {
+	.owner		= THIS_MODULE,
+	.open		= alignment_proc_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+	.write		= alignment_proc_write,
+};
+
+/*
+ * This needs to be done after sysctl_init, otherwise sys/ will be
+ * overwritten.  Actually, this shouldn't be in sys/ at all since
+ * it isn't a sysctl, and it doesn't contain sysctl information.
+ * We now locate it in /proc/cpu/alignment instead.
+ */
+static int __init alignment_init(void)
+{
+	struct proc_dir_entry *dir, *res;
+
+	dir = proc_mkdir("cpu", NULL);
+	if (!dir)
+		return -ENOMEM;
+
+	res = proc_create_data("alignment", S_IWUSR | S_IRUGO, dir,
+			       &alignment_proc_fops, &se_usermode);
+	if (!res)
+		return -ENOMEM;
+
+        res = proc_create_data("kernel_alignment", S_IWUSR | S_IRUGO, dir,
+			       &alignment_proc_fops, &se_kernmode_warn);
+        if (!res)
+                return -ENOMEM;
+
+	return 0;
+}
+fs_initcall(alignment_init);
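
The unaligned access policy exposed here can be tuned in two ways: globally,
via the new "alignment=" kernel parameter (the core_param above) or by
writing a digit from 0 to 5 to /proc/cpu/alignment, matching the
se_usermode_action[] strings; and per process, since get_unalign_ctl() and
set_unalign_ctl() are the arch hooks behind the generic PR_GET_UNALIGN and
PR_SET_UNALIGN prctl calls.  A minimal userspace sketch of the per-process
side (assuming the prctl flag values map onto the SH_THREAD_UAC_* bits in
the usual way):

	#include <stdio.h>
	#include <sys/prctl.h>

	int main(void)
	{
		/* ask for SIGBUS instead of silent fixup of our unaligned accesses */
		if (prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS) != 0)
			perror("PR_SET_UNALIGN");
		return 0;
	}
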
diff --git a/arch/sh/mm/cache-debugfs.c b/arch/sh/mm/cache-debugfs.c
index 5ba067b..690ed01 100644
--- a/arch/sh/mm/cache-debugfs.c
+++ b/arch/sh/mm/cache-debugfs.c
@@ -22,8 +22,7 @@
 	CACHE_TYPE_UNIFIED,
 };
 
-static int __uses_jump_to_uncached cache_seq_show(struct seq_file *file,
-						  void *iter)
+static int cache_seq_show(struct seq_file *file, void *iter)
 {
 	unsigned int cache_type = (unsigned int)file->private;
 	struct cache_info *cache;
@@ -37,7 +36,7 @@
 	 */
 	jump_to_uncached();
 
-	ccr = ctrl_inl(CCR);
+	ccr = __raw_readl(CCR);
 	if ((ccr & CCR_CACHE_ENABLE) == 0) {
 		back_to_cached();
 
@@ -90,7 +89,7 @@
 		for (addr = addrstart, line = 0;
 		     addr < addrstart + waysize;
 		     addr += cache->linesz, line++) {
-			unsigned long data = ctrl_inl(addr);
+			unsigned long data = __raw_readl(addr);
 
 			/* Check the V bit, ignore invalid cachelines */
 			if ((data & 1) == 0)
diff --git a/arch/sh/mm/cache-sh2.c b/arch/sh/mm/cache-sh2.c
index 699a71f..defcf71 100644
--- a/arch/sh/mm/cache-sh2.c
+++ b/arch/sh/mm/cache-sh2.c
@@ -28,10 +28,10 @@
 		unsigned long addr = CACHE_OC_ADDRESS_ARRAY | (v & 0x00000ff0);
 		int way;
 		for (way = 0; way < 4; way++) {
-			unsigned long data =  ctrl_inl(addr | (way << 12));
+			unsigned long data =  __raw_readl(addr | (way << 12));
 			if ((data & CACHE_PHYSADDR_MASK) == (v & CACHE_PHYSADDR_MASK)) {
 				data &= ~SH_CACHE_UPDATED;
-				ctrl_outl(data, addr | (way << 12));
+				__raw_writel(data, addr | (way << 12));
 			}
 		}
 	}
@@ -47,7 +47,7 @@
 		& ~(L1_CACHE_BYTES-1);
 
 	for (v = begin; v < end; v+=L1_CACHE_BYTES)
-		ctrl_outl((v & CACHE_PHYSADDR_MASK),
+		__raw_writel((v & CACHE_PHYSADDR_MASK),
 			  CACHE_OC_ADDRESS_ARRAY | (v & 0x00000ff0) | 0x00000008);
 }
 
@@ -63,9 +63,9 @@
 	local_irq_save(flags);
 	jump_to_uncached();
 
-	ccr = ctrl_inl(CCR);
+	ccr = __raw_readl(CCR);
 	ccr |= CCR_CACHE_INVALIDATE;
-	ctrl_outl(ccr, CCR);
+	__raw_writel(ccr, CCR);
 
 	back_to_cached();
 	local_irq_restore(flags);
@@ -78,7 +78,7 @@
 		& ~(L1_CACHE_BYTES-1);
 
 	for (v = begin; v < end; v+=L1_CACHE_BYTES)
-		ctrl_outl((v & CACHE_PHYSADDR_MASK),
+		__raw_writel((v & CACHE_PHYSADDR_MASK),
 			  CACHE_OC_ADDRESS_ARRAY | (v & 0x00000ff0) | 0x00000008);
 #endif
 }
diff --git a/arch/sh/mm/cache-sh2a.c b/arch/sh/mm/cache-sh2a.c
index 975899d..1f51225 100644
--- a/arch/sh/mm/cache-sh2a.c
+++ b/arch/sh/mm/cache-sh2a.c
@@ -32,10 +32,10 @@
 		unsigned long addr = CACHE_OC_ADDRESS_ARRAY | (v & 0x000007f0);
 		int way;
 		for (way = 0; way < 4; way++) {
-			unsigned long data =  ctrl_inl(addr | (way << 11));
+			unsigned long data =  __raw_readl(addr | (way << 11));
 			if ((data & CACHE_PHYSADDR_MASK) == (v & CACHE_PHYSADDR_MASK)) {
 				data &= ~SH_CACHE_UPDATED;
-				ctrl_outl(data, addr | (way << 11));
+				__raw_writel(data, addr | (way << 11));
 			}
 		}
 	}
@@ -58,7 +58,7 @@
 	jump_to_uncached();
 
 	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
-		ctrl_outl((v & CACHE_PHYSADDR_MASK),
+		__raw_writel((v & CACHE_PHYSADDR_MASK),
 			  CACHE_OC_ADDRESS_ARRAY | (v & 0x000007f0) | 0x00000008);
 	}
 	back_to_cached();
@@ -78,17 +78,17 @@
 	jump_to_uncached();
 
 #ifdef CONFIG_CACHE_WRITEBACK
-	ctrl_outl(ctrl_inl(CCR) | CCR_OCACHE_INVALIDATE, CCR);
+	__raw_writel(__raw_readl(CCR) | CCR_OCACHE_INVALIDATE, CCR);
 	/* I-cache invalidate */
 	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
-		ctrl_outl((v & CACHE_PHYSADDR_MASK),
+		__raw_writel((v & CACHE_PHYSADDR_MASK),
 			  CACHE_IC_ADDRESS_ARRAY | (v & 0x000007f0) | 0x00000008);
 	}
 #else
 	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
-		ctrl_outl((v & CACHE_PHYSADDR_MASK),
+		__raw_writel((v & CACHE_PHYSADDR_MASK),
 			  CACHE_IC_ADDRESS_ARRAY | (v & 0x000007f0) | 0x00000008);
-		ctrl_outl((v & CACHE_PHYSADDR_MASK),
+		__raw_writel((v & CACHE_PHYSADDR_MASK),
 			  CACHE_OC_ADDRESS_ARRAY | (v & 0x000007f0) | 0x00000008);
 	}
 #endif
@@ -115,14 +115,14 @@
 		int way;
 		/* O-Cache writeback */
 		for (way = 0; way < 4; way++) {
-			unsigned long data =  ctrl_inl(CACHE_OC_ADDRESS_ARRAY | addr | (way << 11));
+			unsigned long data =  __raw_readl(CACHE_OC_ADDRESS_ARRAY | addr | (way << 11));
 			if ((data & CACHE_PHYSADDR_MASK) == (v & CACHE_PHYSADDR_MASK)) {
 				data &= ~SH_CACHE_UPDATED;
-				ctrl_outl(data, CACHE_OC_ADDRESS_ARRAY | addr | (way << 11));
+				__raw_writel(data, CACHE_OC_ADDRESS_ARRAY | addr | (way << 11));
 			}
 		}
 		/* I-Cache invalidate */
-		ctrl_outl(addr,
+		__raw_writel(addr,
 			  CACHE_IC_ADDRESS_ARRAY | addr | 0x00000008);
 	}
 
diff --git a/arch/sh/mm/cache-sh3.c b/arch/sh/mm/cache-sh3.c
index faef80c..e37523f 100644
--- a/arch/sh/mm/cache-sh3.c
+++ b/arch/sh/mm/cache-sh3.c
@@ -50,12 +50,12 @@
 			p = __pa(v);
 			addr = addrstart | (v & current_cpu_data.dcache.entry_mask);
 			local_irq_save(flags);
-			data = ctrl_inl(addr);
+			data = __raw_readl(addr);
 
 			if ((data & CACHE_PHYSADDR_MASK) ==
 			    (p & CACHE_PHYSADDR_MASK)) {
 				data &= ~SH_CACHE_UPDATED;
-				ctrl_outl(data, addr);
+				__raw_writel(data, addr);
 				local_irq_restore(flags);
 				break;
 			}
@@ -86,7 +86,7 @@
 		data = (v & 0xfffffc00); /* _Virtual_ address, ~U, ~V */
 		addr = CACHE_OC_ADDRESS_ARRAY |
 			(v & current_cpu_data.dcache.entry_mask) | SH_CACHE_ASSOC;
-		ctrl_outl(data, addr);
+		__raw_writel(data, addr);
 	}
 }
 
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index 560ddb6..2cfae81 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -36,7 +36,7 @@
  * Called from kernel/module.c:sys_init_module and routine for a.out format,
  * signal handler code and kprobes code
  */
-static void __uses_jump_to_uncached sh4_flush_icache_range(void *args)
+static void sh4_flush_icache_range(void *args)
 {
 	struct flusher_data *data = args;
 	unsigned long start, end;
@@ -109,6 +109,7 @@
 static void sh4_flush_dcache_page(void *arg)
 {
 	struct page *page = arg;
+	unsigned long addr = (unsigned long)page_address(page);
 #ifndef CONFIG_SMP
 	struct address_space *mapping = page_mapping(page);
 
@@ -116,22 +117,14 @@
 		set_bit(PG_dcache_dirty, &page->flags);
 	else
 #endif
-	{
-		unsigned long phys = page_to_phys(page);
-		unsigned long addr = CACHE_OC_ADDRESS_ARRAY;
-		int i, n;
-
-		/* Loop all the D-cache */
-		n = boot_cpu_data.dcache.n_aliases;
-		for (i = 0; i < n; i++, addr += PAGE_SIZE)
-			flush_cache_one(addr, phys);
-	}
+		flush_cache_one(CACHE_OC_ADDRESS_ARRAY |
+				(addr & shm_align_mask), page_to_phys(page));
 
 	wmb();
 }
 
 /* TODO: Selective icache invalidation through IC address array.. */
-static void __uses_jump_to_uncached flush_icache_all(void)
+static void flush_icache_all(void)
 {
 	unsigned long flags, ccr;
 
@@ -139,9 +132,9 @@
 	jump_to_uncached();
 
 	/* Flush I-cache */
-	ccr = ctrl_inl(CCR);
+	ccr = __raw_readl(CCR);
 	ccr |= CCR_CACHE_ICI;
-	ctrl_outl(ccr, CCR);
+	__raw_writel(ccr, CCR);
 
 	/*
 	 * back_to_cached() will take care of the barrier for us, don't add
@@ -384,9 +377,9 @@
 void __init sh4_cache_init(void)
 {
 	printk("PVR=%08x CVR=%08x PRR=%08x\n",
-		ctrl_inl(CCN_PVR),
-		ctrl_inl(CCN_CVR),
-		ctrl_inl(CCN_PRR));
+		__raw_readl(CCN_PVR),
+		__raw_readl(CCN_CVR),
+		__raw_readl(CCN_PRR));
 
 	local_flush_icache_range	= sh4_flush_icache_range;
 	local_flush_dcache_page		= sh4_flush_dcache_page;
diff --git a/arch/sh/mm/cache-sh7705.c b/arch/sh/mm/cache-sh7705.c
index f527fb7..f498da1 100644
--- a/arch/sh/mm/cache-sh7705.c
+++ b/arch/sh/mm/cache-sh7705.c
@@ -48,10 +48,10 @@
 			unsigned long data;
 			int v = SH_CACHE_UPDATED | SH_CACHE_VALID;
 
-			data = ctrl_inl(addr);
+			data = __raw_readl(addr);
 
 			if ((data & v) == v)
-				ctrl_outl(data & ~v, addr);
+				__raw_writel(data & ~v, addr);
 
 		}
 
@@ -78,7 +78,7 @@
 /*
  * Writeback&Invalidate the D-cache of the page
  */
-static void __uses_jump_to_uncached __flush_dcache_page(unsigned long phys)
+static void __flush_dcache_page(unsigned long phys)
 {
 	unsigned long ways, waysize, addrstart;
 	unsigned long flags;
@@ -115,10 +115,10 @@
 		     addr += current_cpu_data.dcache.linesz) {
 			unsigned long data;
 
-			data = ctrl_inl(addr) & (0x1ffffC00 | SH_CACHE_VALID);
+			data = __raw_readl(addr) & (0x1ffffC00 | SH_CACHE_VALID);
 		        if (data == phys) {
 				data &= ~(SH_CACHE_VALID | SH_CACHE_UPDATED);
-				ctrl_outl(data, addr);
+				__raw_writel(data, addr);
 			}
 		}
 
@@ -144,7 +144,7 @@
 		__flush_dcache_page(__pa(page_address(page)));
 }
 
-static void __uses_jump_to_uncached sh7705_flush_cache_all(void *args)
+static void sh7705_flush_cache_all(void *args)
 {
 	unsigned long flags;
 
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index b8607fa..0f4095d 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -2,7 +2,7 @@
  * arch/sh/mm/cache.c
  *
  * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
- * Copyright (C) 2002 - 2009  Paul Mundt
+ * Copyright (C) 2002 - 2010  Paul Mundt
  *
  * Released under the terms of the GNU GPL v2.0.
  */
@@ -41,8 +41,17 @@
                                    int wait)
 {
 	preempt_disable();
-	smp_call_function(func, info, wait);
+
+	/*
+	 * It's possible that this gets called early on when IRQs are
+	 * still disabled due to ioremapping by the boot CPU, so don't
+	 * even attempt IPIs unless there are other CPUs online.
+	 */
+	if (num_online_cpus() > 1)
+		smp_call_function(func, info, wait);
+
 	func(info);
+
 	preempt_enable();
 }
 
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c
index 4753010..28e2283 100644
--- a/arch/sh/mm/fault_32.c
+++ b/arch/sh/mm/fault_32.c
@@ -53,6 +53,9 @@
 	if (!pud_present(*pud_k))
 		return NULL;
 
+	if (!pud_present(*pud))
+	    set_pud(pud, *pud_k);
+
 	pmd = pmd_offset(pud, address);
 	pmd_k = pmd_offset(pud_k, address);
 	if (!pmd_present(*pmd_k))
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 432acd0..68028e8 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -21,25 +21,13 @@
 #include <asm/cacheflush.h>
 #include <asm/sections.h>
 #include <asm/cache.h>
+#include <asm/sizes.h>
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 pgd_t swapper_pg_dir[PTRS_PER_PGD];
 
-#ifdef CONFIG_SUPERH32
-/*
- * Handle trivial transitions between cached and uncached
- * segments, making use of the 1:1 mapping relationship in
- * 512MB lowmem.
- *
- * This is the offset of the uncached section from its cached alias.
- * Default value only valid in 29 bit mode, in 32bit mode will be
- * overridden in pmb_init.
- */
-unsigned long cached_to_uncached = P2SEG - P1SEG;
-#endif
-
 #ifdef CONFIG_MMU
-static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
+static pte_t *__get_pte_phys(unsigned long addr)
 {
 	pgd_t *pgd;
 	pud_t *pud;
@@ -49,22 +37,30 @@
 	pgd = pgd_offset_k(addr);
 	if (pgd_none(*pgd)) {
 		pgd_ERROR(*pgd);
-		return;
+		return NULL;
 	}
 
 	pud = pud_alloc(NULL, pgd, addr);
 	if (unlikely(!pud)) {
 		pud_ERROR(*pud);
-		return;
+		return NULL;
 	}
 
 	pmd = pmd_alloc(NULL, pud, addr);
 	if (unlikely(!pmd)) {
 		pmd_ERROR(*pmd);
-		return;
+		return NULL;
 	}
 
 	pte = pte_offset_kernel(pmd, addr);
+	return pte;
+}
+
+static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
+{
+	pte_t *pte;
+
+	pte = __get_pte_phys(addr);
 	if (!pte_none(*pte)) {
 		pte_ERROR(*pte);
 		return;
@@ -72,23 +68,24 @@
 
 	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
 	local_flush_tlb_one(get_asid(), addr);
+
+	if (pgprot_val(prot) & _PAGE_WIRED)
+		tlb_wire_entry(NULL, addr, *pte);
 }
 
-/*
- * As a performance optimization, other platforms preserve the fixmap mapping
- * across a context switch, we don't presently do this, but this could be done
- * in a similar fashion as to the wired TLB interface that sh64 uses (by way
- * of the memory mapped UTLB configuration) -- this unfortunately forces us to
- * give up a TLB entry for each mapping we want to preserve. While this may be
- * viable for a small number of fixmaps, it's not particularly useful for
- * everything and needs to be carefully evaluated. (ie, we may want this for
- * the vsyscall page).
- *
- * XXX: Perhaps add a _PAGE_WIRED flag or something similar that we can pass
- * in at __set_fixmap() time to determine the appropriate behavior to follow.
- *
- *					 -- PFM.
- */
+static void clear_pte_phys(unsigned long addr, pgprot_t prot)
+{
+	pte_t *pte;
+
+	pte = __get_pte_phys(addr);
+
+	if (pgprot_val(prot) & _PAGE_WIRED)
+		tlb_unwire_entry();
+
+	set_pte(pte, pfn_pte(0, __pgprot(0)));
+	local_flush_tlb_one(get_asid(), addr);
+}
+
 void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
 {
 	unsigned long address = __fix_to_virt(idx);
@@ -101,6 +98,18 @@
 	set_pte_phys(address, phys, prot);
 }
 
+void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot)
+{
+	unsigned long address = __fix_to_virt(idx);
+
+	if (idx >= __end_of_fixed_addresses) {
+		BUG();
+		return;
+	}
+
+	clear_pte_phys(address, prot);
+}
+
 void __init page_table_range_init(unsigned long start, unsigned long end,
 					 pgd_t *pgd_base)
 {
@@ -120,7 +129,13 @@
 	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
 		pud = (pud_t *)pgd;
 		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
+#ifdef __PAGETABLE_PMD_FOLDED
 			pmd = (pmd_t *)pud;
+#else
+			pmd = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
+			pud_populate(&init_mm, pud, pmd);
+			pmd += k;
+#endif
 			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
 				if (pmd_none(*pmd)) {
 					pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
@@ -182,9 +197,6 @@
 	}
 
 	free_area_init_nodes(max_zone_pfns);
-
-	/* Set up the uncached fixmap */
-	set_fixmap_nocache(FIX_UNCACHED, __pa(&__uncached_start));
 }
 
 /*
@@ -195,6 +207,8 @@
 	no_iommu_init();
 }
 
+unsigned int mem_init_done = 0;
+
 void __init mem_init(void)
 {
 	int codesize, datasize, initsize;
@@ -231,6 +245,8 @@
 	memset(empty_zero_page, 0, PAGE_SIZE);
 	__flush_wback_region(empty_zero_page, PAGE_SIZE);
 
+	vsyscall_init();
+
 	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
 	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
 	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;
@@ -243,8 +259,48 @@
 		datasize >> 10,
 		initsize >> 10);
 
-	/* Initialize the vDSO */
-	vsyscall_init();
+	printk(KERN_INFO "virtual kernel memory layout:\n"
+		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+#ifdef CONFIG_HIGHMEM
+		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+#endif
+		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
+		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB) (cached)\n"
+#ifdef CONFIG_UNCACHED_MAPPING
+		"            : 0x%08lx - 0x%08lx   (%4ld MB) (uncached)\n"
+#endif
+		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
+		FIXADDR_START, FIXADDR_TOP,
+		(FIXADDR_TOP - FIXADDR_START) >> 10,
+
+#ifdef CONFIG_HIGHMEM
+		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
+		(LAST_PKMAP*PAGE_SIZE) >> 10,
+#endif
+
+		(unsigned long)VMALLOC_START, VMALLOC_END,
+		(VMALLOC_END - VMALLOC_START) >> 20,
+
+		(unsigned long)memory_start, (unsigned long)high_memory,
+		((unsigned long)high_memory - (unsigned long)memory_start) >> 20,
+
+#ifdef CONFIG_UNCACHED_MAPPING
+		uncached_start, uncached_end, uncached_size >> 20,
+#endif
+
+		(unsigned long)&__init_begin, (unsigned long)&__init_end,
+		((unsigned long)&__init_end -
+		 (unsigned long)&__init_begin) >> 10,
+
+		(unsigned long)&_etext, (unsigned long)&_edata,
+		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
+
+		(unsigned long)&_text, (unsigned long)&_etext,
+		((unsigned long)&_etext - (unsigned long)&_text) >> 10);
+
+	mem_init_done = 1;
 }
 
 void free_initmem(void)
@@ -277,35 +333,6 @@
 }
 #endif
 
-#if THREAD_SHIFT < PAGE_SHIFT
-static struct kmem_cache *thread_info_cache;
-
-struct thread_info *alloc_thread_info(struct task_struct *tsk)
-{
-	struct thread_info *ti;
-
-	ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL);
-	if (unlikely(ti == NULL))
-		return NULL;
-#ifdef CONFIG_DEBUG_STACK_USAGE
-	memset(ti, 0, THREAD_SIZE);
-#endif
-	return ti;
-}
-
-void free_thread_info(struct thread_info *ti)
-{
-	kmem_cache_free(thread_info_cache, ti);
-}
-
-void thread_info_cache_init(void)
-{
-	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
-					      THREAD_SIZE, 0, NULL);
-	BUG_ON(thread_info_cache == NULL);
-}
-#endif /* THREAD_SHIFT < PAGE_SHIFT */
-
 #ifdef CONFIG_MEMORY_HOTPLUG
 int arch_add_memory(int nid, u64 start, u64 size)
 {
@@ -336,10 +363,3 @@
 #endif
 
 #endif /* CONFIG_MEMORY_HOTPLUG */
-
-#ifdef CONFIG_PMB
-int __in_29bit_mode(void)
-{
-	return !(ctrl_inl(PMB_PASCR) & PASCR_SE);
-}
-#endif /* CONFIG_PMB */
diff --git a/arch/sh/mm/ioremap_32.c b/arch/sh/mm/ioremap.c
similarity index 78%
rename from arch/sh/mm/ioremap_32.c
rename to arch/sh/mm/ioremap.c
index 2141bef..c68d2d7 100644
--- a/arch/sh/mm/ioremap_32.c
+++ b/arch/sh/mm/ioremap.c
@@ -1,13 +1,13 @@
 /*
  * arch/sh/mm/ioremap.c
  *
+ * (C) Copyright 1995 1996 Linus Torvalds
+ * (C) Copyright 2005 - 2010  Paul Mundt
+ *
  * Re-map IO memory to kernel address space so that we can access it.
  * This is needed for high PCI addresses that aren't mapped in the
  * 640k-1MB IO memory area on PC's
  *
- * (C) Copyright 1995 1996 Linus Torvalds
- * (C) Copyright 2005, 2006 Paul Mundt
- *
  * This file is subject to the terms and conditions of the GNU General
  * Public License. See the file "COPYING" in the main directory of this
  * archive for more details.
@@ -33,12 +33,12 @@
  * have to convert them into an offset in a page-aligned mapping, but the
  * caller shouldn't need to know that small detail.
  */
-void __iomem *__ioremap_caller(unsigned long phys_addr, unsigned long size,
-			       unsigned long flags, void *caller)
+void __iomem * __init_refok
+__ioremap_caller(unsigned long phys_addr, unsigned long size,
+		 pgprot_t pgprot, void *caller)
 {
 	struct vm_struct *area;
 	unsigned long offset, last_addr, addr, orig_addr;
-	pgprot_t pgprot;
 
 	/* Don't allow wraparound or zero size */
 	last_addr = phys_addr + size - 1;
@@ -46,18 +46,6 @@
 		return NULL;
 
 	/*
-	 * If we're in the fixed PCI memory range, mapping through page
-	 * tables is not only pointless, but also fundamentally broken.
-	 * Just return the physical address instead.
-	 *
-	 * For boards that map a small PCI memory aperture somewhere in
-	 * P1/P2 space, ioremap() will already do the right thing,
-	 * and we'll never get this far.
-	 */
-	if (is_pci_memory_fixed_range(phys_addr, size))
-		return (void __iomem *)phys_addr;
-
-	/*
 	 * Mappings have to be page-aligned
 	 */
 	offset = phys_addr & ~PAGE_MASK;
@@ -65,6 +53,12 @@
 	size = PAGE_ALIGN(last_addr+1) - phys_addr;
 
 	/*
+	 * If we can't yet use the regular approach, go the fixmap route.
+	 */
+	if (!mem_init_done)
+		return ioremap_fixed(phys_addr, offset, size, pgprot);
+
+	/*
 	 * Ok, go for it..
 	 */
 	area = get_vm_area_caller(size, VM_IOREMAP, caller);
@@ -84,8 +78,9 @@
 	 * PMB entries are all pre-faulted.
 	 */
 	if (unlikely(phys_addr >= P1SEG)) {
-		unsigned long mapped = pmb_remap(addr, phys_addr, size, flags);
+		unsigned long mapped;
 
+		mapped = pmb_remap(addr, phys_addr, size, pgprot);
 		if (likely(mapped)) {
 			addr		+= mapped;
 			phys_addr	+= mapped;
@@ -94,7 +89,6 @@
 	}
 #endif
 
-	pgprot = __pgprot(pgprot_val(PAGE_KERNEL_NOCACHE) | flags);
 	if (likely(size))
 		if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) {
 			vunmap((void *)orig_addr);
@@ -105,15 +99,38 @@
 }
 EXPORT_SYMBOL(__ioremap_caller);
 
+/*
+ * Simple checks for non-translatable mappings.
+ */
+static inline int iomapping_nontranslatable(unsigned long offset)
+{
+#ifdef CONFIG_29BIT
+	/*
+	 * In 29-bit mode this includes the fixed P1/P2 areas, as well as
+	 * parts of P3.
+	 */
+	if (PXSEG(offset) < P3SEG || offset >= P3_ADDR_MAX)
+		return 1;
+#endif
+
+	return 0;
+}
+
 void __iounmap(void __iomem *addr)
 {
 	unsigned long vaddr = (unsigned long __force)addr;
-	unsigned long seg = PXSEG(vaddr);
 	struct vm_struct *p;
 
-	if (seg < P3SEG || vaddr >= P3_ADDR_MAX)
+	/*
+	 * Nothing to do if there is no translatable mapping.
+	 */
+	if (iomapping_nontranslatable(vaddr))
 		return;
-	if (is_pci_memory_fixed_range(vaddr, 0))
+
+	/*
+	 * There's no VMA if it's from an early fixed mapping.
+	 */
+	if (iounmap_fixed(addr) == 0)
 		return;
 
 #ifdef CONFIG_PMB
diff --git a/arch/sh/mm/ioremap_64.c b/arch/sh/mm/ioremap_64.c
deleted file mode 100644
index ef43465..0000000
--- a/arch/sh/mm/ioremap_64.c
+++ /dev/null
@@ -1,326 +0,0 @@
-/*
- * arch/sh/mm/ioremap_64.c
- *
- * Copyright (C) 2000, 2001  Paolo Alberelli
- * Copyright (C) 2003 - 2007  Paul Mundt
- *
- * Mostly derived from arch/sh/mm/ioremap.c which, in turn is mostly
- * derived from arch/i386/mm/ioremap.c .
- *
- *   (C) Copyright 1995 1996 Linus Torvalds
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/vmalloc.h>
-#include <linux/ioport.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/io.h>
-#include <linux/bootmem.h>
-#include <linux/proc_fs.h>
-#include <linux/slab.h>
-#include <asm/page.h>
-#include <asm/pgalloc.h>
-#include <asm/addrspace.h>
-#include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
-#include <asm/mmu.h>
-
-static struct resource shmedia_iomap = {
-	.name	= "shmedia_iomap",
-	.start	= IOBASE_VADDR + PAGE_SIZE,
-	.end	= IOBASE_END - 1,
-};
-
-static void shmedia_mapioaddr(unsigned long pa, unsigned long va,
-			      unsigned long flags);
-static void shmedia_unmapioaddr(unsigned long vaddr);
-static void __iomem *shmedia_ioremap(struct resource *res, u32 pa,
-				     int sz, unsigned long flags);
-
-/*
- * We have the same problem as the SPARC, so lets have the same comment:
- * Our mini-allocator...
- * Boy this is gross! We need it because we must map I/O for
- * timers and interrupt controller before the kmalloc is available.
- */
-
-#define XNMLN  15
-#define XNRES  10
-
-struct xresource {
-	struct resource xres;   /* Must be first */
-	int xflag;              /* 1 == used */
-	char xname[XNMLN+1];
-};
-
-static struct xresource xresv[XNRES];
-
-static struct xresource *xres_alloc(void)
-{
-	struct xresource *xrp;
-	int n;
-
-	xrp = xresv;
-	for (n = 0; n < XNRES; n++) {
-		if (xrp->xflag == 0) {
-			xrp->xflag = 1;
-			return xrp;
-		}
-		xrp++;
-	}
-	return NULL;
-}
-
-static void xres_free(struct xresource *xrp)
-{
-	xrp->xflag = 0;
-}
-
-static struct resource *shmedia_find_resource(struct resource *root,
-					      unsigned long vaddr)
-{
-	struct resource *res;
-
-	for (res = root->child; res; res = res->sibling)
-		if (res->start <= vaddr && res->end >= vaddr)
-			return res;
-
-	return NULL;
-}
-
-static void __iomem *shmedia_alloc_io(unsigned long phys, unsigned long size,
-				      const char *name, unsigned long flags)
-{
-	struct xresource *xres;
-	struct resource *res;
-	char *tack;
-	int tlen;
-
-	if (name == NULL)
-		name = "???";
-
-	xres = xres_alloc();
-	if (xres != 0) {
-		tack = xres->xname;
-		res = &xres->xres;
-	} else {
-		printk_once(KERN_NOTICE "%s: done with statics, "
-			       "switching to kmalloc\n", __func__);
-		tlen = strlen(name);
-		tack = kmalloc(sizeof(struct resource) + tlen + 1, GFP_KERNEL);
-		if (!tack)
-			return NULL;
-		memset(tack, 0, sizeof(struct resource));
-		res = (struct resource *) tack;
-		tack += sizeof(struct resource);
-	}
-
-	strncpy(tack, name, XNMLN);
-	tack[XNMLN] = 0;
-	res->name = tack;
-
-	return shmedia_ioremap(res, phys, size, flags);
-}
-
-static void __iomem *shmedia_ioremap(struct resource *res, u32 pa, int sz,
-				     unsigned long flags)
-{
-	unsigned long offset = ((unsigned long) pa) & (~PAGE_MASK);
-	unsigned long round_sz = (offset + sz + PAGE_SIZE-1) & PAGE_MASK;
-	unsigned long va;
-	unsigned int psz;
-
-	if (allocate_resource(&shmedia_iomap, res, round_sz,
-			      shmedia_iomap.start, shmedia_iomap.end,
-			      PAGE_SIZE, NULL, NULL) != 0) {
-		panic("alloc_io_res(%s): cannot occupy\n",
-		      (res->name != NULL) ? res->name : "???");
-	}
-
-	va = res->start;
-	pa &= PAGE_MASK;
-
-	psz = (res->end - res->start + (PAGE_SIZE - 1)) / PAGE_SIZE;
-
-	for (psz = res->end - res->start + 1; psz != 0; psz -= PAGE_SIZE) {
-		shmedia_mapioaddr(pa, va, flags);
-		va += PAGE_SIZE;
-		pa += PAGE_SIZE;
-	}
-
-	return (void __iomem *)(unsigned long)(res->start + offset);
-}
-
-static void shmedia_free_io(struct resource *res)
-{
-	unsigned long len = res->end - res->start + 1;
-
-	BUG_ON((len & (PAGE_SIZE - 1)) != 0);
-
-	while (len) {
-		len -= PAGE_SIZE;
-		shmedia_unmapioaddr(res->start + len);
-	}
-
-	release_resource(res);
-}
-
-static __init_refok void *sh64_get_page(void)
-{
-	void *page;
-
-	if (slab_is_available())
-		page = (void *)get_zeroed_page(GFP_KERNEL);
-	else
-		page = alloc_bootmem_pages(PAGE_SIZE);
-
-	if (!page || ((unsigned long)page & ~PAGE_MASK))
-		panic("sh64_get_page: Out of memory already?\n");
-
-	return page;
-}
-
-static void shmedia_mapioaddr(unsigned long pa, unsigned long va,
-			      unsigned long flags)
-{
-	pgd_t *pgdp;
-	pud_t *pudp;
-	pmd_t *pmdp;
-	pte_t *ptep, pte;
-	pgprot_t prot;
-
-	pr_debug("shmedia_mapiopage pa %08lx va %08lx\n",  pa, va);
-
-	if (!flags)
-		flags = 1; /* 1 = CB0-1 device */
-
-	pgdp = pgd_offset_k(va);
-	if (pgd_none(*pgdp) || !pgd_present(*pgdp)) {
-		pudp = (pud_t *)sh64_get_page();
-		set_pgd(pgdp, __pgd((unsigned long)pudp | _KERNPG_TABLE));
-	}
-
-	pudp = pud_offset(pgdp, va);
-	if (pud_none(*pudp) || !pud_present(*pudp)) {
-		pmdp = (pmd_t *)sh64_get_page();
-		set_pud(pudp, __pud((unsigned long)pmdp | _KERNPG_TABLE));
-	}
-
-	pmdp = pmd_offset(pudp, va);
-	if (pmd_none(*pmdp) || !pmd_present(*pmdp)) {
-		ptep = (pte_t *)sh64_get_page();
-		set_pmd(pmdp, __pmd((unsigned long)ptep + _PAGE_TABLE));
-	}
-
-	prot = __pgprot(_PAGE_PRESENT | _PAGE_READ     | _PAGE_WRITE  |
-			_PAGE_DIRTY   | _PAGE_ACCESSED | _PAGE_SHARED | flags);
-
-	pte = pfn_pte(pa >> PAGE_SHIFT, prot);
-	ptep = pte_offset_kernel(pmdp, va);
-
-	if (!pte_none(*ptep) &&
-	    pte_val(*ptep) != pte_val(pte))
-		pte_ERROR(*ptep);
-
-	set_pte(ptep, pte);
-
-	flush_tlb_kernel_range(va, PAGE_SIZE);
-}
-
-static void shmedia_unmapioaddr(unsigned long vaddr)
-{
-	pgd_t *pgdp;
-	pud_t *pudp;
-	pmd_t *pmdp;
-	pte_t *ptep;
-
-	pgdp = pgd_offset_k(vaddr);
-	if (pgd_none(*pgdp) || pgd_bad(*pgdp))
-		return;
-
-	pudp = pud_offset(pgdp, vaddr);
-	if (pud_none(*pudp) || pud_bad(*pudp))
-		return;
-
-	pmdp = pmd_offset(pudp, vaddr);
-	if (pmd_none(*pmdp) || pmd_bad(*pmdp))
-		return;
-
-	ptep = pte_offset_kernel(pmdp, vaddr);
-
-	if (pte_none(*ptep) || !pte_present(*ptep))
-		return;
-
-	clear_page((void *)ptep);
-	pte_clear(&init_mm, vaddr, ptep);
-}
-
-void __iomem *__ioremap_caller(unsigned long offset, unsigned long size,
-			       unsigned long flags, void *caller)
-{
-	char name[14];
-
-	sprintf(name, "phys_%08x", (u32)offset);
-	return shmedia_alloc_io(offset, size, name, flags);
-}
-EXPORT_SYMBOL(__ioremap_caller);
-
-void __iounmap(void __iomem *virtual)
-{
-	unsigned long vaddr = (unsigned long)virtual & PAGE_MASK;
-	struct resource *res;
-	unsigned int psz;
-
-	res = shmedia_find_resource(&shmedia_iomap, vaddr);
-	if (!res) {
-		printk(KERN_ERR "%s: Failed to free 0x%08lx\n",
-		       __func__, vaddr);
-		return;
-	}
-
-	psz = (res->end - res->start + (PAGE_SIZE - 1)) / PAGE_SIZE;
-
-	shmedia_free_io(res);
-
-	if ((char *)res >= (char *)xresv &&
-	    (char *)res <  (char *)&xresv[XNRES]) {
-		xres_free((struct xresource *)res);
-	} else {
-		kfree(res);
-	}
-}
-EXPORT_SYMBOL(__iounmap);
-
-static int
-ioremap_proc_info(char *buf, char **start, off_t fpos, int length, int *eof,
-		  void *data)
-{
-	char *p = buf, *e = buf + length;
-	struct resource *r;
-	const char *nm;
-
-	for (r = ((struct resource *)data)->child; r != NULL; r = r->sibling) {
-		if (p + 32 >= e)        /* Better than nothing */
-			break;
-		nm = r->name;
-		if (nm == NULL)
-			nm = "???";
-
-		p += sprintf(p, "%08lx-%08lx: %s\n",
-			     (unsigned long)r->start,
-			     (unsigned long)r->end, nm);
-	}
-
-	return p-buf;
-}
-
-static int __init register_proc_onchip(void)
-{
-	create_proc_read_entry("io_map", 0, 0, ioremap_proc_info,
-			       &shmedia_iomap);
-	return 0;
-}
-late_initcall(register_proc_onchip);
diff --git a/arch/sh/mm/ioremap_fixed.c b/arch/sh/mm/ioremap_fixed.c
new file mode 100644
index 0000000..0b78b1e
--- /dev/null
+++ b/arch/sh/mm/ioremap_fixed.c
@@ -0,0 +1,128 @@
+/*
+ * Re-map IO memory to kernel address space so that we can access it.
+ *
+ * These functions should only be used when it is necessary to map a
+ * physical address space into the kernel address space before ioremap()
+ * can be used, e.g. early in boot before paging_init().
+ *
+ * Copyright (C) 2009  Matt Fleming
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/io.h>
+#include <linux/bootmem.h>
+#include <linux/proc_fs.h>
+#include <linux/slab.h>
+#include <asm/fixmap.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/addrspace.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm/mmu.h>
+#include <asm/mmu_context.h>
+
+struct ioremap_map {
+	void __iomem *addr;
+	unsigned long size;
+	unsigned long fixmap_addr;
+};
+
+static struct ioremap_map ioremap_maps[FIX_N_IOREMAPS];
+
+void __init ioremap_fixed_init(void)
+{
+	struct ioremap_map *map;
+	int i;
+
+	for (i = 0; i < FIX_N_IOREMAPS; i++) {
+		map = &ioremap_maps[i];
+		map->fixmap_addr = __fix_to_virt(FIX_IOREMAP_BEGIN + i);
+	}
+}
+
+void __init __iomem *
+ioremap_fixed(resource_size_t phys_addr, unsigned long offset,
+	      unsigned long size, pgprot_t prot)
+{
+	enum fixed_addresses idx0, idx;
+	struct ioremap_map *map;
+	unsigned int nrpages;
+	int i, slot;
+
+	slot = -1;
+	for (i = 0; i < FIX_N_IOREMAPS; i++) {
+		map = &ioremap_maps[i];
+		if (!map->addr) {
+			map->size = size;
+			slot = i;
+			break;
+		}
+	}
+
+	if (slot < 0)
+		return NULL;
+
+	/*
+	 * Mappings have to fit in the FIX_IOREMAP area.
+	 */
+	nrpages = size >> PAGE_SHIFT;
+	if (nrpages > FIX_N_IOREMAPS)
+		return NULL;
+
+	/*
+	 * Ok, go for it..
+	 */
+	idx0 = FIX_IOREMAP_BEGIN + slot;
+	idx = idx0;
+	while (nrpages > 0) {
+		pgprot_val(prot) |= _PAGE_WIRED;
+		__set_fixmap(idx, phys_addr, prot);
+		phys_addr += PAGE_SIZE;
+		idx++;
+		--nrpages;
+	}
+
+	map->addr = (void __iomem *)(offset + map->fixmap_addr);
+	return map->addr;
+}
+
+int iounmap_fixed(void __iomem *addr)
+{
+	enum fixed_addresses idx;
+	struct ioremap_map *map;
+	unsigned int nrpages;
+	int i, slot;
+
+	slot = -1;
+	for (i = 0; i < FIX_N_IOREMAPS; i++) {
+		map = &ioremap_maps[i];
+		if (map->addr == addr) {
+			slot = i;
+			break;
+		}
+	}
+
+	/*
+	 * If we don't match, it's not for us.
+	 */
+	if (slot < 0)
+		return -EINVAL;
+
+	nrpages = map->size >> PAGE_SHIFT;
+
+	idx = FIX_IOREMAP_BEGIN + slot + nrpages - 1;
+	while (nrpages > 0) {
+		__clear_fixmap(idx, __pgprot(_PAGE_WIRED));
+		--idx;
+		--nrpages;
+	}
+
+	map->size = 0;
+	map->addr = NULL;
+
+	return 0;
+}
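
The new ioremap_fixed()/iounmap_fixed() pair above manages a small, statically sized table of fixmap-backed mappings: claim the first free slot on map, look the slot up by address on unmap. A minimal standalone sketch of that slot-allocation pattern follows; the slot count, the fake fixmap addresses and the helper names are illustrative, not part of the patch.

#include <stdio.h>

#define N_SLOTS 8   /* stands in for FIX_N_IOREMAPS; value is illustrative */

struct slot_map {
	unsigned long addr;   /* 0 while the slot is free */
	unsigned long size;
	unsigned long base;   /* stands in for the slot's fixmap address */
};

static struct slot_map slots[N_SLOTS];

/* Claim the first free slot, mirroring the linear scan in ioremap_fixed():
 * the caller gets the slot's base address plus the sub-page offset. */
static unsigned long slot_claim(unsigned long offset, unsigned long size)
{
	int i;

	for (i = 0; i < N_SLOTS; i++) {
		if (!slots[i].addr) {
			slots[i].size = size;
			slots[i].base = 0x10000UL * (i + 1); /* fake fixmap VA */
			slots[i].addr = slots[i].base + offset;
			return slots[i].addr;
		}
	}
	return 0;   /* every slot in use, as when ioremap_fixed() returns NULL */
}

/* Release by address, mirroring the lookup loop in iounmap_fixed(). */
static int slot_release(unsigned long addr)
{
	int i;

	for (i = 0; i < N_SLOTS; i++) {
		if (slots[i].addr == addr) {
			slots[i].addr = 0;
			slots[i].size = 0;
			return 0;
		}
	}
	return -1;   /* not one of ours */
}

int main(void)
{
	unsigned long a = slot_claim(0x34, 0x2000);

	printf("claimed 0x%lx, release -> %d\n", a, slot_release(a));
	return 0;
}
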
diff --git a/arch/sh/mm/nommu.c b/arch/sh/mm/nommu.c
index ac16c05..7694f50 100644
--- a/arch/sh/mm/nommu.c
+++ b/arch/sh/mm/nommu.c
@@ -94,3 +94,7 @@
 void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
 {
 }
+
+void pgtable_cache_init(void)
+{
+}
diff --git a/arch/sh/mm/pgtable.c b/arch/sh/mm/pgtable.c
new file mode 100644
index 0000000..6f21fb1
--- /dev/null
+++ b/arch/sh/mm/pgtable.c
@@ -0,0 +1,56 @@
+#include <linux/mm.h>
+
+#define PGALLOC_GFP GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO
+
+static struct kmem_cache *pgd_cachep;
+#if PAGETABLE_LEVELS > 2
+static struct kmem_cache *pmd_cachep;
+#endif
+
+void pgd_ctor(void *x)
+{
+	pgd_t *pgd = x;
+
+	memcpy(pgd + USER_PTRS_PER_PGD,
+	       swapper_pg_dir + USER_PTRS_PER_PGD,
+	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+}
+
+void pgtable_cache_init(void)
+{
+	pgd_cachep = kmem_cache_create("pgd_cache",
+				       PTRS_PER_PGD * (1<<PTE_MAGNITUDE),
+				       PAGE_SIZE, SLAB_PANIC, pgd_ctor);
+#if PAGETABLE_LEVELS > 2
+	pmd_cachep = kmem_cache_create("pmd_cache",
+				       PTRS_PER_PMD * (1<<PTE_MAGNITUDE),
+				       PAGE_SIZE, SLAB_PANIC, NULL);
+#endif
+}
+
+pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+	return kmem_cache_alloc(pgd_cachep, PGALLOC_GFP);
+}
+
+void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+	kmem_cache_free(pgd_cachep, pgd);
+}
+
+#if PAGETABLE_LEVELS > 2
+void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+{
+	set_pud(pud, __pud((unsigned long)pmd));
+}
+
+pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+	return kmem_cache_alloc(pmd_cachep, PGALLOC_GFP);
+}
+
+void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+{
+	kmem_cache_free(pmd_cachep, pmd);
+}
+#endif /* PAGETABLE_LEVELS > 2 */
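
pgd_ctor() above relies on the slab constructor to pre-copy the kernel portion of swapper_pg_dir into every freshly allocated pgd, so all address spaces share the kernel mappings. A standalone sketch of that copy-the-kernel-half idea, with made-up table sizes rather than the real sh values:

#include <stdio.h>
#include <string.h>

#define PTRS_PER_PGD	16   /* illustrative, not the real sh value */
#define USER_PTRS	12   /* entries below this index belong to userspace */

static unsigned long swapper[PTRS_PER_PGD];   /* kernel master table */

/* Constructor: copy the kernel half of the master table into a new pgd,
 * the same idea as pgd_ctor() in the patch. */
static void new_pgd_ctor(unsigned long *pgd)
{
	memcpy(pgd + USER_PTRS, swapper + USER_PTRS,
	       (PTRS_PER_PGD - USER_PTRS) * sizeof(*pgd));
}

int main(void)
{
	unsigned long pgd[PTRS_PER_PGD] = { 0 };
	int i;

	for (i = USER_PTRS; i < PTRS_PER_PGD; i++)
		swapper[i] = 0x1000UL * i;	/* pretend kernel entries */

	new_pgd_ctor(pgd);
	printf("pgd[%d] = 0x%lx (shared with the master table)\n",
	       USER_PTRS, pgd[USER_PTRS]);
	return 0;
}
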
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index 280f6a1..198bcff 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -3,11 +3,8 @@
  *
  * Privileged Space Mapping Buffer (PMB) Support.
  *
- * Copyright (C) 2005, 2006, 2007 Paul Mundt
- *
- * P1/P2 Section mapping definitions from map32.h, which was:
- *
- *	Copyright 2003 (c) Lineo Solutions,Inc.
+ * Copyright (C) 2005 - 2010  Paul Mundt
+ * Copyright (C) 2010  Matt Fleming
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
@@ -24,47 +21,67 @@
 #include <linux/fs.h>
 #include <linux/seq_file.h>
 #include <linux/err.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/rwlock.h>
+#include <asm/sizes.h>
 #include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
+#include <asm/page.h>
 #include <asm/mmu.h>
-#include <asm/io.h>
 #include <asm/mmu_context.h>
 
-#define NR_PMB_ENTRIES	16
+struct pmb_entry;
 
-static void __pmb_unmap(struct pmb_entry *);
+struct pmb_entry {
+	unsigned long vpn;
+	unsigned long ppn;
+	unsigned long flags;
+	unsigned long size;
 
+	spinlock_t lock;
+
+	/*
+	 * 0 .. NR_PMB_ENTRIES for specific entry selection, or
+	 * PMB_NO_ENTRY to search for a free one
+	 */
+	int entry;
+
+	/* Adjacent entry link for contiguous multi-entry mappings */
+	struct pmb_entry *link;
+};
+
+static void pmb_unmap_entry(struct pmb_entry *, int depth);
+
+static DEFINE_RWLOCK(pmb_rwlock);
 static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
-static unsigned long pmb_map;
+static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES);
 
-static inline unsigned long mk_pmb_entry(unsigned int entry)
+static __always_inline unsigned long mk_pmb_entry(unsigned int entry)
 {
 	return (entry & PMB_E_MASK) << PMB_E_SHIFT;
 }
 
-static inline unsigned long mk_pmb_addr(unsigned int entry)
+static __always_inline unsigned long mk_pmb_addr(unsigned int entry)
 {
 	return mk_pmb_entry(entry) | PMB_ADDR;
 }
 
-static inline unsigned long mk_pmb_data(unsigned int entry)
+static __always_inline unsigned long mk_pmb_data(unsigned int entry)
 {
 	return mk_pmb_entry(entry) | PMB_DATA;
 }
 
 static int pmb_alloc_entry(void)
 {
-	unsigned int pos;
+	int pos;
 
-repeat:
-	pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES);
-
-	if (unlikely(pos > NR_PMB_ENTRIES))
-		return -ENOSPC;
-
-	if (test_and_set_bit(pos, &pmb_map))
-		goto repeat;
+	pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES);
+	if (pos >= 0 && pos < NR_PMB_ENTRIES)
+		__set_bit(pos, pmb_map);
+	else
+		pos = -ENOSPC;
 
 	return pos;
 }
@@ -73,21 +90,34 @@
 				   unsigned long flags, int entry)
 {
 	struct pmb_entry *pmbe;
+	unsigned long irqflags;
+	void *ret = NULL;
 	int pos;
 
+	write_lock_irqsave(&pmb_rwlock, irqflags);
+
 	if (entry == PMB_NO_ENTRY) {
 		pos = pmb_alloc_entry();
-		if (pos < 0)
-			return ERR_PTR(pos);
+		if (unlikely(pos < 0)) {
+			ret = ERR_PTR(pos);
+			goto out;
+		}
 	} else {
-		if (test_bit(entry, &pmb_map))
-			return ERR_PTR(-ENOSPC);
+		if (__test_and_set_bit(entry, pmb_map)) {
+			ret = ERR_PTR(-ENOSPC);
+			goto out;
+		}
+
 		pos = entry;
 	}
 
+	write_unlock_irqrestore(&pmb_rwlock, irqflags);
+
 	pmbe = &pmb_entry_list[pos];
-	if (!pmbe)
-		return ERR_PTR(-ENOMEM);
+
+	memset(pmbe, 0, sizeof(struct pmb_entry));
+
+	spin_lock_init(&pmbe->lock);
 
 	pmbe->vpn	= vpn;
 	pmbe->ppn	= ppn;
@@ -95,101 +125,113 @@
 	pmbe->entry	= pos;
 
 	return pmbe;
+
+out:
+	write_unlock_irqrestore(&pmb_rwlock, irqflags);
+	return ret;
 }
 
 static void pmb_free(struct pmb_entry *pmbe)
 {
-	int pos = pmbe->entry;
+	__clear_bit(pmbe->entry, pmb_map);
 
-	pmbe->vpn	= 0;
-	pmbe->ppn	= 0;
-	pmbe->flags	= 0;
-	pmbe->entry	= 0;
-
-	clear_bit(pos, &pmb_map);
+	pmbe->entry	= PMB_NO_ENTRY;
+	pmbe->link	= NULL;
 }
 
 /*
- * Must be in P2 for __set_pmb_entry()
+ * Ensure that the PMB entries match our cache configuration.
+ *
+ * When we are in 32-bit address extended mode, CCR.CB becomes
+ * invalid, so care must be taken to manually adjust cacheable
+ * translations.
  */
-static void __set_pmb_entry(unsigned long vpn, unsigned long ppn,
-			    unsigned long flags, int pos)
+static __always_inline unsigned long pmb_cache_flags(void)
 {
-	ctrl_outl(vpn | PMB_V, mk_pmb_addr(pos));
+	unsigned long flags = 0;
 
-#ifdef CONFIG_CACHE_WRITETHROUGH
-	/*
-	 * When we are in 32-bit address extended mode, CCR.CB becomes
-	 * invalid, so care must be taken to manually adjust cacheable
-	 * translations.
-	 */
-	if (likely(flags & PMB_C))
-		flags |= PMB_WT;
+#if defined(CONFIG_CACHE_WRITETHROUGH)
+	flags |= PMB_C | PMB_WT | PMB_UB;
+#elif defined(CONFIG_CACHE_WRITEBACK)
+	flags |= PMB_C;
 #endif
 
-	ctrl_outl(ppn | flags | PMB_V, mk_pmb_data(pos));
+	return flags;
 }
 
-static void __uses_jump_to_uncached set_pmb_entry(struct pmb_entry *pmbe)
+/*
+ * Must be run uncached.
+ */
+static void __set_pmb_entry(struct pmb_entry *pmbe)
 {
-	jump_to_uncached();
-	__set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, pmbe->entry);
-	back_to_cached();
+	writel_uncached(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry));
+	writel_uncached(pmbe->ppn | pmbe->flags | PMB_V,
+			mk_pmb_data(pmbe->entry));
 }
 
-static void __uses_jump_to_uncached clear_pmb_entry(struct pmb_entry *pmbe)
+static void __clear_pmb_entry(struct pmb_entry *pmbe)
 {
-	unsigned int entry = pmbe->entry;
-	unsigned long addr;
+	unsigned long addr, data;
+	unsigned long addr_val, data_val;
 
-	if (unlikely(entry >= NR_PMB_ENTRIES))
-		return;
+	addr = mk_pmb_addr(pmbe->entry);
+	data = mk_pmb_data(pmbe->entry);
 
-	jump_to_uncached();
+	addr_val = __raw_readl(addr);
+	data_val = __raw_readl(data);
 
 	/* Clear V-bit */
-	addr = mk_pmb_addr(entry);
-	ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr);
-
-	addr = mk_pmb_data(entry);
-	ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr);
-
-	back_to_cached();
+	writel_uncached(addr_val & ~PMB_V, addr);
+	writel_uncached(data_val & ~PMB_V, data);
 }
 
+static void set_pmb_entry(struct pmb_entry *pmbe)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&pmbe->lock, flags);
+	__set_pmb_entry(pmbe);
+	spin_unlock_irqrestore(&pmbe->lock, flags);
+}
 
 static struct {
 	unsigned long size;
 	int flag;
 } pmb_sizes[] = {
-	{ .size	= 0x20000000, .flag = PMB_SZ_512M, },
-	{ .size = 0x08000000, .flag = PMB_SZ_128M, },
-	{ .size = 0x04000000, .flag = PMB_SZ_64M,  },
-	{ .size = 0x01000000, .flag = PMB_SZ_16M,  },
+	{ .size	= SZ_512M, .flag = PMB_SZ_512M, },
+	{ .size = SZ_128M, .flag = PMB_SZ_128M, },
+	{ .size = SZ_64M,  .flag = PMB_SZ_64M,  },
+	{ .size = SZ_16M,  .flag = PMB_SZ_16M,  },
 };
 
 long pmb_remap(unsigned long vaddr, unsigned long phys,
-	       unsigned long size, unsigned long flags)
+	       unsigned long size, pgprot_t prot)
 {
 	struct pmb_entry *pmbp, *pmbe;
 	unsigned long wanted;
 	int pmb_flags, i;
 	long err;
+	u64 flags;
+
+	flags = pgprot_val(prot);
+
+	pmb_flags = PMB_WT | PMB_UB;
 
 	/* Convert typical pgprot value to the PMB equivalent */
 	if (flags & _PAGE_CACHABLE) {
-		if (flags & _PAGE_WT)
-			pmb_flags = PMB_WT;
-		else
-			pmb_flags = PMB_C;
-	} else
-		pmb_flags = PMB_WT | PMB_UB;
+		pmb_flags |= PMB_C;
+
+		if ((flags & _PAGE_WT) == 0)
+			pmb_flags &= ~(PMB_WT | PMB_UB);
+	}
 
 	pmbp = NULL;
 	wanted = size;
 
 again:
 	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
+		unsigned long flags;
+
 		if (size < pmb_sizes[i].size)
 			continue;
 
@@ -200,18 +242,25 @@
 			goto out;
 		}
 
-		set_pmb_entry(pmbe);
+		spin_lock_irqsave(&pmbe->lock, flags);
+
+		__set_pmb_entry(pmbe);
 
 		phys	+= pmb_sizes[i].size;
 		vaddr	+= pmb_sizes[i].size;
 		size	-= pmb_sizes[i].size;
 
+		pmbe->size = pmb_sizes[i].size;
+
 		/*
 		 * Link adjacent entries that span multiple PMB entries
 		 * for easier tear-down.
 		 */
-		if (likely(pmbp))
+		if (likely(pmbp)) {
+			spin_lock(&pmbp->lock);
 			pmbp->link = pmbe;
+			spin_unlock(&pmbp->lock);
+		}
 
 		pmbp = pmbe;
 
@@ -221,16 +270,17 @@
 		 * pmb_sizes[i].size again.
 		 */
 		i--;
+
+		spin_unlock_irqrestore(&pmbe->lock, flags);
 	}
 
-	if (size >= 0x1000000)
+	if (size >= SZ_16M)
 		goto again;
 
 	return wanted - size;
 
 out:
-	if (pmbp)
-		__pmb_unmap(pmbp);
+	pmb_unmap_entry(pmbp, NR_PMB_ENTRIES);
 
 	return err;
 }
@@ -240,24 +290,52 @@
 	struct pmb_entry *pmbe = NULL;
 	int i;
 
+	read_lock(&pmb_rwlock);
+
 	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
-		if (test_bit(i, &pmb_map)) {
+		if (test_bit(i, pmb_map)) {
 			pmbe = &pmb_entry_list[i];
 			if (pmbe->vpn == addr)
 				break;
 		}
 	}
 
-	if (unlikely(!pmbe))
-		return;
+	read_unlock(&pmb_rwlock);
 
-	__pmb_unmap(pmbe);
+	pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
 }
 
-static void __pmb_unmap(struct pmb_entry *pmbe)
+static bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
 {
-	BUG_ON(!test_bit(pmbe->entry, &pmb_map));
+	return (b->vpn == (a->vpn + a->size)) &&
+	       (b->ppn == (a->ppn + a->size)) &&
+	       (b->flags == a->flags);
+}
 
+static bool pmb_size_valid(unsigned long size)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
+		if (pmb_sizes[i].size == size)
+			return true;
+
+	return false;
+}
+
+static int pmb_size_to_flags(unsigned long size)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
+		if (pmb_sizes[i].size == size)
+			return pmb_sizes[i].flag;
+
+	return 0;
+}
+
+static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
+{
 	do {
 		struct pmb_entry *pmblink = pmbe;
 
@@ -268,102 +346,312 @@
 		 * this entry in pmb_alloc() (even if we haven't filled
 		 * it yet).
 		 *
-		 * Therefore, calling clear_pmb_entry() is safe as no
+		 * Therefore, calling __clear_pmb_entry() is safe as no
 		 * other mapping can be using that slot.
 		 */
-		clear_pmb_entry(pmbe);
+		__clear_pmb_entry(pmbe);
 
 		pmbe = pmblink->link;
 
 		pmb_free(pmblink);
-	} while (pmbe);
+	} while (pmbe && --depth);
 }
 
-#ifdef CONFIG_PMB
-int __uses_jump_to_uncached pmb_init(void)
+static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
 {
-	unsigned int i;
-	long size, ret;
+	unsigned long flags;
 
-	jump_to_uncached();
+	if (unlikely(!pmbe))
+		return;
 
-	/*
-	 * Insert PMB entries for the P1 and P2 areas so that, after
-	 * we've switched the MMU to 32-bit mode, the semantics of P1
-	 * and P2 are the same as in 29-bit mode, e.g.
-	 *
-	 *	P1 - provides a cached window onto physical memory
-	 *	P2 - provides an uncached window onto physical memory
-	 */
-	size = __MEMORY_START + __MEMORY_SIZE;
-
-	ret = pmb_remap(P1SEG, 0x00000000, size, PMB_C);
-	BUG_ON(ret != size);
-
-	ret = pmb_remap(P2SEG, 0x00000000, size, PMB_WT | PMB_UB);
-	BUG_ON(ret != size);
-
-	ctrl_outl(0, PMB_IRMCR);
-
-	/* PMB.SE and UB[7] */
-	ctrl_outl(PASCR_SE | (1 << 7), PMB_PASCR);
-
-	/* Flush out the TLB */
-	i =  ctrl_inl(MMUCR);
-	i |= MMUCR_TI;
-	ctrl_outl(i, MMUCR);
-
-	back_to_cached();
-
-	return 0;
+	write_lock_irqsave(&pmb_rwlock, flags);
+	__pmb_unmap_entry(pmbe, depth);
+	write_unlock_irqrestore(&pmb_rwlock, flags);
 }
-#else
-int __uses_jump_to_uncached pmb_init(void)
+
+static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
+{
+	return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
+}
+
+static void __init pmb_notify(void)
 {
 	int i;
-	unsigned long addr, data;
 
-	jump_to_uncached();
+	pr_info("PMB: boot mappings:\n");
 
-	for (i = 0; i < PMB_ENTRY_MAX; i++) {
+	read_lock(&pmb_rwlock);
+
+	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
 		struct pmb_entry *pmbe;
-		unsigned long vpn, ppn, flags;
 
-		addr = PMB_DATA + (i << PMB_E_SHIFT);
-		data = ctrl_inl(addr);
-		if (!(data & PMB_V))
+		if (!test_bit(i, pmb_map))
 			continue;
 
-		if (data & PMB_C) {
-#if defined(CONFIG_CACHE_WRITETHROUGH)
-			data |= PMB_WT;
-#elif defined(CONFIG_CACHE_WRITEBACK)
-			data &= ~PMB_WT;
-#else
-			data &= ~(PMB_C | PMB_WT);
-#endif
-		}
-		ctrl_outl(data, addr);
+		pmbe = &pmb_entry_list[i];
 
-		ppn = data & PMB_PFN_MASK;
-
-		flags = data & (PMB_C | PMB_WT | PMB_UB);
-		flags |= data & PMB_SZ_MASK;
-
-		addr = PMB_ADDR + (i << PMB_E_SHIFT);
-		data = ctrl_inl(addr);
-
-		vpn = data & PMB_PFN_MASK;
-
-		pmbe = pmb_alloc(vpn, ppn, flags, i);
-		WARN_ON(IS_ERR(pmbe));
+		pr_info("       0x%08lx -> 0x%08lx [ %4ldMB %2scached ]\n",
+			pmbe->vpn >> PAGE_SHIFT, pmbe->ppn >> PAGE_SHIFT,
+			pmbe->size >> 20, (pmbe->flags & PMB_C) ? "" : "un");
 	}
 
-	back_to_cached();
-
-	return 0;
+	read_unlock(&pmb_rwlock);
 }
-#endif /* CONFIG_PMB */
+
+/*
+ * Sync our software copy of the PMB mappings with those in hardware. The
+ * mappings in the hardware PMB were either set up by the bootloader or
+ * very early on by the kernel.
+ */
+static void __init pmb_synchronize(void)
+{
+	struct pmb_entry *pmbp = NULL;
+	int i, j;
+
+	/*
+	 * Run through the initial boot mappings, log the established
+	 * ones, and blow away anything that falls outside of the valid
+	 * PPN range. Specifically, we only care about existing mappings
+	 * that impact the cached/uncached sections.
+	 *
+	 * Note that touching these can be a bit of a minefield; the boot
+	 * loader can establish multi-page mappings with the same caching
+	 * attributes, so we need to ensure that we aren't modifying a
+	 * mapping that we're presently executing from, or may execute
+	 * from in the case of straddling page boundaries.
+	 *
+	 * In the future we will have to tidy up after the boot loader by
+	 * jumping between the cached and uncached mappings and tearing
+	 * down alternating mappings while executing from the other.
+	 */
+	for (i = 0; i < NR_PMB_ENTRIES; i++) {
+		unsigned long addr, data;
+		unsigned long addr_val, data_val;
+		unsigned long ppn, vpn, flags;
+		unsigned long irqflags;
+		unsigned int size;
+		struct pmb_entry *pmbe;
+
+		addr = mk_pmb_addr(i);
+		data = mk_pmb_data(i);
+
+		addr_val = __raw_readl(addr);
+		data_val = __raw_readl(data);
+
+		/*
+		 * Skip over any bogus entries
+		 */
+		if (!(data_val & PMB_V) || !(addr_val & PMB_V))
+			continue;
+
+		ppn = data_val & PMB_PFN_MASK;
+		vpn = addr_val & PMB_PFN_MASK;
+
+		/*
+		 * Only preserve in-range mappings.
+		 */
+		if (!pmb_ppn_in_range(ppn)) {
+			/*
+			 * Invalidate anything out of bounds.
+			 */
+			writel_uncached(addr_val & ~PMB_V, addr);
+			writel_uncached(data_val & ~PMB_V, data);
+			continue;
+		}
+
+		/*
+		 * Update the caching attributes if necessary
+		 */
+		if (data_val & PMB_C) {
+			data_val &= ~PMB_CACHE_MASK;
+			data_val |= pmb_cache_flags();
+
+			writel_uncached(data_val, data);
+		}
+
+		size = data_val & PMB_SZ_MASK;
+		flags = size | (data_val & PMB_CACHE_MASK);
+
+		pmbe = pmb_alloc(vpn, ppn, flags, i);
+		if (IS_ERR(pmbe)) {
+			WARN_ON_ONCE(1);
+			continue;
+		}
+
+		spin_lock_irqsave(&pmbe->lock, irqflags);
+
+		for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
+			if (pmb_sizes[j].flag == size)
+				pmbe->size = pmb_sizes[j].size;
+
+		if (pmbp) {
+			spin_lock(&pmbp->lock);
+
+			/*
+			 * Compare the previous entry against the current one to
+			 * see if the entries span a contiguous mapping. If so,
+			 * set up the entry links accordingly. Compound mappings
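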
+			 * are later coalesced.
+			 */
+			if (pmb_can_merge(pmbp, pmbe))
+				pmbp->link = pmbe;
+
+			spin_unlock(&pmbp->lock);
+		}
+
+		pmbp = pmbe;
+
+		spin_unlock_irqrestore(&pmbe->lock, irqflags);
+	}
+}
+
+static void __init pmb_merge(struct pmb_entry *head)
+{
+	unsigned long span, newsize;
+	struct pmb_entry *tail;
+	int i = 1, depth = 0;
+
+	span = newsize = head->size;
+
+	tail = head->link;
+	while (tail) {
+		span += tail->size;
+
+		if (pmb_size_valid(span)) {
+			newsize = span;
+			depth = i;
+		}
+
+		/* This is the end of the line.. */
+		if (!tail->link)
+			break;
+
+		tail = tail->link;
+		i++;
+	}
+
+	/*
+	 * The merged page size must be valid.
+	 */
+	if (!pmb_size_valid(newsize))
+		return;
+
+	head->flags &= ~PMB_SZ_MASK;
+	head->flags |= pmb_size_to_flags(newsize);
+
+	head->size = newsize;
+
+	__pmb_unmap_entry(head->link, depth);
+	__set_pmb_entry(head);
+}
+
+static void __init pmb_coalesce(void)
+{
+	unsigned long flags;
+	int i;
+
+	write_lock_irqsave(&pmb_rwlock, flags);
+
+	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
+		struct pmb_entry *pmbe;
+
+		if (!test_bit(i, pmb_map))
+			continue;
+
+		pmbe = &pmb_entry_list[i];
+
+		/*
+		 * We're only interested in compound mappings
+		 */
+		if (!pmbe->link)
+			continue;
+
+		/*
+		 * Nothing to do if it already uses the largest possible
+		 * page size.
+		 */
+		if (pmbe->size == SZ_512M)
+			continue;
+
+		pmb_merge(pmbe);
+	}
+
+	write_unlock_irqrestore(&pmb_rwlock, flags);
+}
+
+#ifdef CONFIG_UNCACHED_MAPPING
+static void __init pmb_resize(void)
+{
+	int i;
+
+	/*
+	 * If the uncached mapping was constructed by the kernel, it will
+	 * already be a reasonable size.
+	 */
+	if (uncached_size == SZ_16M)
+		return;
+
+	read_lock(&pmb_rwlock);
+
+	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
+		struct pmb_entry *pmbe;
+		unsigned long flags;
+
+		if (!test_bit(i, pmb_map))
+			continue;
+
+		pmbe = &pmb_entry_list[i];
+
+		if (pmbe->vpn != uncached_start)
+			continue;
+
+		/*
+		 * Found it, now resize it.
+		 */
+		spin_lock_irqsave(&pmbe->lock, flags);
+
+		pmbe->size = SZ_16M;
+		pmbe->flags &= ~PMB_SZ_MASK;
+		pmbe->flags |= pmb_size_to_flags(pmbe->size);
+
+		uncached_resize(pmbe->size);
+
+		__set_pmb_entry(pmbe);
+
+		spin_unlock_irqrestore(&pmbe->lock, flags);
+	}
+
+	read_lock(&pmb_rwlock);
+}
+#endif
+
+void __init pmb_init(void)
+{
+	/* Synchronize software state */
+	pmb_synchronize();
+
+	/* Attempt to combine compound mappings */
+	pmb_coalesce();
+
+#ifdef CONFIG_UNCACHED_MAPPING
+	/* Resize initial mappings, if necessary */
+	pmb_resize();
+#endif
+
+	/* Log them */
+	pmb_notify();
+
+	writel_uncached(0, PMB_IRMCR);
+
+	/* Flush out the TLB */
+	__raw_writel(__raw_readl(MMUCR) | MMUCR_TI, MMUCR);
+	ctrl_barrier();
+}
+
+bool __in_29bit_mode(void)
+{
+	return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0;
+}
 
 static int pmb_seq_show(struct seq_file *file, void *iter)
 {
@@ -378,8 +666,8 @@
 		unsigned int size;
 		char *sz_str = NULL;
 
-		addr = ctrl_inl(mk_pmb_addr(i));
-		data = ctrl_inl(mk_pmb_data(i));
+		addr = __raw_readl(mk_pmb_addr(i));
+		data = __raw_readl(mk_pmb_data(i));
 
 		size = data & PMB_SZ_MASK;
 		sz_str = (size == PMB_SZ_16M)  ? " 16MB":
@@ -437,14 +725,21 @@
 	if (state.event == PM_EVENT_ON &&
 	    prev_state.event == PM_EVENT_FREEZE) {
 		struct pmb_entry *pmbe;
+
+		read_lock(&pmb_rwlock);
+
 		for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
-			if (test_bit(i, &pmb_map)) {
+			if (test_bit(i, pmb_map)) {
 				pmbe = &pmb_entry_list[i];
 				set_pmb_entry(pmbe);
 			}
 		}
+
+		read_unlock(&pmb_rwlock);
 	}
+
 	prev_state = state;
+
 	return 0;
 }
 
@@ -462,6 +757,5 @@
 {
 	return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver);
 }
-
 subsys_initcall(pmb_sysdev_init);
 #endif
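
Much of the reworked pmb.c is about coalescing contiguous boot mappings: pmb_merge() walks the linked entries, keeps a running size, and remembers the longest prefix whose total is itself one of the architected PMB sizes (16M/64M/128M/512M). A standalone sketch of that size arithmetic, using plain arrays instead of the linked pmb_entry chain:

#include <stdio.h>

/* The architected PMB page sizes, mirroring pmb_sizes[] in the patch. */
static const unsigned long pmb_page_sizes[] = {
	512UL << 20, 128UL << 20, 64UL << 20, 16UL << 20,
};

static int size_valid(unsigned long size)
{
	unsigned int i;

	for (i = 0; i < sizeof(pmb_page_sizes) / sizeof(pmb_page_sizes[0]); i++)
		if (pmb_page_sizes[i] == size)
			return 1;
	return 0;
}

/*
 * Given the sizes of a chain of contiguous entries (head first), find the
 * longest prefix whose total is itself a valid PMB size -- the same rule
 * pmb_merge() uses to decide how many linked entries it may fold away.
 */
static unsigned long merge_prefix(const unsigned long *sizes, int n, int *depth)
{
	unsigned long span = 0, best = sizes[0];
	int i;

	*depth = 0;
	for (i = 0; i < n; i++) {
		span += sizes[i];
		if (size_valid(span)) {
			best = span;
			*depth = i;	/* entries past the head that get folded */
		}
	}
	return best;
}

int main(void)
{
	/* Four contiguous 16MB boot mappings coalesce into one 64MB entry. */
	unsigned long chain[] = { 16UL << 20, 16UL << 20, 16UL << 20, 16UL << 20 };
	int depth;
	unsigned long merged = merge_prefix(chain, 4, &depth);

	printf("merged size %luMB, %d trailing entries folded\n",
	       merged >> 20, depth);
	return 0;
}
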
diff --git a/arch/sh/mm/tlb-pteaex.c b/arch/sh/mm/tlb-pteaex.c
index 409b7c2..32dc674 100644
--- a/arch/sh/mm/tlb-pteaex.c
+++ b/arch/sh/mm/tlb-pteaex.c
@@ -68,8 +68,7 @@
  * in extended mode, the legacy 8-bit ASID field in address array 1 has
  * undefined behaviour.
  */
-void __uses_jump_to_uncached local_flush_tlb_one(unsigned long asid,
-						 unsigned long page)
+void local_flush_tlb_one(unsigned long asid, unsigned long page)
 {
 	jump_to_uncached();
 	__raw_writel(page, MMU_UTLB_ADDRESS_ARRAY | MMU_PAGE_ASSOC_BIT);
diff --git a/arch/sh/mm/tlb-sh3.c b/arch/sh/mm/tlb-sh3.c
index ace8e6d..4f5f7cb 100644
--- a/arch/sh/mm/tlb-sh3.c
+++ b/arch/sh/mm/tlb-sh3.c
@@ -41,14 +41,14 @@
 
 	/* Set PTEH register */
 	vpn = (address & MMU_VPN_MASK) | get_asid();
-	ctrl_outl(vpn, MMU_PTEH);
+	__raw_writel(vpn, MMU_PTEH);
 
 	pteval = pte_val(pte);
 
 	/* Set PTEL register */
 	pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
 	/* conveniently, we want all the software flags to be 0 anyway */
-	ctrl_outl(pteval, MMU_PTEL);
+	__raw_writel(pteval, MMU_PTEL);
 
 	/* Load the TLB */
 	asm volatile("ldtlb": /* no output */ : /* no input */ : "memory");
@@ -75,5 +75,5 @@
 	}
 
 	for (i = 0; i < ways; i++)
-		ctrl_outl(data, addr + (i << 8));
+		__raw_writel(data, addr + (i << 8));
 }
diff --git a/arch/sh/mm/tlb-sh4.c b/arch/sh/mm/tlb-sh4.c
index 8cf550e..ccac77f 100644
--- a/arch/sh/mm/tlb-sh4.c
+++ b/arch/sh/mm/tlb-sh4.c
@@ -29,7 +29,7 @@
 
 	/* Set PTEH register */
 	vpn = (address & MMU_VPN_MASK) | get_asid();
-	ctrl_outl(vpn, MMU_PTEH);
+	__raw_writel(vpn, MMU_PTEH);
 
 	pteval = pte.pte_low;
 
@@ -41,13 +41,13 @@
 	 * the protection bits (with the exception of the compat-mode SZ
 	 * and PR bits, which are cleared) being written out in PTEL.
 	 */
-	ctrl_outl(pte.pte_high, MMU_PTEA);
+	__raw_writel(pte.pte_high, MMU_PTEA);
 #else
 	if (cpu_data->flags & CPU_HAS_PTEA) {
 		/* The last 3 bits and the first one of pteval contains
 		 * the PTEA timing control and space attribute bits
 		 */
-		ctrl_outl(copy_ptea_attributes(pteval), MMU_PTEA);
+		__raw_writel(copy_ptea_attributes(pteval), MMU_PTEA);
 	}
 #endif
 
@@ -57,15 +57,14 @@
 	pteval |= _PAGE_WT;
 #endif
 	/* conveniently, we want all the software flags to be 0 anyway */
-	ctrl_outl(pteval, MMU_PTEL);
+	__raw_writel(pteval, MMU_PTEL);
 
 	/* Load the TLB */
 	asm volatile("ldtlb": /* no output */ : /* no input */ : "memory");
 	local_irq_restore(flags);
 }
 
-void __uses_jump_to_uncached local_flush_tlb_one(unsigned long asid,
-						 unsigned long page)
+void local_flush_tlb_one(unsigned long asid, unsigned long page)
 {
 	unsigned long addr, data;
 
@@ -78,6 +77,6 @@
 	addr = MMU_UTLB_ADDRESS_ARRAY | MMU_PAGE_ASSOC_BIT;
 	data = page | asid; /* VALID bit is off */
 	jump_to_uncached();
-	ctrl_outl(data, addr);
+	__raw_writel(data, addr);
 	back_to_cached();
 }
diff --git a/arch/sh/mm/tlb-sh5.c b/arch/sh/mm/tlb-sh5.c
index fdb64e4..f27dbe1 100644
--- a/arch/sh/mm/tlb-sh5.c
+++ b/arch/sh/mm/tlb-sh5.c
@@ -143,3 +143,42 @@
  */
 void sh64_teardown_tlb_slot(unsigned long long config_addr)
 	__attribute__ ((alias("__flush_tlb_slot")));
+
+static int dtlb_entry;
+static unsigned long long dtlb_entries[64];
+
+void tlb_wire_entry(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
+{
+	unsigned long long entry;
+	unsigned long paddr, flags;
+
+	BUG_ON(dtlb_entry == ARRAY_SIZE(dtlb_entries));
+
+	local_irq_save(flags);
+
+	entry = sh64_get_wired_dtlb_entry();
+	dtlb_entries[dtlb_entry++] = entry;
+
+	paddr = pte_val(pte) & _PAGE_FLAGS_HARDWARE_MASK;
+	paddr &= ~PAGE_MASK;
+
+	sh64_setup_tlb_slot(entry, addr, get_asid(), paddr);
+
+	local_irq_restore(flags);
+}
+
+void tlb_unwire_entry(void)
+{
+	unsigned long long entry;
+	unsigned long flags;
+
+	BUG_ON(!dtlb_entry);
+
+	local_irq_save(flags);
+	entry = dtlb_entries[--dtlb_entry];
+
+	sh64_teardown_tlb_slot(entry);
+	sh64_put_wired_dtlb_entry(entry);
+
+	local_irq_restore(flags);
+}
diff --git a/arch/sh/mm/tlb-urb.c b/arch/sh/mm/tlb-urb.c
new file mode 100644
index 0000000..bb5b909
--- /dev/null
+++ b/arch/sh/mm/tlb-urb.c
@@ -0,0 +1,81 @@
+/*
+ * arch/sh/mm/tlb-urb.c
+ *
+ * TLB entry wiring helpers for URB-equipped parts.
+ *
+ * Copyright (C) 2010  Matt Fleming
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/mm.h>
+#include <linux/io.h>
+#include <asm/tlb.h>
+#include <asm/mmu_context.h>
+
+/*
+ * Load the entry for 'addr' into the TLB and wire the entry.
+ */
+void tlb_wire_entry(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
+{
+	unsigned long status, flags;
+	int urb;
+
+	local_irq_save(flags);
+
+	/* Load the entry into the TLB */
+	__update_tlb(vma, addr, pte);
+
+	/* ... and wire it up. */
+	status = __raw_readl(MMUCR);
+	urb = (status & MMUCR_URB) >> MMUCR_URB_SHIFT;
+	status &= ~MMUCR_URB;
+
+	/*
+	 * Make sure we're not trying to wire the last TLB entry slot.
+	 */
+	BUG_ON(!--urb);
+
+	urb = urb % MMUCR_URB_NENTRIES;
+
+	status |= (urb << MMUCR_URB_SHIFT);
+	__raw_writel(status, MMUCR);
+	ctrl_barrier();
+
+	local_irq_restore(flags);
+}
+
+/*
+ * Unwire the last wired TLB entry.
+ *
+ * It should also be noted that it is not possible to wire and unwire
+ * TLB entries in an arbitrary order. If you wire TLB entry N, followed
+ * by entry N+1, you must unwire entry N+1 first, then entry N. In this
+ * respect, it works like a stack or LIFO queue.
+ */
+void tlb_unwire_entry(void)
+{
+	unsigned long status, flags;
+	int urb;
+
+	local_irq_save(flags);
+
+	status = __raw_readl(MMUCR);
+	urb = (status & MMUCR_URB) >> MMUCR_URB_SHIFT;
+	status &= ~MMUCR_URB;
+
+	/*
+	 * Make sure we're not trying to unwire a TLB entry when none
+	 * have been wired.
+	 */
+	BUG_ON(urb++ == MMUCR_URB_NENTRIES);
+
+	urb = urb % MMUCR_URB_NENTRIES;
+
+	status |= (urb << MMUCR_URB_SHIFT);
+	__raw_writel(status, MMUCR);
+	ctrl_barrier();
+
+	local_irq_restore(flags);
+}
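
The wire/unwire helpers above move the URB boundary in MMUCR down by one for every wired entry and back up on unwire, which is why the ordering must be last-in, first-out. A small model of just that boundary arithmetic (no register access, and the entry count is illustrative):

#include <stdio.h>
#include <assert.h>

#define NENTRIES 64   /* stands in for MMUCR_URB_NENTRIES */

/*
 * Boundary index: entries at or above 'urb' are wired.  Wiring lowers the
 * boundary by one, unwiring raises it, so entries can only be released in
 * the reverse of the order they were wired -- the LIFO rule described in
 * the comment above tlb_unwire_entry().
 */
static int urb = NENTRIES;

static void wire_one(void)
{
	assert(urb > 1);		/* never consume the last slot */
	urb--;
}

static void unwire_one(void)
{
	assert(urb < NENTRIES);		/* otherwise nothing is wired */
	urb++;
}

int main(void)
{
	wire_one();			/* wire A */
	wire_one();			/* wire B */
	unwire_one();			/* must release B first ... */
	unwire_one();			/* ... then A */
	printf("boundary back at %d of %d\n", urb, NENTRIES);
	return 0;
}
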
diff --git a/arch/sh/mm/tlbflush_32.c b/arch/sh/mm/tlbflush_32.c
index 6f45c1f..004bb3f 100644
--- a/arch/sh/mm/tlbflush_32.c
+++ b/arch/sh/mm/tlbflush_32.c
@@ -132,9 +132,9 @@
 	 *      It's same position, bit #2.
 	 */
 	local_irq_save(flags);
-	status = ctrl_inl(MMUCR);
+	status = __raw_readl(MMUCR);
 	status |= 0x04;
-	ctrl_outl(status, MMUCR);
+	__raw_writel(status, MMUCR);
 	ctrl_barrier();
 	local_irq_restore(flags);
 }
diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c
index de0b0e88..706da1d 100644
--- a/arch/sh/mm/tlbflush_64.c
+++ b/arch/sh/mm/tlbflush_64.c
@@ -36,7 +36,7 @@
 
 static inline void print_prots(pgprot_t prot)
 {
-	printk("prot is 0x%08lx\n",pgprot_val(prot));
+	printk("prot is 0x%016llx\n",pgprot_val(prot));
 
 	printk("%s %s %s %s %s\n",PPROT(_PAGE_SHARED),PPROT(_PAGE_READ),
 	       PPROT(_PAGE_EXECUTE),PPROT(_PAGE_WRITE),PPROT(_PAGE_USER));
diff --git a/arch/sh/mm/uncached.c b/arch/sh/mm/uncached.c
new file mode 100644
index 0000000..cf20a5c
--- /dev/null
+++ b/arch/sh/mm/uncached.c
@@ -0,0 +1,34 @@
+#include <linux/init.h>
+#include <asm/sizes.h>
+#include <asm/page.h>
+
+/*
+ * This is the offset of the uncached section from its cached alias.
+ *
+ * Legacy platforms handle trivial transitions between cached and
+ * uncached segments by making use of the 1:1 mapping relationship in
+ * 512MB lowmem, others via a special uncached mapping.
+ *
+ * The default value is only valid in 29-bit mode; in 32-bit mode it is
+ * updated by the early PMB initialization code.
+ */
+unsigned long cached_to_uncached = SZ_512M;
+unsigned long uncached_size = SZ_512M;
+unsigned long uncached_start, uncached_end;
+
+int virt_addr_uncached(unsigned long kaddr)
+{
+	return (kaddr >= uncached_start) && (kaddr < uncached_end);
+}
+
+void __init uncached_init(void)
+{
+	uncached_start = memory_end;
+	uncached_end = uncached_start + uncached_size;
+}
+
+void __init uncached_resize(unsigned long size)
+{
+	uncached_size = size;
+	uncached_end = uncached_start + uncached_size;
+}
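
uncached.c reduces the cached/uncached split to a fixed offset plus a start/size pair. A standalone sketch of the address arithmetic, using the classic 29-bit P1/P2 layout as example values; the real numbers come from uncached_init() and, in 32-bit mode, from the PMB code:

#include <stdio.h>

/* Illustrative values only: P1 at 0x80000000 (cached), P2 at 0xA0000000
 * (uncached), 512MB apart. */
static unsigned long cached_to_uncached = 512UL << 20;
static unsigned long uncached_start = 0xA0000000UL;
static unsigned long uncached_size  = 512UL << 20;

/* Same range test as virt_addr_uncached(). */
static int addr_is_uncached(unsigned long kaddr)
{
	return kaddr >= uncached_start &&
	       kaddr < uncached_start + uncached_size;
}

int main(void)
{
	unsigned long cached = 0x80001000UL;			/* P1-style address */
	unsigned long alias  = cached + cached_to_uncached;	/* its uncached twin */

	printf("0x%08lx -> 0x%08lx (uncached: %d)\n",
	       cached, alias, addr_is_uncached(alias));
	return 0;
}
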
diff --git a/arch/sh/tools/mach-types b/arch/sh/tools/mach-types
index 6639b25..b25aa55 100644
--- a/arch/sh/tools/mach-types
+++ b/arch/sh/tools/mach-types
@@ -32,6 +32,7 @@
 SNAPGEAR		SH_SECUREEDGE5410
 EDOSK7705		SH_EDOSK7705
 EDOSK7760		SH_EDOSK7760
+SDK7786			SH_SDK7786
 SH4202_MICRODEV		SH_SH4202_MICRODEV
 SH03			SH_SH03
 LANDISK			SH_LANDISK
diff --git a/arch/sparc/kernel/devices.c b/arch/sparc/kernel/devices.c
index b171ae8..b062de9 100644
--- a/arch/sparc/kernel/devices.c
+++ b/arch/sparc/kernel/devices.c
@@ -59,7 +59,7 @@
 
 	cur_inst = 0;
 	for_each_node_by_type(dp, "cpu") {
-		int err = check_cpu_node(dp->node, &cur_inst,
+		int err = check_cpu_node(dp->phandle, &cur_inst,
 					 compare, compare_arg,
 					 prom_node, mid);
 		if (!err) {
diff --git a/arch/sparc/kernel/of_device_32.c b/arch/sparc/kernel/of_device_32.c
index 53a58b3..da527b3 100644
--- a/arch/sparc/kernel/of_device_32.c
+++ b/arch/sparc/kernel/of_device_32.c
@@ -433,7 +433,7 @@
 	if (!parent)
 		dev_set_name(&op->dev, "root");
 	else
-		dev_set_name(&op->dev, "%08x", dp->node);
+		dev_set_name(&op->dev, "%08x", dp->phandle);
 
 	if (of_device_register(op)) {
 		printk("%s: Could not register of device.\n",
diff --git a/arch/sparc/kernel/of_device_64.c b/arch/sparc/kernel/of_device_64.c
index 0a6f2d1..b3d4cb5 100644
--- a/arch/sparc/kernel/of_device_64.c
+++ b/arch/sparc/kernel/of_device_64.c
@@ -676,7 +676,7 @@
 	if (!parent)
 		dev_set_name(&op->dev, "root");
 	else
-		dev_set_name(&op->dev, "%08x", dp->node);
+		dev_set_name(&op->dev, "%08x", dp->phandle);
 
 	if (of_device_register(op)) {
 		printk("%s: Could not register of device.\n",
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index 592b03d..37b66c6 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -722,9 +722,10 @@
 {
 }
 
-void pcibios_align_resource(void *data, struct resource *res,
-			    resource_size_t size, resource_size_t align)
+resource_size_t pcibios_align_resource(void *data, const struct resource *res,
+				resource_size_t size, resource_size_t align)
 {
+	return res->start;
 }
 
 int pcibios_enable_device(struct pci_dev *dev, int mask)
diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c
index 4e2724e..75e88c00 100644
--- a/arch/sparc/kernel/pcic.c
+++ b/arch/sparc/kernel/pcic.c
@@ -768,9 +768,10 @@
 	return str;
 }
 
-void pcibios_align_resource(void *data, struct resource *res,
-			    resource_size_t size, resource_size_t align)
+resource_size_t pcibios_align_resource(void *data, const struct resource *res,
+				resource_size_t size, resource_size_t align)
 {
+	return res->start;
 }
 
 int pcibios_enable_device(struct pci_dev *pdev, int mask)
diff --git a/arch/sparc/kernel/prom.h b/arch/sparc/kernel/prom.h
index 453397f..a8591ef 100644
--- a/arch/sparc/kernel/prom.h
+++ b/arch/sparc/kernel/prom.h
@@ -4,9 +4,6 @@
 #include <linux/spinlock.h>
 #include <asm/prom.h>
 
-extern struct device_node *allnodes;	/* temporary while merging */
-extern rwlock_t devtree_lock;	/* temporary while merging */
-
 extern void * prom_early_alloc(unsigned long size);
 extern void irq_trans_init(struct device_node *dp);
 
diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
index d80a65d..57ac9e2 100644
--- a/arch/sparc/kernel/prom_common.c
+++ b/arch/sparc/kernel/prom_common.c
@@ -37,18 +37,6 @@
 char *of_console_options;
 EXPORT_SYMBOL(of_console_options);
 
-struct device_node *of_find_node_by_phandle(phandle handle)
-{
-	struct device_node *np;
-
-	for (np = allnodes; np; np = np->allnext)
-		if (np->node == handle)
-			break;
-
-	return np;
-}
-EXPORT_SYMBOL(of_find_node_by_phandle);
-
 int of_getintprop_default(struct device_node *np, const char *name, int def)
 {
 	struct property *prop;
@@ -89,7 +77,7 @@
 			void *old_val = prop->value;
 			int ret;
 
-			ret = prom_setprop(dp->node, name, val, len);
+			ret = prom_setprop(dp->phandle, name, val, len);
 
 			err = -EINVAL;
 			if (ret >= 0) {
@@ -236,7 +224,7 @@
 
 	dp->name = get_one_property(node, "name");
 	dp->type = get_one_property(node, "device_type");
-	dp->node = node;
+	dp->phandle = node;
 
 	dp->properties = build_prop_list(node);
 
@@ -313,7 +301,7 @@
 
 	nextp = &allnodes->allnext;
 	allnodes->child = prom_build_tree(allnodes,
-					  prom_getchild(allnodes->node),
+					  prom_getchild(allnodes->phandle),
 					  &nextp);
 	of_console_init();
 
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index aa36223..eb14844 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -370,7 +370,7 @@
 	} else {
 		struct device_node *dp = of_find_node_by_cpuid(cpu);
 
-		prom_startcpu(dp->node, entry, cookie);
+		prom_startcpu(dp->phandle, entry, cookie);
 	}
 
 	for (timeout = 0; timeout < 50000; timeout++) {
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
index b4bf9a9..05b58cc 100644
--- a/arch/x86/include/asm/pci_x86.h
+++ b/arch/x86/include/asm/pci_x86.h
@@ -29,6 +29,7 @@
 #define PCI_CHECK_ENABLE_AMD_MMCONF	0x20000
 #define PCI_HAS_IO_ECS		0x40000
 #define PCI_NOASSIGN_ROMS	0x80000
+#define PCI_ROOT_NO_CRS		0x100000
 
 extern unsigned int pci_probe;
 extern unsigned long pirq_table_addr;
diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
index 8cc1833..b3b531a 100644
--- a/arch/x86/mm/kmemcheck/kmemcheck.c
+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
@@ -337,7 +337,7 @@
 	if (!shadow)
 		return true;
 
-	status = kmemcheck_shadow_test(shadow, size);
+	status = kmemcheck_shadow_test_all(shadow, size);
 
 	return status == KMEMCHECK_SHADOW_INITIALIZED;
 }
diff --git a/arch/x86/mm/kmemcheck/shadow.c b/arch/x86/mm/kmemcheck/shadow.c
index 3f66b82..aec1242 100644
--- a/arch/x86/mm/kmemcheck/shadow.c
+++ b/arch/x86/mm/kmemcheck/shadow.c
@@ -125,12 +125,12 @@
 
 enum kmemcheck_shadow kmemcheck_shadow_test(void *shadow, unsigned int size)
 {
+#ifdef CONFIG_KMEMCHECK_PARTIAL_OK
 	uint8_t *x;
 	unsigned int i;
 
 	x = shadow;
 
-#ifdef CONFIG_KMEMCHECK_PARTIAL_OK
 	/*
 	 * Make sure _some_ bytes are initialized. Gcc frequently generates
 	 * code to access neighboring bytes.
@@ -139,13 +139,25 @@
 		if (x[i] == KMEMCHECK_SHADOW_INITIALIZED)
 			return x[i];
 	}
+
+	return x[0];
 #else
+	return kmemcheck_shadow_test_all(shadow, size);
+#endif
+}
+
+enum kmemcheck_shadow kmemcheck_shadow_test_all(void *shadow, unsigned int size)
+{
+	uint8_t *x;
+	unsigned int i;
+
+	x = shadow;
+
 	/* All bytes must be initialized. */
 	for (i = 0; i < size; ++i) {
 		if (x[i] != KMEMCHECK_SHADOW_INITIALIZED)
 			return x[i];
 	}
-#endif
 
 	return x[0];
 }
diff --git a/arch/x86/mm/kmemcheck/shadow.h b/arch/x86/mm/kmemcheck/shadow.h
index af46d9a..ff0b2f7 100644
--- a/arch/x86/mm/kmemcheck/shadow.h
+++ b/arch/x86/mm/kmemcheck/shadow.h
@@ -11,6 +11,8 @@
 void *kmemcheck_shadow_lookup(unsigned long address);
 
 enum kmemcheck_shadow kmemcheck_shadow_test(void *shadow, unsigned int size);
+enum kmemcheck_shadow kmemcheck_shadow_test_all(void *shadow,
+						unsigned int size);
 void kmemcheck_shadow_set(void *shadow, unsigned int size);
 
 #endif
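
The kmemcheck change above splits the shadow check in two: kmemcheck_shadow_test() may accept an access as soon as one shadow byte is initialized (when CONFIG_KMEMCHECK_PARTIAL_OK is set), while the new kmemcheck_shadow_test_all() insists on every byte. A standalone sketch of the two predicates, reduced to yes/no decisions over a shadow byte array (the real functions return the first offending shadow value rather than a boolean):

#include <stdio.h>
#include <stdint.h>

#define SHADOW_INITIALIZED 1   /* stands in for KMEMCHECK_SHADOW_INITIALIZED */

/* Partial check: accept the access if at least one shadow byte is
 * initialized (the CONFIG_KMEMCHECK_PARTIAL_OK behaviour). */
static int shadow_test_some(const uint8_t *shadow, unsigned int size)
{
	unsigned int i;

	for (i = 0; i < size; i++)
		if (shadow[i] == SHADOW_INITIALIZED)
			return 1;
	return 0;
}

/* Strict check: every shadow byte must be initialized, which is what the
 * new kmemcheck_shadow_test_all() enforces. */
static int shadow_test_all(const uint8_t *shadow, unsigned int size)
{
	unsigned int i;

	for (i = 0; i < size; i++)
		if (shadow[i] != SHADOW_INITIALIZED)
			return 0;
	return 1;
}

int main(void)
{
	uint8_t shadow[4] = { 1, 0, 1, 1 };	/* one byte still uninitialized */

	printf("some: %d, all: %d\n",
	       shadow_test_some(shadow, 4), shadow_test_all(shadow, 4));
	return 0;
}
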
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index 959e548..5f11ff6 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -15,6 +15,51 @@
 	int busnum;
 };
 
+static bool pci_use_crs = true;
+
+static int __init set_use_crs(const struct dmi_system_id *id)
+{
+	pci_use_crs = true;
+	return 0;
+}
+
+static const struct dmi_system_id pci_use_crs_table[] __initconst = {
+	/* http://bugzilla.kernel.org/show_bug.cgi?id=14183 */
+	{
+		.callback = set_use_crs,
+		.ident = "IBM System x3800",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "x3800"),
+		},
+	},
+	{}
+};
+
+void __init pci_acpi_crs_quirks(void)
+{
+	int year;
+
+	if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year < 2008)
+		pci_use_crs = false;
+
+	dmi_check_system(pci_use_crs_table);
+
+	/*
+	 * If the user specifies "pci=use_crs" or "pci=nocrs" explicitly, that
+	 * takes precedence over anything we figured out above.
+	 */
+	if (pci_probe & PCI_ROOT_NO_CRS)
+		pci_use_crs = false;
+	else if (pci_probe & PCI_USE__CRS)
+		pci_use_crs = true;
+
+	printk(KERN_INFO "PCI: %s host bridge windows from ACPI; "
+	       "if necessary, use \"pci=%s\" and report a bug\n",
+	       pci_use_crs ? "Using" : "Ignoring",
+	       pci_use_crs ? "nocrs" : "use_crs");
+}
+
 static acpi_status
 resource_to_addr(struct acpi_resource *resource,
 			struct acpi_resource_address64 *addr)
@@ -45,20 +90,6 @@
 	return AE_OK;
 }
 
-static int
-bus_has_transparent_bridge(struct pci_bus *bus)
-{
-	struct pci_dev *dev;
-
-	list_for_each_entry(dev, &bus->devices, bus_list) {
-		u16 class = dev->class >> 8;
-
-		if (class == PCI_CLASS_BRIDGE_PCI && dev->transparent)
-			return true;
-	}
-	return false;
-}
-
 static void
 align_resource(struct acpi_device *bridge, struct resource *res)
 {
@@ -92,12 +123,8 @@
 	acpi_status status;
 	unsigned long flags;
 	struct resource *root;
-	int max_root_bus_resources = PCI_BUS_NUM_RESOURCES;
 	u64 start, end;
 
-	if (bus_has_transparent_bridge(info->bus))
-		max_root_bus_resources -= 3;
-
 	status = resource_to_addr(acpi_res, &addr);
 	if (!ACPI_SUCCESS(status))
 		return AE_OK;
@@ -115,15 +142,6 @@
 
 	start = addr.minimum + addr.translation_offset;
 	end = start + addr.address_length - 1;
-	if (info->res_num >= max_root_bus_resources) {
-		if (pci_probe & PCI_USE__CRS)
-			printk(KERN_WARNING "PCI: Failed to allocate "
-			       "0x%lx-0x%lx from %s for %s due to _CRS "
-			       "returning more than %d resource descriptors\n",
-			       (unsigned long) start, (unsigned long) end,
-			       root->name, info->name, max_root_bus_resources);
-		return AE_OK;
-	}
 
 	res = &info->res[info->res_num];
 	res->name = info->name;
@@ -133,7 +151,7 @@
 	res->child = NULL;
 	align_resource(info->bridge, res);
 
-	if (!(pci_probe & PCI_USE__CRS)) {
+	if (!pci_use_crs) {
 		dev_printk(KERN_DEBUG, &info->bridge->dev,
 			   "host bridge window %pR (ignored)\n", res);
 		return AE_OK;
@@ -143,7 +161,7 @@
 		dev_err(&info->bridge->dev,
 			"can't allocate host bridge window %pR\n", res);
 	} else {
-		info->bus->resource[info->res_num] = res;
+		pci_bus_add_resource(info->bus, res, 0);
 		info->res_num++;
 		if (addr.translation_offset)
 			dev_info(&info->bridge->dev, "host bridge window %pR "
@@ -164,10 +182,8 @@
 	struct pci_root_info info;
 	size_t size;
 
-	if (!(pci_probe & PCI_USE__CRS))
-		dev_info(&device->dev,
-			 "ignoring host bridge windows from ACPI; "
-			 "boot with \"pci=use_crs\" to use them\n");
+	if (pci_use_crs)
+		pci_bus_remove_resources(bus);
 
 	info.bridge = device;
 	info.bus = bus;
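
The new pci_use_crs logic layers three inputs: default to using _CRS, drop it for BIOSes dated before 2008, re-enable it for machines on the DMI whitelist, and let an explicit pci=use_crs / pci=nocrs override everything. A standalone sketch of that precedence (flag values are made up; only the ordering mirrors pci_acpi_crs_quirks()):

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical flags standing in for the pci_probe bits in the patch. */
#define PROBE_USE_CRS	(1 << 0)
#define PROBE_NO_CRS	(1 << 1)

/*
 * Decision order from pci_acpi_crs_quirks(): start from "use _CRS", drop it
 * for pre-2008 BIOSes (a year of 0 models "date unknown"), re-enable it for
 * whitelisted machines, and let an explicit command-line request win.
 */
static bool decide_use_crs(int bios_year, bool dmi_whitelisted, unsigned int probe)
{
	bool use_crs = true;

	if (bios_year && bios_year < 2008)
		use_crs = false;
	if (dmi_whitelisted)
		use_crs = true;

	if (probe & PROBE_NO_CRS)
		use_crs = false;
	else if (probe & PROBE_USE_CRS)
		use_crs = true;

	return use_crs;
}

int main(void)
{
	printf("old BIOS, no override:  %d\n", decide_use_crs(2006, false, 0));
	printf("old BIOS, pci=use_crs:  %d\n", decide_use_crs(2006, false, PROBE_USE_CRS));
	printf("new BIOS, pci=nocrs:    %d\n", decide_use_crs(2009, false, PROBE_NO_CRS));
	return 0;
}
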
diff --git a/arch/x86/pci/bus_numa.c b/arch/x86/pci/bus_numa.c
index f939d60..12d54ff 100644
--- a/arch/x86/pci/bus_numa.c
+++ b/arch/x86/pci/bus_numa.c
@@ -36,13 +36,14 @@
 	printk(KERN_DEBUG "PCI: peer root bus %02x res updated from pci conf\n",
 			b->number);
 
+	pci_bus_remove_resources(b);
 	info = &pci_root_info[i];
 	for (j = 0; j < info->res_num; j++) {
 		struct resource *res;
 		struct resource *root;
 
 		res = &info->res[j];
-		b->resource[j] = res;
+		pci_bus_add_resource(b, res, 0);
 		if (res->flags & IORESOURCE_IO)
 			root = &ioport_resource;
 		else
diff --git a/arch/x86/pci/bus_numa.h b/arch/x86/pci/bus_numa.h
index adbc23f..731b64e 100644
--- a/arch/x86/pci/bus_numa.h
+++ b/arch/x86/pci/bus_numa.h
@@ -2,8 +2,7 @@
 
 /*
 * sub bus (transparent) will use entries from 3 to store extra from
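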
- * root, so need to make sure we have enough slot there, Should we
- * increase PCI_BUS_NUM_RESOURCES?
+ * root, so we need to make sure there are enough slots there.
  */
 #define RES_NUM 16
 struct pci_root_info {
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index d2552c6..3736176 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -520,6 +520,9 @@
 	} else if (!strcmp(str, "use_crs")) {
 		pci_probe |= PCI_USE__CRS;
 		return NULL;
+	} else if (!strcmp(str, "nocrs")) {
+		pci_probe |= PCI_ROOT_NO_CRS;
+		return NULL;
 	} else if (!strcmp(str, "earlydump")) {
 		pci_early_dump_regs = 1;
 		return NULL;
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index 5dc9e8c..5a8fbf8 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -60,22 +60,20 @@
  * but we want to try to avoid allocating at 0x2900-0x2bff
  * which might have be mirrored at 0x0100-0x03ff..
  */
-void
-pcibios_align_resource(void *data, struct resource *res,
+resource_size_t
+pcibios_align_resource(void *data, const struct resource *res,
 			resource_size_t size, resource_size_t align)
 {
 	struct pci_dev *dev = data;
+	resource_size_t start = res->start;
 
 	if (res->flags & IORESOURCE_IO) {
-		resource_size_t start = res->start;
-
 		if (skip_isa_ioresource_align(dev))
-			return;
-		if (start & 0x300) {
+			return start;
+		if (start & 0x300)
 			start = (start + 0x3ff) & ~0x3ff;
-			res->start = start;
-		}
 	}
+	return start;
 }
 EXPORT_SYMBOL(pcibios_align_resource);
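
pcibios_align_resource() now returns the aligned start instead of writing it back into the resource; the ISA quirk itself is unchanged: an I/O start that lands in the 0x100-0x3ff part of a 1KB block is rounded up to the next 0x400 boundary. The rounding, in isolation, as a standalone sketch:

#include <stdio.h>

/*
 * If an I/O window starts inside 0x100-0x3ff of any 1KB block, bump it to
 * the next 0x400 boundary so it cannot shadow the 0x0100-0x03ff legacy
 * range.  This is the (start + 0x3ff) & ~0x3ff rounding the function now
 * returns instead of modifying the resource in place.
 */
static unsigned long align_io_start(unsigned long start)
{
	if (start & 0x300)
		start = (start + 0x3ff) & ~0x3ffUL;
	return start;
}

int main(void)
{
	printf("0x2900 -> 0x%lx\n", align_io_start(0x2900UL));	/* moves to 0x2c00 */
	printf("0x2c00 -> 0x%lx\n", align_io_start(0x2c00UL));	/* already aligned */
	return 0;
}
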
 
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
index 0696d50..b02f6d8 100644
--- a/arch/x86/pci/irq.c
+++ b/arch/x86/pci/irq.c
@@ -590,6 +590,8 @@
 	case PCI_DEVICE_ID_INTEL_ICH10_1:
 	case PCI_DEVICE_ID_INTEL_ICH10_2:
 	case PCI_DEVICE_ID_INTEL_ICH10_3:
+	case PCI_DEVICE_ID_INTEL_CPT_LPC1:
+	case PCI_DEVICE_ID_INTEL_CPT_LPC2:
 		r->name = "PIIX/ICH";
 		r->get = pirq_piix_get;
 		r->set = pirq_piix_set;
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
index b19d1e5..8f3f9a5 100644
--- a/arch/x86/pci/mmconfig-shared.c
+++ b/arch/x86/pci/mmconfig-shared.c
@@ -303,22 +303,17 @@
 {
 	struct pci_mmcfg_region *cfg, *cfgx;
 
-	/* last one*/
-	cfg = list_entry(pci_mmcfg_list.prev, typeof(*cfg), list);
-	if (cfg)
-		if (cfg->end_bus < cfg->start_bus)
-			cfg->end_bus = 255;
-
-	if (list_is_singular(&pci_mmcfg_list))
-		return;
-
-	/* don't overlap please */
+	/* Fixup overlaps */
 	list_for_each_entry(cfg, &pci_mmcfg_list, list) {
 		if (cfg->end_bus < cfg->start_bus)
 			cfg->end_bus = 255;
 
+		/* Don't access the list head ! */
+		if (cfg->list.next == &pci_mmcfg_list)
+			break;
+
 		cfgx = list_entry(cfg->list.next, typeof(*cfg), list);
-		if (cfg != cfgx && cfg->end_bus >= cfgx->start_bus)
+		if (cfg->end_bus >= cfgx->start_bus)
 			cfg->end_bus = cfgx->start_bus - 1;
 	}
 }
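
The simplified MMCONFIG fixup walks the (sorted) region list once, treating an end_bus below start_bus as open-ended (clamp to 255) and trimming any region that reaches into its successor. The same pass over a plain array, as a standalone sketch (the -1 end value is just a sentinel for "unset"):

#include <stdio.h>

struct region {
	int start_bus;
	int end_bus;
};

/*
 * Mirror of the overlap fixup: regions are assumed sorted by start_bus;
 * an end below its own start becomes 255, and any region reaching into
 * its successor is trimmed back to that successor's start - 1.
 */
static void fixup_overlaps(struct region *r, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (r[i].end_bus < r[i].start_bus)
			r[i].end_bus = 255;
		if (i + 1 < n && r[i].end_bus >= r[i + 1].start_bus)
			r[i].end_bus = r[i + 1].start_bus - 1;
	}
}

int main(void)
{
	struct region regs[] = { { 0, -1 }, { 64, 127 } };
	int i;

	fixup_overlaps(regs, 2);
	for (i = 0; i < 2; i++)
		printf("bus %02x-%02x\n", regs[i].start_bus, regs[i].end_bus);
	return 0;
}
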
diff --git a/arch/xtensa/kernel/pci.c b/arch/xtensa/kernel/pci.c
index b7c0734..cd10269 100644
--- a/arch/xtensa/kernel/pci.c
+++ b/arch/xtensa/kernel/pci.c
@@ -69,26 +69,25 @@
  * but we want to try to avoid allocating at 0x2900-0x2bff
  * which might have be mirrored at 0x0100-0x03ff..
  */
-void
-pcibios_align_resource(void *data, struct resource *res, resource_size_t size,
-    		       resource_size_t align)
+resource_size_t
+pcibios_align_resource(void *data, const struct resource *res,
+		       resource_size_t size, resource_size_t align)
 {
 	struct pci_dev *dev = data;
+	resource_size_t start = res->start;
 
 	if (res->flags & IORESOURCE_IO) {
-		resource_size_t start = res->start;
-
 		if (size > 0x100) {
 			printk(KERN_ERR "PCI: I/O Region %s/%d too large"
 			       " (%ld bytes)\n", pci_name(dev),
 			       dev->resource - res, size);
 		}
 
-		if (start & 0x300) {
+		if (start & 0x300)
 			start = (start + 0x3ff) & ~0x3ff;
-			res->start = start;
-		}
 	}
+
+	return start;
 }
 
 int
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 81c185a..6a2e295 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -114,6 +114,16 @@
 	help
 	  These are 'Null' algorithms, used by IPsec, which do nothing.
 
+config CRYPTO_PCRYPT
+	tristate "Parallel crypto engine (EXPERIMENTAL)"
+	depends on SMP && EXPERIMENTAL
+	select PADATA
+	select CRYPTO_MANAGER
+	select CRYPTO_AEAD
+	help
+	  This converts an arbitrary crypto algorithm into a parallel
+	  algorithm that executes in kernel threads.
+
 config CRYPTO_WORKQUEUE
        tristate
 
diff --git a/crypto/Makefile b/crypto/Makefile
index 9e8f619..d7e6441 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -56,6 +56,7 @@
 obj-$(CONFIG_CRYPTO_CTR) += ctr.o
 obj-$(CONFIG_CRYPTO_GCM) += gcm.o
 obj-$(CONFIG_CRYPTO_CCM) += ccm.o
+obj-$(CONFIG_CRYPTO_PCRYPT) += pcrypt.o
 obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o
 obj-$(CONFIG_CRYPTO_DES) += des_generic.o
 obj-$(CONFIG_CRYPTO_FCRYPT) += fcrypt.o
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index f6f0833..fe980da 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -1,6 +1,6 @@
 /*
  * Asynchronous block chaining cipher operations.
- * 
+ *
  * This is the asynchronous version of blkcipher.c indicating completion
  * via a callback.
  *
@@ -8,7 +8,7 @@
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option) 
+ * Software Foundation; either version 2 of the License, or (at your option)
  * any later version.
  *
  */
diff --git a/crypto/aead.c b/crypto/aead.c
index 0a55da7..6729e8f 100644
--- a/crypto/aead.c
+++ b/crypto/aead.c
@@ -1,13 +1,13 @@
 /*
  * AEAD: Authenticated Encryption with Associated Data
- * 
+ *
  * This file provides API support for AEAD algorithms.
  *
  * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option) 
+ * Software Foundation; either version 2 of the License, or (at your option)
  * any later version.
  *
  */
diff --git a/crypto/aes_generic.c b/crypto/aes_generic.c
index e78b7ee..a68c73d 100644
--- a/crypto/aes_generic.c
+++ b/crypto/aes_generic.c
@@ -1,4 +1,4 @@
-/* 
+/*
  * Cryptographic API.
  *
  * AES Cipher Algorithm.
@@ -1127,7 +1127,7 @@
 
 #define star_x(x) (((x) & 0x7f7f7f7f) << 1) ^ ((((x) & 0x80808080) >> 7) * 0x1b)
 
-#define imix_col(y,x)	do {		\
+#define imix_col(y, x)	do {		\
 	u	= star_x(x);		\
 	v	= star_x(u);		\
 	w	= star_x(v);		\
diff --git a/crypto/algapi.c b/crypto/algapi.c
index f149b1c..3e4524e 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -230,7 +230,7 @@
 	list_add(&alg->cra_list, &crypto_alg_list);
 	list_add(&larval->alg.cra_list, &crypto_alg_list);
 
-out:	
+out:
 	return larval;
 
 free_larval:
@@ -388,7 +388,7 @@
 {
 	int ret;
 	LIST_HEAD(list);
-	
+
 	down_write(&crypto_alg_sem);
 	ret = crypto_remove_alg(alg, &list);
 	up_write(&crypto_alg_sem);
diff --git a/crypto/anubis.c b/crypto/anubis.c
index e42c3a8..77530d5 100644
--- a/crypto/anubis.c
+++ b/crypto/anubis.c
@@ -469,14 +469,13 @@
 	u32 kappa[ANUBIS_MAX_N];
 	u32 inter[ANUBIS_MAX_N];
 
-	switch (key_len)
-	{
+	switch (key_len) {
 		case 16: case 20: case 24: case 28:
 		case 32: case 36: case 40:
 			break;
 		default:
 			*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
-			return - EINVAL;
+			return -EINVAL;
 	}
 
 	ctx->key_len = key_len * 8;
@@ -530,23 +529,24 @@
 		/*
 		 * compute kappa^{r+1} from kappa^r:
 		 */
-		if (r == R) {
+		if (r == R)
 			break;
-		}
 		for (i = 0; i < N; i++) {
 			int j = i;
 			inter[i]  = T0[(kappa[j--] >> 24)       ];
-			if (j < 0) j = N - 1;
+			if (j < 0)
+				j = N - 1;
 			inter[i] ^= T1[(kappa[j--] >> 16) & 0xff];
-			if (j < 0) j = N - 1;
+			if (j < 0)
+				j = N - 1;
 			inter[i] ^= T2[(kappa[j--] >>  8) & 0xff];
-			if (j < 0) j = N - 1;
+			if (j < 0)
+				j = N - 1;
 			inter[i] ^= T3[(kappa[j  ]      ) & 0xff];
 		}
 		kappa[0] = inter[0] ^ rc[r];
-		for (i = 1; i < N; i++) {
+		for (i = 1; i < N; i++)
 			kappa[i] = inter[i];
-		}
 	}
 
 	/*
@@ -690,7 +690,7 @@
 static int __init anubis_mod_init(void)
 {
 	int ret = 0;
-	
+
 	ret = crypto_register_alg(&anubis_alg);
 	return ret;
 }
diff --git a/crypto/api.c b/crypto/api.c
index 798526d..033a714 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -10,7 +10,7 @@
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option) 
+ * Software Foundation; either version 2 of the License, or (at your option)
  * any later version.
  *
  */
@@ -288,11 +288,11 @@
 
 	case CRYPTO_ALG_TYPE_COMPRESS:
 		return crypto_init_compress_ops(tfm);
-	
+
 	default:
 		break;
 	}
-	
+
 	BUG();
 	return -EINVAL;
 }
@@ -315,10 +315,9 @@
 	case CRYPTO_ALG_TYPE_COMPRESS:
 		crypto_exit_compress_ops(tfm);
 		break;
-	
+
 	default:
 		BUG();
-		
 	}
 }
 
@@ -593,12 +592,12 @@
 {
 	int ret = 0;
 	struct crypto_alg *alg = crypto_alg_mod_lookup(name, type, mask);
-	
+
 	if (!IS_ERR(alg)) {
 		crypto_mod_put(alg);
 		ret = 1;
 	}
-	
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(crypto_has_alg);
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 4d6f49a..1887090 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -194,7 +194,7 @@
 	scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
 				 authsize, 0);
 
-	err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG: 0;
+	err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
 	if (err)
 		goto out;
 
@@ -231,7 +231,7 @@
 	scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
 				 authsize, 0);
 
-	err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG: 0;
+	err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
 	if (err)
 		goto out;
 
@@ -464,7 +464,7 @@
 	ihash = ohash + authsize;
 	scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
 				 authsize, 0);
-	return memcmp(ihash, ohash, authsize) ? -EBADMSG: 0;
+	return memcmp(ihash, ohash, authsize) ? -EBADMSG : 0;
 }
 
 static int crypto_authenc_iverify(struct aead_request *req, u8 *iv,
@@ -557,11 +557,11 @@
 
 	ctx->auth = auth;
 	ctx->enc = enc;
-	
+
 	tfm->crt_aead.reqsize = max_t(unsigned int,
 				crypto_ahash_reqsize(auth) + ctx->reqoff +
 				sizeof(struct authenc_request_ctx) +
-				sizeof(struct ahash_request), 
+				sizeof(struct ahash_request),
 				sizeof(struct skcipher_givcrypt_request) +
 				crypto_ablkcipher_reqsize(enc) +
 				crypto_ablkcipher_ivsize(enc));
diff --git a/crypto/blowfish.c b/crypto/blowfish.c
index 6f5b487..a67d52e 100644
--- a/crypto/blowfish.c
+++ b/crypto/blowfish.c
@@ -1,4 +1,4 @@
-/* 
+/*
  * Cryptographic API.
  *
  * Blowfish Cipher Algorithm, by Bruce Schneier.
@@ -299,7 +299,7 @@
 	0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6,
 };
 
-/* 
+/*
  * Round loop unrolling macros, S is a pointer to a S-Box array
  * organized in 4 unsigned longs at a row.
  */
@@ -315,7 +315,7 @@
 
 /*
  * The blowfish encipher, processes 64-bit blocks.
- * NOTE: This function MUSTN'T respect endianess 
+ * NOTE: This function MUSTN'T respect endianess
  */
 static void encrypt_block(struct bf_ctx *bctx, u32 *dst, u32 *src)
 {
@@ -395,7 +395,7 @@
 	out_blk[1] = cpu_to_be32(yl);
 }
 
-/* 
+/*
  * Calculates the blowfish S and P boxes for encryption and decryption.
  */
 static int bf_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
@@ -417,10 +417,10 @@
 
 	/* Actual subkey generation */
 	for (j = 0, i = 0; i < 16 + 2; i++) {
-		temp = (((u32 )key[j] << 24) |
-			((u32 )key[(j + 1) % keylen] << 16) |
-			((u32 )key[(j + 2) % keylen] << 8) |
-			((u32 )key[(j + 3) % keylen]));
+		temp = (((u32)key[j] << 24) |
+			((u32)key[(j + 1) % keylen] << 16) |
+			((u32)key[(j + 2) % keylen] << 8) |
+			((u32)key[(j + 3) % keylen]));
 
 		P[i] = P[i] ^ temp;
 		j = (j + 4) % keylen;
@@ -444,7 +444,7 @@
 			S[count + 1] = data[1];
 		}
 	}
-	
+
 	/* Bruce says not to bother with the weak key check. */
 	return 0;
 }
diff --git a/crypto/camellia.c b/crypto/camellia.c
index 964635d..64cff46 100644
--- a/crypto/camellia.c
+++ b/crypto/camellia.c
@@ -39,271 +39,271 @@
 #include <asm/unaligned.h>
 
 static const u32 camellia_sp1110[256] = {
-	0x70707000,0x82828200,0x2c2c2c00,0xececec00,
-	0xb3b3b300,0x27272700,0xc0c0c000,0xe5e5e500,
-	0xe4e4e400,0x85858500,0x57575700,0x35353500,
-	0xeaeaea00,0x0c0c0c00,0xaeaeae00,0x41414100,
-	0x23232300,0xefefef00,0x6b6b6b00,0x93939300,
-	0x45454500,0x19191900,0xa5a5a500,0x21212100,
-	0xededed00,0x0e0e0e00,0x4f4f4f00,0x4e4e4e00,
-	0x1d1d1d00,0x65656500,0x92929200,0xbdbdbd00,
-	0x86868600,0xb8b8b800,0xafafaf00,0x8f8f8f00,
-	0x7c7c7c00,0xebebeb00,0x1f1f1f00,0xcecece00,
-	0x3e3e3e00,0x30303000,0xdcdcdc00,0x5f5f5f00,
-	0x5e5e5e00,0xc5c5c500,0x0b0b0b00,0x1a1a1a00,
-	0xa6a6a600,0xe1e1e100,0x39393900,0xcacaca00,
-	0xd5d5d500,0x47474700,0x5d5d5d00,0x3d3d3d00,
-	0xd9d9d900,0x01010100,0x5a5a5a00,0xd6d6d600,
-	0x51515100,0x56565600,0x6c6c6c00,0x4d4d4d00,
-	0x8b8b8b00,0x0d0d0d00,0x9a9a9a00,0x66666600,
-	0xfbfbfb00,0xcccccc00,0xb0b0b000,0x2d2d2d00,
-	0x74747400,0x12121200,0x2b2b2b00,0x20202000,
-	0xf0f0f000,0xb1b1b100,0x84848400,0x99999900,
-	0xdfdfdf00,0x4c4c4c00,0xcbcbcb00,0xc2c2c200,
-	0x34343400,0x7e7e7e00,0x76767600,0x05050500,
-	0x6d6d6d00,0xb7b7b700,0xa9a9a900,0x31313100,
-	0xd1d1d100,0x17171700,0x04040400,0xd7d7d700,
-	0x14141400,0x58585800,0x3a3a3a00,0x61616100,
-	0xdedede00,0x1b1b1b00,0x11111100,0x1c1c1c00,
-	0x32323200,0x0f0f0f00,0x9c9c9c00,0x16161600,
-	0x53535300,0x18181800,0xf2f2f200,0x22222200,
-	0xfefefe00,0x44444400,0xcfcfcf00,0xb2b2b200,
-	0xc3c3c300,0xb5b5b500,0x7a7a7a00,0x91919100,
-	0x24242400,0x08080800,0xe8e8e800,0xa8a8a800,
-	0x60606000,0xfcfcfc00,0x69696900,0x50505000,
-	0xaaaaaa00,0xd0d0d000,0xa0a0a000,0x7d7d7d00,
-	0xa1a1a100,0x89898900,0x62626200,0x97979700,
-	0x54545400,0x5b5b5b00,0x1e1e1e00,0x95959500,
-	0xe0e0e000,0xffffff00,0x64646400,0xd2d2d200,
-	0x10101000,0xc4c4c400,0x00000000,0x48484800,
-	0xa3a3a300,0xf7f7f700,0x75757500,0xdbdbdb00,
-	0x8a8a8a00,0x03030300,0xe6e6e600,0xdadada00,
-	0x09090900,0x3f3f3f00,0xdddddd00,0x94949400,
-	0x87878700,0x5c5c5c00,0x83838300,0x02020200,
-	0xcdcdcd00,0x4a4a4a00,0x90909000,0x33333300,
-	0x73737300,0x67676700,0xf6f6f600,0xf3f3f300,
-	0x9d9d9d00,0x7f7f7f00,0xbfbfbf00,0xe2e2e200,
-	0x52525200,0x9b9b9b00,0xd8d8d800,0x26262600,
-	0xc8c8c800,0x37373700,0xc6c6c600,0x3b3b3b00,
-	0x81818100,0x96969600,0x6f6f6f00,0x4b4b4b00,
-	0x13131300,0xbebebe00,0x63636300,0x2e2e2e00,
-	0xe9e9e900,0x79797900,0xa7a7a700,0x8c8c8c00,
-	0x9f9f9f00,0x6e6e6e00,0xbcbcbc00,0x8e8e8e00,
-	0x29292900,0xf5f5f500,0xf9f9f900,0xb6b6b600,
-	0x2f2f2f00,0xfdfdfd00,0xb4b4b400,0x59595900,
-	0x78787800,0x98989800,0x06060600,0x6a6a6a00,
-	0xe7e7e700,0x46464600,0x71717100,0xbababa00,
-	0xd4d4d400,0x25252500,0xababab00,0x42424200,
-	0x88888800,0xa2a2a200,0x8d8d8d00,0xfafafa00,
-	0x72727200,0x07070700,0xb9b9b900,0x55555500,
-	0xf8f8f800,0xeeeeee00,0xacacac00,0x0a0a0a00,
-	0x36363600,0x49494900,0x2a2a2a00,0x68686800,
-	0x3c3c3c00,0x38383800,0xf1f1f100,0xa4a4a400,
-	0x40404000,0x28282800,0xd3d3d300,0x7b7b7b00,
-	0xbbbbbb00,0xc9c9c900,0x43434300,0xc1c1c100,
-	0x15151500,0xe3e3e300,0xadadad00,0xf4f4f400,
-	0x77777700,0xc7c7c700,0x80808000,0x9e9e9e00,
+	0x70707000, 0x82828200, 0x2c2c2c00, 0xececec00,
+	0xb3b3b300, 0x27272700, 0xc0c0c000, 0xe5e5e500,
+	0xe4e4e400, 0x85858500, 0x57575700, 0x35353500,
+	0xeaeaea00, 0x0c0c0c00, 0xaeaeae00, 0x41414100,
+	0x23232300, 0xefefef00, 0x6b6b6b00, 0x93939300,
+	0x45454500, 0x19191900, 0xa5a5a500, 0x21212100,
+	0xededed00, 0x0e0e0e00, 0x4f4f4f00, 0x4e4e4e00,
+	0x1d1d1d00, 0x65656500, 0x92929200, 0xbdbdbd00,
+	0x86868600, 0xb8b8b800, 0xafafaf00, 0x8f8f8f00,
+	0x7c7c7c00, 0xebebeb00, 0x1f1f1f00, 0xcecece00,
+	0x3e3e3e00, 0x30303000, 0xdcdcdc00, 0x5f5f5f00,
+	0x5e5e5e00, 0xc5c5c500, 0x0b0b0b00, 0x1a1a1a00,
+	0xa6a6a600, 0xe1e1e100, 0x39393900, 0xcacaca00,
+	0xd5d5d500, 0x47474700, 0x5d5d5d00, 0x3d3d3d00,
+	0xd9d9d900, 0x01010100, 0x5a5a5a00, 0xd6d6d600,
+	0x51515100, 0x56565600, 0x6c6c6c00, 0x4d4d4d00,
+	0x8b8b8b00, 0x0d0d0d00, 0x9a9a9a00, 0x66666600,
+	0xfbfbfb00, 0xcccccc00, 0xb0b0b000, 0x2d2d2d00,
+	0x74747400, 0x12121200, 0x2b2b2b00, 0x20202000,
+	0xf0f0f000, 0xb1b1b100, 0x84848400, 0x99999900,
+	0xdfdfdf00, 0x4c4c4c00, 0xcbcbcb00, 0xc2c2c200,
+	0x34343400, 0x7e7e7e00, 0x76767600, 0x05050500,
+	0x6d6d6d00, 0xb7b7b700, 0xa9a9a900, 0x31313100,
+	0xd1d1d100, 0x17171700, 0x04040400, 0xd7d7d700,
+	0x14141400, 0x58585800, 0x3a3a3a00, 0x61616100,
+	0xdedede00, 0x1b1b1b00, 0x11111100, 0x1c1c1c00,
+	0x32323200, 0x0f0f0f00, 0x9c9c9c00, 0x16161600,
+	0x53535300, 0x18181800, 0xf2f2f200, 0x22222200,
+	0xfefefe00, 0x44444400, 0xcfcfcf00, 0xb2b2b200,
+	0xc3c3c300, 0xb5b5b500, 0x7a7a7a00, 0x91919100,
+	0x24242400, 0x08080800, 0xe8e8e800, 0xa8a8a800,
+	0x60606000, 0xfcfcfc00, 0x69696900, 0x50505000,
+	0xaaaaaa00, 0xd0d0d000, 0xa0a0a000, 0x7d7d7d00,
+	0xa1a1a100, 0x89898900, 0x62626200, 0x97979700,
+	0x54545400, 0x5b5b5b00, 0x1e1e1e00, 0x95959500,
+	0xe0e0e000, 0xffffff00, 0x64646400, 0xd2d2d200,
+	0x10101000, 0xc4c4c400, 0x00000000, 0x48484800,
+	0xa3a3a300, 0xf7f7f700, 0x75757500, 0xdbdbdb00,
+	0x8a8a8a00, 0x03030300, 0xe6e6e600, 0xdadada00,
+	0x09090900, 0x3f3f3f00, 0xdddddd00, 0x94949400,
+	0x87878700, 0x5c5c5c00, 0x83838300, 0x02020200,
+	0xcdcdcd00, 0x4a4a4a00, 0x90909000, 0x33333300,
+	0x73737300, 0x67676700, 0xf6f6f600, 0xf3f3f300,
+	0x9d9d9d00, 0x7f7f7f00, 0xbfbfbf00, 0xe2e2e200,
+	0x52525200, 0x9b9b9b00, 0xd8d8d800, 0x26262600,
+	0xc8c8c800, 0x37373700, 0xc6c6c600, 0x3b3b3b00,
+	0x81818100, 0x96969600, 0x6f6f6f00, 0x4b4b4b00,
+	0x13131300, 0xbebebe00, 0x63636300, 0x2e2e2e00,
+	0xe9e9e900, 0x79797900, 0xa7a7a700, 0x8c8c8c00,
+	0x9f9f9f00, 0x6e6e6e00, 0xbcbcbc00, 0x8e8e8e00,
+	0x29292900, 0xf5f5f500, 0xf9f9f900, 0xb6b6b600,
+	0x2f2f2f00, 0xfdfdfd00, 0xb4b4b400, 0x59595900,
+	0x78787800, 0x98989800, 0x06060600, 0x6a6a6a00,
+	0xe7e7e700, 0x46464600, 0x71717100, 0xbababa00,
+	0xd4d4d400, 0x25252500, 0xababab00, 0x42424200,
+	0x88888800, 0xa2a2a200, 0x8d8d8d00, 0xfafafa00,
+	0x72727200, 0x07070700, 0xb9b9b900, 0x55555500,
+	0xf8f8f800, 0xeeeeee00, 0xacacac00, 0x0a0a0a00,
+	0x36363600, 0x49494900, 0x2a2a2a00, 0x68686800,
+	0x3c3c3c00, 0x38383800, 0xf1f1f100, 0xa4a4a400,
+	0x40404000, 0x28282800, 0xd3d3d300, 0x7b7b7b00,
+	0xbbbbbb00, 0xc9c9c900, 0x43434300, 0xc1c1c100,
+	0x15151500, 0xe3e3e300, 0xadadad00, 0xf4f4f400,
+	0x77777700, 0xc7c7c700, 0x80808000, 0x9e9e9e00,
 };
 
 static const u32 camellia_sp0222[256] = {
-	0x00e0e0e0,0x00050505,0x00585858,0x00d9d9d9,
-	0x00676767,0x004e4e4e,0x00818181,0x00cbcbcb,
-	0x00c9c9c9,0x000b0b0b,0x00aeaeae,0x006a6a6a,
-	0x00d5d5d5,0x00181818,0x005d5d5d,0x00828282,
-	0x00464646,0x00dfdfdf,0x00d6d6d6,0x00272727,
-	0x008a8a8a,0x00323232,0x004b4b4b,0x00424242,
-	0x00dbdbdb,0x001c1c1c,0x009e9e9e,0x009c9c9c,
-	0x003a3a3a,0x00cacaca,0x00252525,0x007b7b7b,
-	0x000d0d0d,0x00717171,0x005f5f5f,0x001f1f1f,
-	0x00f8f8f8,0x00d7d7d7,0x003e3e3e,0x009d9d9d,
-	0x007c7c7c,0x00606060,0x00b9b9b9,0x00bebebe,
-	0x00bcbcbc,0x008b8b8b,0x00161616,0x00343434,
-	0x004d4d4d,0x00c3c3c3,0x00727272,0x00959595,
-	0x00ababab,0x008e8e8e,0x00bababa,0x007a7a7a,
-	0x00b3b3b3,0x00020202,0x00b4b4b4,0x00adadad,
-	0x00a2a2a2,0x00acacac,0x00d8d8d8,0x009a9a9a,
-	0x00171717,0x001a1a1a,0x00353535,0x00cccccc,
-	0x00f7f7f7,0x00999999,0x00616161,0x005a5a5a,
-	0x00e8e8e8,0x00242424,0x00565656,0x00404040,
-	0x00e1e1e1,0x00636363,0x00090909,0x00333333,
-	0x00bfbfbf,0x00989898,0x00979797,0x00858585,
-	0x00686868,0x00fcfcfc,0x00ececec,0x000a0a0a,
-	0x00dadada,0x006f6f6f,0x00535353,0x00626262,
-	0x00a3a3a3,0x002e2e2e,0x00080808,0x00afafaf,
-	0x00282828,0x00b0b0b0,0x00747474,0x00c2c2c2,
-	0x00bdbdbd,0x00363636,0x00222222,0x00383838,
-	0x00646464,0x001e1e1e,0x00393939,0x002c2c2c,
-	0x00a6a6a6,0x00303030,0x00e5e5e5,0x00444444,
-	0x00fdfdfd,0x00888888,0x009f9f9f,0x00656565,
-	0x00878787,0x006b6b6b,0x00f4f4f4,0x00232323,
-	0x00484848,0x00101010,0x00d1d1d1,0x00515151,
-	0x00c0c0c0,0x00f9f9f9,0x00d2d2d2,0x00a0a0a0,
-	0x00555555,0x00a1a1a1,0x00414141,0x00fafafa,
-	0x00434343,0x00131313,0x00c4c4c4,0x002f2f2f,
-	0x00a8a8a8,0x00b6b6b6,0x003c3c3c,0x002b2b2b,
-	0x00c1c1c1,0x00ffffff,0x00c8c8c8,0x00a5a5a5,
-	0x00202020,0x00898989,0x00000000,0x00909090,
-	0x00474747,0x00efefef,0x00eaeaea,0x00b7b7b7,
-	0x00151515,0x00060606,0x00cdcdcd,0x00b5b5b5,
-	0x00121212,0x007e7e7e,0x00bbbbbb,0x00292929,
-	0x000f0f0f,0x00b8b8b8,0x00070707,0x00040404,
-	0x009b9b9b,0x00949494,0x00212121,0x00666666,
-	0x00e6e6e6,0x00cecece,0x00ededed,0x00e7e7e7,
-	0x003b3b3b,0x00fefefe,0x007f7f7f,0x00c5c5c5,
-	0x00a4a4a4,0x00373737,0x00b1b1b1,0x004c4c4c,
-	0x00919191,0x006e6e6e,0x008d8d8d,0x00767676,
-	0x00030303,0x002d2d2d,0x00dedede,0x00969696,
-	0x00262626,0x007d7d7d,0x00c6c6c6,0x005c5c5c,
-	0x00d3d3d3,0x00f2f2f2,0x004f4f4f,0x00191919,
-	0x003f3f3f,0x00dcdcdc,0x00797979,0x001d1d1d,
-	0x00525252,0x00ebebeb,0x00f3f3f3,0x006d6d6d,
-	0x005e5e5e,0x00fbfbfb,0x00696969,0x00b2b2b2,
-	0x00f0f0f0,0x00313131,0x000c0c0c,0x00d4d4d4,
-	0x00cfcfcf,0x008c8c8c,0x00e2e2e2,0x00757575,
-	0x00a9a9a9,0x004a4a4a,0x00575757,0x00848484,
-	0x00111111,0x00454545,0x001b1b1b,0x00f5f5f5,
-	0x00e4e4e4,0x000e0e0e,0x00737373,0x00aaaaaa,
-	0x00f1f1f1,0x00dddddd,0x00595959,0x00141414,
-	0x006c6c6c,0x00929292,0x00545454,0x00d0d0d0,
-	0x00787878,0x00707070,0x00e3e3e3,0x00494949,
-	0x00808080,0x00505050,0x00a7a7a7,0x00f6f6f6,
-	0x00777777,0x00939393,0x00868686,0x00838383,
-	0x002a2a2a,0x00c7c7c7,0x005b5b5b,0x00e9e9e9,
-	0x00eeeeee,0x008f8f8f,0x00010101,0x003d3d3d,
+	0x00e0e0e0, 0x00050505, 0x00585858, 0x00d9d9d9,
+	0x00676767, 0x004e4e4e, 0x00818181, 0x00cbcbcb,
+	0x00c9c9c9, 0x000b0b0b, 0x00aeaeae, 0x006a6a6a,
+	0x00d5d5d5, 0x00181818, 0x005d5d5d, 0x00828282,
+	0x00464646, 0x00dfdfdf, 0x00d6d6d6, 0x00272727,
+	0x008a8a8a, 0x00323232, 0x004b4b4b, 0x00424242,
+	0x00dbdbdb, 0x001c1c1c, 0x009e9e9e, 0x009c9c9c,
+	0x003a3a3a, 0x00cacaca, 0x00252525, 0x007b7b7b,
+	0x000d0d0d, 0x00717171, 0x005f5f5f, 0x001f1f1f,
+	0x00f8f8f8, 0x00d7d7d7, 0x003e3e3e, 0x009d9d9d,
+	0x007c7c7c, 0x00606060, 0x00b9b9b9, 0x00bebebe,
+	0x00bcbcbc, 0x008b8b8b, 0x00161616, 0x00343434,
+	0x004d4d4d, 0x00c3c3c3, 0x00727272, 0x00959595,
+	0x00ababab, 0x008e8e8e, 0x00bababa, 0x007a7a7a,
+	0x00b3b3b3, 0x00020202, 0x00b4b4b4, 0x00adadad,
+	0x00a2a2a2, 0x00acacac, 0x00d8d8d8, 0x009a9a9a,
+	0x00171717, 0x001a1a1a, 0x00353535, 0x00cccccc,
+	0x00f7f7f7, 0x00999999, 0x00616161, 0x005a5a5a,
+	0x00e8e8e8, 0x00242424, 0x00565656, 0x00404040,
+	0x00e1e1e1, 0x00636363, 0x00090909, 0x00333333,
+	0x00bfbfbf, 0x00989898, 0x00979797, 0x00858585,
+	0x00686868, 0x00fcfcfc, 0x00ececec, 0x000a0a0a,
+	0x00dadada, 0x006f6f6f, 0x00535353, 0x00626262,
+	0x00a3a3a3, 0x002e2e2e, 0x00080808, 0x00afafaf,
+	0x00282828, 0x00b0b0b0, 0x00747474, 0x00c2c2c2,
+	0x00bdbdbd, 0x00363636, 0x00222222, 0x00383838,
+	0x00646464, 0x001e1e1e, 0x00393939, 0x002c2c2c,
+	0x00a6a6a6, 0x00303030, 0x00e5e5e5, 0x00444444,
+	0x00fdfdfd, 0x00888888, 0x009f9f9f, 0x00656565,
+	0x00878787, 0x006b6b6b, 0x00f4f4f4, 0x00232323,
+	0x00484848, 0x00101010, 0x00d1d1d1, 0x00515151,
+	0x00c0c0c0, 0x00f9f9f9, 0x00d2d2d2, 0x00a0a0a0,
+	0x00555555, 0x00a1a1a1, 0x00414141, 0x00fafafa,
+	0x00434343, 0x00131313, 0x00c4c4c4, 0x002f2f2f,
+	0x00a8a8a8, 0x00b6b6b6, 0x003c3c3c, 0x002b2b2b,
+	0x00c1c1c1, 0x00ffffff, 0x00c8c8c8, 0x00a5a5a5,
+	0x00202020, 0x00898989, 0x00000000, 0x00909090,
+	0x00474747, 0x00efefef, 0x00eaeaea, 0x00b7b7b7,
+	0x00151515, 0x00060606, 0x00cdcdcd, 0x00b5b5b5,
+	0x00121212, 0x007e7e7e, 0x00bbbbbb, 0x00292929,
+	0x000f0f0f, 0x00b8b8b8, 0x00070707, 0x00040404,
+	0x009b9b9b, 0x00949494, 0x00212121, 0x00666666,
+	0x00e6e6e6, 0x00cecece, 0x00ededed, 0x00e7e7e7,
+	0x003b3b3b, 0x00fefefe, 0x007f7f7f, 0x00c5c5c5,
+	0x00a4a4a4, 0x00373737, 0x00b1b1b1, 0x004c4c4c,
+	0x00919191, 0x006e6e6e, 0x008d8d8d, 0x00767676,
+	0x00030303, 0x002d2d2d, 0x00dedede, 0x00969696,
+	0x00262626, 0x007d7d7d, 0x00c6c6c6, 0x005c5c5c,
+	0x00d3d3d3, 0x00f2f2f2, 0x004f4f4f, 0x00191919,
+	0x003f3f3f, 0x00dcdcdc, 0x00797979, 0x001d1d1d,
+	0x00525252, 0x00ebebeb, 0x00f3f3f3, 0x006d6d6d,
+	0x005e5e5e, 0x00fbfbfb, 0x00696969, 0x00b2b2b2,
+	0x00f0f0f0, 0x00313131, 0x000c0c0c, 0x00d4d4d4,
+	0x00cfcfcf, 0x008c8c8c, 0x00e2e2e2, 0x00757575,
+	0x00a9a9a9, 0x004a4a4a, 0x00575757, 0x00848484,
+	0x00111111, 0x00454545, 0x001b1b1b, 0x00f5f5f5,
+	0x00e4e4e4, 0x000e0e0e, 0x00737373, 0x00aaaaaa,
+	0x00f1f1f1, 0x00dddddd, 0x00595959, 0x00141414,
+	0x006c6c6c, 0x00929292, 0x00545454, 0x00d0d0d0,
+	0x00787878, 0x00707070, 0x00e3e3e3, 0x00494949,
+	0x00808080, 0x00505050, 0x00a7a7a7, 0x00f6f6f6,
+	0x00777777, 0x00939393, 0x00868686, 0x00838383,
+	0x002a2a2a, 0x00c7c7c7, 0x005b5b5b, 0x00e9e9e9,
+	0x00eeeeee, 0x008f8f8f, 0x00010101, 0x003d3d3d,
 };
 
 static const u32 camellia_sp3033[256] = {
-	0x38003838,0x41004141,0x16001616,0x76007676,
-	0xd900d9d9,0x93009393,0x60006060,0xf200f2f2,
-	0x72007272,0xc200c2c2,0xab00abab,0x9a009a9a,
-	0x75007575,0x06000606,0x57005757,0xa000a0a0,
-	0x91009191,0xf700f7f7,0xb500b5b5,0xc900c9c9,
-	0xa200a2a2,0x8c008c8c,0xd200d2d2,0x90009090,
-	0xf600f6f6,0x07000707,0xa700a7a7,0x27002727,
-	0x8e008e8e,0xb200b2b2,0x49004949,0xde00dede,
-	0x43004343,0x5c005c5c,0xd700d7d7,0xc700c7c7,
-	0x3e003e3e,0xf500f5f5,0x8f008f8f,0x67006767,
-	0x1f001f1f,0x18001818,0x6e006e6e,0xaf00afaf,
-	0x2f002f2f,0xe200e2e2,0x85008585,0x0d000d0d,
-	0x53005353,0xf000f0f0,0x9c009c9c,0x65006565,
-	0xea00eaea,0xa300a3a3,0xae00aeae,0x9e009e9e,
-	0xec00ecec,0x80008080,0x2d002d2d,0x6b006b6b,
-	0xa800a8a8,0x2b002b2b,0x36003636,0xa600a6a6,
-	0xc500c5c5,0x86008686,0x4d004d4d,0x33003333,
-	0xfd00fdfd,0x66006666,0x58005858,0x96009696,
-	0x3a003a3a,0x09000909,0x95009595,0x10001010,
-	0x78007878,0xd800d8d8,0x42004242,0xcc00cccc,
-	0xef00efef,0x26002626,0xe500e5e5,0x61006161,
-	0x1a001a1a,0x3f003f3f,0x3b003b3b,0x82008282,
-	0xb600b6b6,0xdb00dbdb,0xd400d4d4,0x98009898,
-	0xe800e8e8,0x8b008b8b,0x02000202,0xeb00ebeb,
-	0x0a000a0a,0x2c002c2c,0x1d001d1d,0xb000b0b0,
-	0x6f006f6f,0x8d008d8d,0x88008888,0x0e000e0e,
-	0x19001919,0x87008787,0x4e004e4e,0x0b000b0b,
-	0xa900a9a9,0x0c000c0c,0x79007979,0x11001111,
-	0x7f007f7f,0x22002222,0xe700e7e7,0x59005959,
-	0xe100e1e1,0xda00dada,0x3d003d3d,0xc800c8c8,
-	0x12001212,0x04000404,0x74007474,0x54005454,
-	0x30003030,0x7e007e7e,0xb400b4b4,0x28002828,
-	0x55005555,0x68006868,0x50005050,0xbe00bebe,
-	0xd000d0d0,0xc400c4c4,0x31003131,0xcb00cbcb,
-	0x2a002a2a,0xad00adad,0x0f000f0f,0xca00caca,
-	0x70007070,0xff00ffff,0x32003232,0x69006969,
-	0x08000808,0x62006262,0x00000000,0x24002424,
-	0xd100d1d1,0xfb00fbfb,0xba00baba,0xed00eded,
-	0x45004545,0x81008181,0x73007373,0x6d006d6d,
-	0x84008484,0x9f009f9f,0xee00eeee,0x4a004a4a,
-	0xc300c3c3,0x2e002e2e,0xc100c1c1,0x01000101,
-	0xe600e6e6,0x25002525,0x48004848,0x99009999,
-	0xb900b9b9,0xb300b3b3,0x7b007b7b,0xf900f9f9,
-	0xce00cece,0xbf00bfbf,0xdf00dfdf,0x71007171,
-	0x29002929,0xcd00cdcd,0x6c006c6c,0x13001313,
-	0x64006464,0x9b009b9b,0x63006363,0x9d009d9d,
-	0xc000c0c0,0x4b004b4b,0xb700b7b7,0xa500a5a5,
-	0x89008989,0x5f005f5f,0xb100b1b1,0x17001717,
-	0xf400f4f4,0xbc00bcbc,0xd300d3d3,0x46004646,
-	0xcf00cfcf,0x37003737,0x5e005e5e,0x47004747,
-	0x94009494,0xfa00fafa,0xfc00fcfc,0x5b005b5b,
-	0x97009797,0xfe00fefe,0x5a005a5a,0xac00acac,
-	0x3c003c3c,0x4c004c4c,0x03000303,0x35003535,
-	0xf300f3f3,0x23002323,0xb800b8b8,0x5d005d5d,
-	0x6a006a6a,0x92009292,0xd500d5d5,0x21002121,
-	0x44004444,0x51005151,0xc600c6c6,0x7d007d7d,
-	0x39003939,0x83008383,0xdc00dcdc,0xaa00aaaa,
-	0x7c007c7c,0x77007777,0x56005656,0x05000505,
-	0x1b001b1b,0xa400a4a4,0x15001515,0x34003434,
-	0x1e001e1e,0x1c001c1c,0xf800f8f8,0x52005252,
-	0x20002020,0x14001414,0xe900e9e9,0xbd00bdbd,
-	0xdd00dddd,0xe400e4e4,0xa100a1a1,0xe000e0e0,
-	0x8a008a8a,0xf100f1f1,0xd600d6d6,0x7a007a7a,
-	0xbb00bbbb,0xe300e3e3,0x40004040,0x4f004f4f,
+	0x38003838, 0x41004141, 0x16001616, 0x76007676,
+	0xd900d9d9, 0x93009393, 0x60006060, 0xf200f2f2,
+	0x72007272, 0xc200c2c2, 0xab00abab, 0x9a009a9a,
+	0x75007575, 0x06000606, 0x57005757, 0xa000a0a0,
+	0x91009191, 0xf700f7f7, 0xb500b5b5, 0xc900c9c9,
+	0xa200a2a2, 0x8c008c8c, 0xd200d2d2, 0x90009090,
+	0xf600f6f6, 0x07000707, 0xa700a7a7, 0x27002727,
+	0x8e008e8e, 0xb200b2b2, 0x49004949, 0xde00dede,
+	0x43004343, 0x5c005c5c, 0xd700d7d7, 0xc700c7c7,
+	0x3e003e3e, 0xf500f5f5, 0x8f008f8f, 0x67006767,
+	0x1f001f1f, 0x18001818, 0x6e006e6e, 0xaf00afaf,
+	0x2f002f2f, 0xe200e2e2, 0x85008585, 0x0d000d0d,
+	0x53005353, 0xf000f0f0, 0x9c009c9c, 0x65006565,
+	0xea00eaea, 0xa300a3a3, 0xae00aeae, 0x9e009e9e,
+	0xec00ecec, 0x80008080, 0x2d002d2d, 0x6b006b6b,
+	0xa800a8a8, 0x2b002b2b, 0x36003636, 0xa600a6a6,
+	0xc500c5c5, 0x86008686, 0x4d004d4d, 0x33003333,
+	0xfd00fdfd, 0x66006666, 0x58005858, 0x96009696,
+	0x3a003a3a, 0x09000909, 0x95009595, 0x10001010,
+	0x78007878, 0xd800d8d8, 0x42004242, 0xcc00cccc,
+	0xef00efef, 0x26002626, 0xe500e5e5, 0x61006161,
+	0x1a001a1a, 0x3f003f3f, 0x3b003b3b, 0x82008282,
+	0xb600b6b6, 0xdb00dbdb, 0xd400d4d4, 0x98009898,
+	0xe800e8e8, 0x8b008b8b, 0x02000202, 0xeb00ebeb,
+	0x0a000a0a, 0x2c002c2c, 0x1d001d1d, 0xb000b0b0,
+	0x6f006f6f, 0x8d008d8d, 0x88008888, 0x0e000e0e,
+	0x19001919, 0x87008787, 0x4e004e4e, 0x0b000b0b,
+	0xa900a9a9, 0x0c000c0c, 0x79007979, 0x11001111,
+	0x7f007f7f, 0x22002222, 0xe700e7e7, 0x59005959,
+	0xe100e1e1, 0xda00dada, 0x3d003d3d, 0xc800c8c8,
+	0x12001212, 0x04000404, 0x74007474, 0x54005454,
+	0x30003030, 0x7e007e7e, 0xb400b4b4, 0x28002828,
+	0x55005555, 0x68006868, 0x50005050, 0xbe00bebe,
+	0xd000d0d0, 0xc400c4c4, 0x31003131, 0xcb00cbcb,
+	0x2a002a2a, 0xad00adad, 0x0f000f0f, 0xca00caca,
+	0x70007070, 0xff00ffff, 0x32003232, 0x69006969,
+	0x08000808, 0x62006262, 0x00000000, 0x24002424,
+	0xd100d1d1, 0xfb00fbfb, 0xba00baba, 0xed00eded,
+	0x45004545, 0x81008181, 0x73007373, 0x6d006d6d,
+	0x84008484, 0x9f009f9f, 0xee00eeee, 0x4a004a4a,
+	0xc300c3c3, 0x2e002e2e, 0xc100c1c1, 0x01000101,
+	0xe600e6e6, 0x25002525, 0x48004848, 0x99009999,
+	0xb900b9b9, 0xb300b3b3, 0x7b007b7b, 0xf900f9f9,
+	0xce00cece, 0xbf00bfbf, 0xdf00dfdf, 0x71007171,
+	0x29002929, 0xcd00cdcd, 0x6c006c6c, 0x13001313,
+	0x64006464, 0x9b009b9b, 0x63006363, 0x9d009d9d,
+	0xc000c0c0, 0x4b004b4b, 0xb700b7b7, 0xa500a5a5,
+	0x89008989, 0x5f005f5f, 0xb100b1b1, 0x17001717,
+	0xf400f4f4, 0xbc00bcbc, 0xd300d3d3, 0x46004646,
+	0xcf00cfcf, 0x37003737, 0x5e005e5e, 0x47004747,
+	0x94009494, 0xfa00fafa, 0xfc00fcfc, 0x5b005b5b,
+	0x97009797, 0xfe00fefe, 0x5a005a5a, 0xac00acac,
+	0x3c003c3c, 0x4c004c4c, 0x03000303, 0x35003535,
+	0xf300f3f3, 0x23002323, 0xb800b8b8, 0x5d005d5d,
+	0x6a006a6a, 0x92009292, 0xd500d5d5, 0x21002121,
+	0x44004444, 0x51005151, 0xc600c6c6, 0x7d007d7d,
+	0x39003939, 0x83008383, 0xdc00dcdc, 0xaa00aaaa,
+	0x7c007c7c, 0x77007777, 0x56005656, 0x05000505,
+	0x1b001b1b, 0xa400a4a4, 0x15001515, 0x34003434,
+	0x1e001e1e, 0x1c001c1c, 0xf800f8f8, 0x52005252,
+	0x20002020, 0x14001414, 0xe900e9e9, 0xbd00bdbd,
+	0xdd00dddd, 0xe400e4e4, 0xa100a1a1, 0xe000e0e0,
+	0x8a008a8a, 0xf100f1f1, 0xd600d6d6, 0x7a007a7a,
+	0xbb00bbbb, 0xe300e3e3, 0x40004040, 0x4f004f4f,
 };
 
 static const u32 camellia_sp4404[256] = {
-	0x70700070,0x2c2c002c,0xb3b300b3,0xc0c000c0,
-	0xe4e400e4,0x57570057,0xeaea00ea,0xaeae00ae,
-	0x23230023,0x6b6b006b,0x45450045,0xa5a500a5,
-	0xeded00ed,0x4f4f004f,0x1d1d001d,0x92920092,
-	0x86860086,0xafaf00af,0x7c7c007c,0x1f1f001f,
-	0x3e3e003e,0xdcdc00dc,0x5e5e005e,0x0b0b000b,
-	0xa6a600a6,0x39390039,0xd5d500d5,0x5d5d005d,
-	0xd9d900d9,0x5a5a005a,0x51510051,0x6c6c006c,
-	0x8b8b008b,0x9a9a009a,0xfbfb00fb,0xb0b000b0,
-	0x74740074,0x2b2b002b,0xf0f000f0,0x84840084,
-	0xdfdf00df,0xcbcb00cb,0x34340034,0x76760076,
-	0x6d6d006d,0xa9a900a9,0xd1d100d1,0x04040004,
-	0x14140014,0x3a3a003a,0xdede00de,0x11110011,
-	0x32320032,0x9c9c009c,0x53530053,0xf2f200f2,
-	0xfefe00fe,0xcfcf00cf,0xc3c300c3,0x7a7a007a,
-	0x24240024,0xe8e800e8,0x60600060,0x69690069,
-	0xaaaa00aa,0xa0a000a0,0xa1a100a1,0x62620062,
-	0x54540054,0x1e1e001e,0xe0e000e0,0x64640064,
-	0x10100010,0x00000000,0xa3a300a3,0x75750075,
-	0x8a8a008a,0xe6e600e6,0x09090009,0xdddd00dd,
-	0x87870087,0x83830083,0xcdcd00cd,0x90900090,
-	0x73730073,0xf6f600f6,0x9d9d009d,0xbfbf00bf,
-	0x52520052,0xd8d800d8,0xc8c800c8,0xc6c600c6,
-	0x81810081,0x6f6f006f,0x13130013,0x63630063,
-	0xe9e900e9,0xa7a700a7,0x9f9f009f,0xbcbc00bc,
-	0x29290029,0xf9f900f9,0x2f2f002f,0xb4b400b4,
-	0x78780078,0x06060006,0xe7e700e7,0x71710071,
-	0xd4d400d4,0xabab00ab,0x88880088,0x8d8d008d,
-	0x72720072,0xb9b900b9,0xf8f800f8,0xacac00ac,
-	0x36360036,0x2a2a002a,0x3c3c003c,0xf1f100f1,
-	0x40400040,0xd3d300d3,0xbbbb00bb,0x43430043,
-	0x15150015,0xadad00ad,0x77770077,0x80800080,
-	0x82820082,0xecec00ec,0x27270027,0xe5e500e5,
-	0x85850085,0x35350035,0x0c0c000c,0x41410041,
-	0xefef00ef,0x93930093,0x19190019,0x21210021,
-	0x0e0e000e,0x4e4e004e,0x65650065,0xbdbd00bd,
-	0xb8b800b8,0x8f8f008f,0xebeb00eb,0xcece00ce,
-	0x30300030,0x5f5f005f,0xc5c500c5,0x1a1a001a,
-	0xe1e100e1,0xcaca00ca,0x47470047,0x3d3d003d,
-	0x01010001,0xd6d600d6,0x56560056,0x4d4d004d,
-	0x0d0d000d,0x66660066,0xcccc00cc,0x2d2d002d,
-	0x12120012,0x20200020,0xb1b100b1,0x99990099,
-	0x4c4c004c,0xc2c200c2,0x7e7e007e,0x05050005,
-	0xb7b700b7,0x31310031,0x17170017,0xd7d700d7,
-	0x58580058,0x61610061,0x1b1b001b,0x1c1c001c,
-	0x0f0f000f,0x16160016,0x18180018,0x22220022,
-	0x44440044,0xb2b200b2,0xb5b500b5,0x91910091,
-	0x08080008,0xa8a800a8,0xfcfc00fc,0x50500050,
-	0xd0d000d0,0x7d7d007d,0x89890089,0x97970097,
-	0x5b5b005b,0x95950095,0xffff00ff,0xd2d200d2,
-	0xc4c400c4,0x48480048,0xf7f700f7,0xdbdb00db,
-	0x03030003,0xdada00da,0x3f3f003f,0x94940094,
-	0x5c5c005c,0x02020002,0x4a4a004a,0x33330033,
-	0x67670067,0xf3f300f3,0x7f7f007f,0xe2e200e2,
-	0x9b9b009b,0x26260026,0x37370037,0x3b3b003b,
-	0x96960096,0x4b4b004b,0xbebe00be,0x2e2e002e,
-	0x79790079,0x8c8c008c,0x6e6e006e,0x8e8e008e,
-	0xf5f500f5,0xb6b600b6,0xfdfd00fd,0x59590059,
-	0x98980098,0x6a6a006a,0x46460046,0xbaba00ba,
-	0x25250025,0x42420042,0xa2a200a2,0xfafa00fa,
-	0x07070007,0x55550055,0xeeee00ee,0x0a0a000a,
-	0x49490049,0x68680068,0x38380038,0xa4a400a4,
-	0x28280028,0x7b7b007b,0xc9c900c9,0xc1c100c1,
-	0xe3e300e3,0xf4f400f4,0xc7c700c7,0x9e9e009e,
+	0x70700070, 0x2c2c002c, 0xb3b300b3, 0xc0c000c0,
+	0xe4e400e4, 0x57570057, 0xeaea00ea, 0xaeae00ae,
+	0x23230023, 0x6b6b006b, 0x45450045, 0xa5a500a5,
+	0xeded00ed, 0x4f4f004f, 0x1d1d001d, 0x92920092,
+	0x86860086, 0xafaf00af, 0x7c7c007c, 0x1f1f001f,
+	0x3e3e003e, 0xdcdc00dc, 0x5e5e005e, 0x0b0b000b,
+	0xa6a600a6, 0x39390039, 0xd5d500d5, 0x5d5d005d,
+	0xd9d900d9, 0x5a5a005a, 0x51510051, 0x6c6c006c,
+	0x8b8b008b, 0x9a9a009a, 0xfbfb00fb, 0xb0b000b0,
+	0x74740074, 0x2b2b002b, 0xf0f000f0, 0x84840084,
+	0xdfdf00df, 0xcbcb00cb, 0x34340034, 0x76760076,
+	0x6d6d006d, 0xa9a900a9, 0xd1d100d1, 0x04040004,
+	0x14140014, 0x3a3a003a, 0xdede00de, 0x11110011,
+	0x32320032, 0x9c9c009c, 0x53530053, 0xf2f200f2,
+	0xfefe00fe, 0xcfcf00cf, 0xc3c300c3, 0x7a7a007a,
+	0x24240024, 0xe8e800e8, 0x60600060, 0x69690069,
+	0xaaaa00aa, 0xa0a000a0, 0xa1a100a1, 0x62620062,
+	0x54540054, 0x1e1e001e, 0xe0e000e0, 0x64640064,
+	0x10100010, 0x00000000, 0xa3a300a3, 0x75750075,
+	0x8a8a008a, 0xe6e600e6, 0x09090009, 0xdddd00dd,
+	0x87870087, 0x83830083, 0xcdcd00cd, 0x90900090,
+	0x73730073, 0xf6f600f6, 0x9d9d009d, 0xbfbf00bf,
+	0x52520052, 0xd8d800d8, 0xc8c800c8, 0xc6c600c6,
+	0x81810081, 0x6f6f006f, 0x13130013, 0x63630063,
+	0xe9e900e9, 0xa7a700a7, 0x9f9f009f, 0xbcbc00bc,
+	0x29290029, 0xf9f900f9, 0x2f2f002f, 0xb4b400b4,
+	0x78780078, 0x06060006, 0xe7e700e7, 0x71710071,
+	0xd4d400d4, 0xabab00ab, 0x88880088, 0x8d8d008d,
+	0x72720072, 0xb9b900b9, 0xf8f800f8, 0xacac00ac,
+	0x36360036, 0x2a2a002a, 0x3c3c003c, 0xf1f100f1,
+	0x40400040, 0xd3d300d3, 0xbbbb00bb, 0x43430043,
+	0x15150015, 0xadad00ad, 0x77770077, 0x80800080,
+	0x82820082, 0xecec00ec, 0x27270027, 0xe5e500e5,
+	0x85850085, 0x35350035, 0x0c0c000c, 0x41410041,
+	0xefef00ef, 0x93930093, 0x19190019, 0x21210021,
+	0x0e0e000e, 0x4e4e004e, 0x65650065, 0xbdbd00bd,
+	0xb8b800b8, 0x8f8f008f, 0xebeb00eb, 0xcece00ce,
+	0x30300030, 0x5f5f005f, 0xc5c500c5, 0x1a1a001a,
+	0xe1e100e1, 0xcaca00ca, 0x47470047, 0x3d3d003d,
+	0x01010001, 0xd6d600d6, 0x56560056, 0x4d4d004d,
+	0x0d0d000d, 0x66660066, 0xcccc00cc, 0x2d2d002d,
+	0x12120012, 0x20200020, 0xb1b100b1, 0x99990099,
+	0x4c4c004c, 0xc2c200c2, 0x7e7e007e, 0x05050005,
+	0xb7b700b7, 0x31310031, 0x17170017, 0xd7d700d7,
+	0x58580058, 0x61610061, 0x1b1b001b, 0x1c1c001c,
+	0x0f0f000f, 0x16160016, 0x18180018, 0x22220022,
+	0x44440044, 0xb2b200b2, 0xb5b500b5, 0x91910091,
+	0x08080008, 0xa8a800a8, 0xfcfc00fc, 0x50500050,
+	0xd0d000d0, 0x7d7d007d, 0x89890089, 0x97970097,
+	0x5b5b005b, 0x95950095, 0xffff00ff, 0xd2d200d2,
+	0xc4c400c4, 0x48480048, 0xf7f700f7, 0xdbdb00db,
+	0x03030003, 0xdada00da, 0x3f3f003f, 0x94940094,
+	0x5c5c005c, 0x02020002, 0x4a4a004a, 0x33330033,
+	0x67670067, 0xf3f300f3, 0x7f7f007f, 0xe2e200e2,
+	0x9b9b009b, 0x26260026, 0x37370037, 0x3b3b003b,
+	0x96960096, 0x4b4b004b, 0xbebe00be, 0x2e2e002e,
+	0x79790079, 0x8c8c008c, 0x6e6e006e, 0x8e8e008e,
+	0xf5f500f5, 0xb6b600b6, 0xfdfd00fd, 0x59590059,
+	0x98980098, 0x6a6a006a, 0x46460046, 0xbaba00ba,
+	0x25250025, 0x42420042, 0xa2a200a2, 0xfafa00fa,
+	0x07070007, 0x55550055, 0xeeee00ee, 0x0a0a000a,
+	0x49490049, 0x68680068, 0x38380038, 0xa4a400a4,
+	0x28280028, 0x7b7b007b, 0xc9c900c9, 0xc1c100c1,
+	0xe3e300e3, 0xf4f400f4, 0xc7c700c7, 0x9e9e009e,
 };
 
 
@@ -344,7 +344,7 @@
 	lr = (lr << bits) + (rl >> (32 - bits));	\
 	rl = (rl << bits) + (rr >> (32 - bits));	\
 	rr = (rr << bits) + (w0 >> (32 - bits));	\
-    } while(0)
+    } while (0)
 
 #define ROLDQo32(ll, lr, rl, rr, w0, w1, bits)		\
     do {						\
@@ -354,7 +354,7 @@
 	lr = (rl << (bits - 32)) + (rr >> (64 - bits));	\
 	rl = (rr << (bits - 32)) + (w0 >> (64 - bits));	\
 	rr = (w0 << (bits - 32)) + (w1 >> (64 - bits));	\
-    } while(0)
+    } while (0)
 
 #define CAMELLIA_F(xl, xr, kl, kr, yl, yr, il, ir, t0, t1)	\
     do {							\
@@ -373,7 +373,7 @@
 	yl ^= yr;						\
 	yr = ror32(yr, 8);					\
 	yr ^= yl;						\
-    } while(0)
+    } while (0)
 
 #define SUBKEY_L(INDEX) (subkey[(INDEX)*2])
 #define SUBKEY_R(INDEX) (subkey[(INDEX)*2 + 1])
@@ -835,7 +835,7 @@
 static void camellia_setup192(const unsigned char *key, u32 *subkey)
 {
 	unsigned char kk[32];
-	u32 krll, krlr, krrl,krrr;
+	u32 krll, krlr, krrl, krrr;
 
 	memcpy(kk, key, 24);
 	memcpy((unsigned char *)&krll, key+16, 4);
@@ -865,7 +865,7 @@
 	t1 |= lr;							\
 	ll ^= t1;							\
 	rr ^= rol32(t3, 1);						\
-    } while(0)
+    } while (0)
 
 #define CAMELLIA_ROUNDSM(xl, xr, kl, kr, yl, yr, il, ir)		\
     do {								\
@@ -881,12 +881,12 @@
 	ir ^= il ^ kr;							\
 	yl ^= ir;							\
 	yr ^= ror32(il, 8) ^ ir;						\
-    } while(0)
+    } while (0)
 
 /* max = 24: 128bit encrypt, max = 32: 256bit encrypt */
 static void camellia_do_encrypt(const u32 *subkey, u32 *io, unsigned max)
 {
-	u32 il,ir,t0,t1;               /* temporary variables */
+	u32 il, ir, t0, t1;            /* temporary variables */
 
 	/* pre whitening but absorb kw2 */
 	io[0] ^= SUBKEY_L(0);
@@ -894,30 +894,30 @@
 
 	/* main iteration */
 #define ROUNDS(i) do { \
-	CAMELLIA_ROUNDSM(io[0],io[1], \
-			 SUBKEY_L(i + 2),SUBKEY_R(i + 2), \
-			 io[2],io[3],il,ir); \
-	CAMELLIA_ROUNDSM(io[2],io[3], \
-			 SUBKEY_L(i + 3),SUBKEY_R(i + 3), \
-			 io[0],io[1],il,ir); \
-	CAMELLIA_ROUNDSM(io[0],io[1], \
-			 SUBKEY_L(i + 4),SUBKEY_R(i + 4), \
-			 io[2],io[3],il,ir); \
-	CAMELLIA_ROUNDSM(io[2],io[3], \
-			 SUBKEY_L(i + 5),SUBKEY_R(i + 5), \
-			 io[0],io[1],il,ir); \
-	CAMELLIA_ROUNDSM(io[0],io[1], \
-			 SUBKEY_L(i + 6),SUBKEY_R(i + 6), \
-			 io[2],io[3],il,ir); \
-	CAMELLIA_ROUNDSM(io[2],io[3], \
-			 SUBKEY_L(i + 7),SUBKEY_R(i + 7), \
-			 io[0],io[1],il,ir); \
+	CAMELLIA_ROUNDSM(io[0], io[1], \
+			 SUBKEY_L(i + 2), SUBKEY_R(i + 2), \
+			 io[2], io[3], il, ir); \
+	CAMELLIA_ROUNDSM(io[2], io[3], \
+			 SUBKEY_L(i + 3), SUBKEY_R(i + 3), \
+			 io[0], io[1], il, ir); \
+	CAMELLIA_ROUNDSM(io[0], io[1], \
+			 SUBKEY_L(i + 4), SUBKEY_R(i + 4), \
+			 io[2], io[3], il, ir); \
+	CAMELLIA_ROUNDSM(io[2], io[3], \
+			 SUBKEY_L(i + 5), SUBKEY_R(i + 5), \
+			 io[0], io[1], il, ir); \
+	CAMELLIA_ROUNDSM(io[0], io[1], \
+			 SUBKEY_L(i + 6), SUBKEY_R(i + 6), \
+			 io[2], io[3], il, ir); \
+	CAMELLIA_ROUNDSM(io[2], io[3], \
+			 SUBKEY_L(i + 7), SUBKEY_R(i + 7), \
+			 io[0], io[1], il, ir); \
 } while (0)
 #define FLS(i) do { \
-	CAMELLIA_FLS(io[0],io[1],io[2],io[3], \
-		     SUBKEY_L(i + 0),SUBKEY_R(i + 0), \
-		     SUBKEY_L(i + 1),SUBKEY_R(i + 1), \
-		     t0,t1,il,ir); \
+	CAMELLIA_FLS(io[0], io[1], io[2], io[3], \
+		     SUBKEY_L(i + 0), SUBKEY_R(i + 0), \
+		     SUBKEY_L(i + 1), SUBKEY_R(i + 1), \
+		     t0, t1, il, ir); \
 } while (0)
 
 	ROUNDS(0);
@@ -941,7 +941,7 @@
 
 static void camellia_do_decrypt(const u32 *subkey, u32 *io, unsigned i)
 {
-	u32 il,ir,t0,t1;               /* temporary variables */
+	u32 il, ir, t0, t1;            /* temporary variables */
 
 	/* pre whitening but absorb kw2 */
 	io[0] ^= SUBKEY_L(i);
@@ -949,30 +949,30 @@
 
 	/* main iteration */
 #define ROUNDS(i) do { \
-	CAMELLIA_ROUNDSM(io[0],io[1], \
-			 SUBKEY_L(i + 7),SUBKEY_R(i + 7), \
-			 io[2],io[3],il,ir); \
-	CAMELLIA_ROUNDSM(io[2],io[3], \
-			 SUBKEY_L(i + 6),SUBKEY_R(i + 6), \
-			 io[0],io[1],il,ir); \
-	CAMELLIA_ROUNDSM(io[0],io[1], \
-			 SUBKEY_L(i + 5),SUBKEY_R(i + 5), \
-			 io[2],io[3],il,ir); \
-	CAMELLIA_ROUNDSM(io[2],io[3], \
-			 SUBKEY_L(i + 4),SUBKEY_R(i + 4), \
-			 io[0],io[1],il,ir); \
-	CAMELLIA_ROUNDSM(io[0],io[1], \
-			 SUBKEY_L(i + 3),SUBKEY_R(i + 3), \
-			 io[2],io[3],il,ir); \
-	CAMELLIA_ROUNDSM(io[2],io[3], \
-			 SUBKEY_L(i + 2),SUBKEY_R(i + 2), \
-			 io[0],io[1],il,ir); \
+	CAMELLIA_ROUNDSM(io[0], io[1], \
+			 SUBKEY_L(i + 7), SUBKEY_R(i + 7), \
+			 io[2], io[3], il, ir); \
+	CAMELLIA_ROUNDSM(io[2], io[3], \
+			 SUBKEY_L(i + 6), SUBKEY_R(i + 6), \
+			 io[0], io[1], il, ir); \
+	CAMELLIA_ROUNDSM(io[0], io[1], \
+			 SUBKEY_L(i + 5), SUBKEY_R(i + 5), \
+			 io[2], io[3], il, ir); \
+	CAMELLIA_ROUNDSM(io[2], io[3], \
+			 SUBKEY_L(i + 4), SUBKEY_R(i + 4), \
+			 io[0], io[1], il, ir); \
+	CAMELLIA_ROUNDSM(io[0], io[1], \
+			 SUBKEY_L(i + 3), SUBKEY_R(i + 3), \
+			 io[2], io[3], il, ir); \
+	CAMELLIA_ROUNDSM(io[2], io[3], \
+			 SUBKEY_L(i + 2), SUBKEY_R(i + 2), \
+			 io[0], io[1], il, ir); \
 } while (0)
 #define FLS(i) do { \
-	CAMELLIA_FLS(io[0],io[1],io[2],io[3], \
-		     SUBKEY_L(i + 1),SUBKEY_R(i + 1), \
-		     SUBKEY_L(i + 0),SUBKEY_R(i + 0), \
-		     t0,t1,il,ir); \
+	CAMELLIA_FLS(io[0], io[1], io[2], io[3], \
+		     SUBKEY_L(i + 1), SUBKEY_R(i + 1), \
+		     SUBKEY_L(i + 0), SUBKEY_R(i + 0), \
+		     t0, t1, il, ir); \
 } while (0)
 
 	if (i == 32) {
diff --git a/crypto/cast5.c b/crypto/cast5.c
index 8cbe28f..a1d2294 100644
--- a/crypto/cast5.c
+++ b/crypto/cast5.c
@@ -569,12 +569,12 @@
 	0xeaee6801, 0x8db2a283, 0xea8bf59e
 };
 
-#define F1(D,m,r)  (  (I = ((m) + (D))), (I=rol32(I,(r))),   \
-    (((s1[I >> 24] ^ s2[(I>>16)&0xff]) - s3[(I>>8)&0xff]) + s4[I&0xff]) )
-#define F2(D,m,r)  (  (I = ((m) ^ (D))), (I=rol32(I,(r))),   \
-    (((s1[I >> 24] - s2[(I>>16)&0xff]) + s3[(I>>8)&0xff]) ^ s4[I&0xff]) )
-#define F3(D,m,r)  (  (I = ((m) - (D))), (I=rol32(I,(r))),   \
-    (((s1[I >> 24] + s2[(I>>16)&0xff]) ^ s3[(I>>8)&0xff]) - s4[I&0xff]) )
+#define F1(D, m, r)  ((I = ((m) + (D))), (I = rol32(I, (r))),   \
+    (((s1[I >> 24] ^ s2[(I>>16)&0xff]) - s3[(I>>8)&0xff]) + s4[I&0xff]))
+#define F2(D, m, r)  ((I = ((m) ^ (D))), (I = rol32(I, (r))),   \
+    (((s1[I >> 24] - s2[(I>>16)&0xff]) + s3[(I>>8)&0xff]) ^ s4[I&0xff]))
+#define F3(D, m, r)  ((I = ((m) - (D))), (I = rol32(I, (r))),   \
+    (((s1[I >> 24] + s2[(I>>16)&0xff]) ^ s3[(I>>8)&0xff]) - s4[I&0xff]))
 
 
 static void cast5_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf)
@@ -694,7 +694,7 @@
 	dst[1] = cpu_to_be32(l);
 }
 
-static void key_schedule(u32 * x, u32 * z, u32 * k)
+static void key_schedule(u32 *x, u32 *z, u32 *k)
 {
 
 #define xi(i)   ((x[(i)/4] >> (8*(3-((i)%4)))) & 0xff)
diff --git a/crypto/cast6.c b/crypto/cast6.c
index 007d02b..e0c15a6 100644
--- a/crypto/cast6.c
+++ b/crypto/cast6.c
@@ -11,7 +11,7 @@
  * under the terms of GNU General Public License as published by the Free
  * Software Foundation; either version 2 of the License, or (at your option)
  * any later version.
- * 
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
@@ -35,12 +35,12 @@
 	u8 Kr[12][4];
 };
 
-#define F1(D,r,m)  (  (I = ((m) + (D))), (I=rol32(I,(r))),   \
-    (((s1[I >> 24] ^ s2[(I>>16)&0xff]) - s3[(I>>8)&0xff]) + s4[I&0xff]) )
-#define F2(D,r,m)  (  (I = ((m) ^ (D))), (I=rol32(I,(r))),   \
-    (((s1[I >> 24] - s2[(I>>16)&0xff]) + s3[(I>>8)&0xff]) ^ s4[I&0xff]) )
-#define F3(D,r,m)  (  (I = ((m) - (D))), (I=rol32(I,(r))),   \
-    (((s1[I >> 24] + s2[(I>>16)&0xff]) ^ s3[(I>>8)&0xff]) - s4[I&0xff]) )
+#define F1(D, r, m)  ((I = ((m) + (D))), (I = rol32(I, (r))),   \
+    (((s1[I >> 24] ^ s2[(I>>16)&0xff]) - s3[(I>>8)&0xff]) + s4[I&0xff]))
+#define F2(D, r, m)  ((I = ((m) ^ (D))), (I = rol32(I, (r))),   \
+    (((s1[I >> 24] - s2[(I>>16)&0xff]) + s3[(I>>8)&0xff]) ^ s4[I&0xff]))
+#define F3(D, r, m)  ((I = ((m) - (D))), (I = rol32(I, (r))),   \
+    (((s1[I >> 24] + s2[(I>>16)&0xff]) ^ s3[(I>>8)&0xff]) - s4[I&0xff]))
 
 static const u32 s1[256] = {
 	0x30fb40d4, 0x9fa0ff0b, 0x6beccd2f, 0x3f258c7a, 0x1e213f2f,
@@ -312,7 +312,7 @@
 
 static const u32 Tm[24][8] = {
 	{ 0x5a827999, 0xc95c653a, 0x383650db, 0xa7103c7c, 0x15ea281d,
-		0x84c413be, 0xf39dff5f, 0x6277eb00 } , 
+		0x84c413be, 0xf39dff5f, 0x6277eb00 } ,
 	{ 0xd151d6a1, 0x402bc242, 0xaf05ade3, 0x1ddf9984, 0x8cb98525,
 		0xfb9370c6, 0x6a6d5c67, 0xd9474808 } ,
 	{ 0x482133a9, 0xb6fb1f4a, 0x25d50aeb, 0x94aef68c, 0x0388e22d,
@@ -369,7 +369,8 @@
 };
 
 /* forward octave */
-static void W(u32 *key, unsigned int i) {
+static void W(u32 *key, unsigned int i)
+{
 	u32 I;
 	key[6] ^= F1(key[7], Tr[i % 4][0], Tm[i][0]);
 	key[5] ^= F2(key[6], Tr[i % 4][1], Tm[i][1]);
@@ -377,7 +378,7 @@
 	key[3] ^= F1(key[4], Tr[i % 4][3], Tm[i][3]);
 	key[2] ^= F2(key[3], Tr[i % 4][4], Tm[i][4]);
 	key[1] ^= F3(key[2], Tr[i % 4][5], Tm[i][5]);
-	key[0] ^= F1(key[1], Tr[i % 4][6], Tm[i][6]);	
+	key[0] ^= F1(key[1], Tr[i % 4][6], Tm[i][6]);
 	key[7] ^= F2(key[0], Tr[i % 4][7], Tm[i][7]);
 }
 
@@ -393,11 +394,11 @@
 	if (key_len % 4 != 0) {
 		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
 		return -EINVAL;
-	}	
+	}
 
-	memset (p_key, 0, 32);
-	memcpy (p_key, in_key, key_len);
-	
+	memset(p_key, 0, 32);
+	memcpy(p_key, in_key, key_len);
+
 	key[0] = be32_to_cpu(p_key[0]);		/* A */
 	key[1] = be32_to_cpu(p_key[1]);		/* B */
 	key[2] = be32_to_cpu(p_key[2]);		/* C */
@@ -406,18 +407,16 @@
 	key[5] = be32_to_cpu(p_key[5]);		/* F */
 	key[6] = be32_to_cpu(p_key[6]);		/* G */
 	key[7] = be32_to_cpu(p_key[7]);		/* H */
-	
-
 
 	for (i = 0; i < 12; i++) {
-		W (key, 2 * i);
-		W (key, 2 * i + 1);
-		
+		W(key, 2 * i);
+		W(key, 2 * i + 1);
+
 		c->Kr[i][0] = key[0] & 0x1f;
 		c->Kr[i][1] = key[2] & 0x1f;
 		c->Kr[i][2] = key[4] & 0x1f;
 		c->Kr[i][3] = key[6] & 0x1f;
-		
+
 		c->Km[i][0] = key[7];
 		c->Km[i][1] = key[5];
 		c->Km[i][2] = key[3];
@@ -428,21 +427,23 @@
 }
 
 /*forward quad round*/
-static void Q (u32 * block, u8 * Kr, u32 * Km) {
+static void Q(u32 *block, u8 *Kr, u32 *Km)
+{
 	u32 I;
 	block[2] ^= F1(block[3], Kr[0], Km[0]);
 	block[1] ^= F2(block[2], Kr[1], Km[1]);
 	block[0] ^= F3(block[1], Kr[2], Km[2]);
-	block[3] ^= F1(block[0], Kr[3], Km[3]);		
+	block[3] ^= F1(block[0], Kr[3], Km[3]);
 }
 
 /*reverse quad round*/
-static void QBAR (u32 * block, u8 * Kr, u32 * Km) {
+static void QBAR(u32 *block, u8 *Kr, u32 *Km)
+{
 	u32 I;
-        block[3] ^= F1(block[0], Kr[3], Km[3]);
-        block[0] ^= F3(block[1], Kr[2], Km[2]);
-        block[1] ^= F2(block[2], Kr[1], Km[1]);
-        block[2] ^= F1(block[3], Kr[0], Km[0]);
+	block[3] ^= F1(block[0], Kr[3], Km[3]);
+	block[0] ^= F3(block[1], Kr[2], Km[2]);
+	block[1] ^= F2(block[2], Kr[1], Km[1]);
+	block[2] ^= F1(block[3], Kr[0], Km[0]);
 }
 
 static void cast6_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf)
@@ -451,64 +452,65 @@
 	const __be32 *src = (const __be32 *)inbuf;
 	__be32 *dst = (__be32 *)outbuf;
 	u32 block[4];
-	u32 * Km; 
-	u8 * Kr;
+	u32 *Km;
+	u8 *Kr;
 
 	block[0] = be32_to_cpu(src[0]);
 	block[1] = be32_to_cpu(src[1]);
 	block[2] = be32_to_cpu(src[2]);
 	block[3] = be32_to_cpu(src[3]);
 
-	Km = c->Km[0]; Kr = c->Kr[0]; Q (block, Kr, Km);
-	Km = c->Km[1]; Kr = c->Kr[1]; Q (block, Kr, Km);
-	Km = c->Km[2]; Kr = c->Kr[2]; Q (block, Kr, Km);
-	Km = c->Km[3]; Kr = c->Kr[3]; Q (block, Kr, Km);
-	Km = c->Km[4]; Kr = c->Kr[4]; Q (block, Kr, Km);
-	Km = c->Km[5]; Kr = c->Kr[5]; Q (block, Kr, Km);
-	Km = c->Km[6]; Kr = c->Kr[6]; QBAR (block, Kr, Km);
-	Km = c->Km[7]; Kr = c->Kr[7]; QBAR (block, Kr, Km);
-	Km = c->Km[8]; Kr = c->Kr[8]; QBAR (block, Kr, Km);
-	Km = c->Km[9]; Kr = c->Kr[9]; QBAR (block, Kr, Km);
-	Km = c->Km[10]; Kr = c->Kr[10]; QBAR (block, Kr, Km);
-	Km = c->Km[11]; Kr = c->Kr[11]; QBAR (block, Kr, Km);
+	Km = c->Km[0]; Kr = c->Kr[0]; Q(block, Kr, Km);
+	Km = c->Km[1]; Kr = c->Kr[1]; Q(block, Kr, Km);
+	Km = c->Km[2]; Kr = c->Kr[2]; Q(block, Kr, Km);
+	Km = c->Km[3]; Kr = c->Kr[3]; Q(block, Kr, Km);
+	Km = c->Km[4]; Kr = c->Kr[4]; Q(block, Kr, Km);
+	Km = c->Km[5]; Kr = c->Kr[5]; Q(block, Kr, Km);
+	Km = c->Km[6]; Kr = c->Kr[6]; QBAR(block, Kr, Km);
+	Km = c->Km[7]; Kr = c->Kr[7]; QBAR(block, Kr, Km);
+	Km = c->Km[8]; Kr = c->Kr[8]; QBAR(block, Kr, Km);
+	Km = c->Km[9]; Kr = c->Kr[9]; QBAR(block, Kr, Km);
+	Km = c->Km[10]; Kr = c->Kr[10]; QBAR(block, Kr, Km);
+	Km = c->Km[11]; Kr = c->Kr[11]; QBAR(block, Kr, Km);
 
 	dst[0] = cpu_to_be32(block[0]);
 	dst[1] = cpu_to_be32(block[1]);
 	dst[2] = cpu_to_be32(block[2]);
 	dst[3] = cpu_to_be32(block[3]);
-}	
+}
 
-static void cast6_decrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf) {
-	struct cast6_ctx * c = crypto_tfm_ctx(tfm);
+static void cast6_decrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf)
+{
+	struct cast6_ctx *c = crypto_tfm_ctx(tfm);
 	const __be32 *src = (const __be32 *)inbuf;
 	__be32 *dst = (__be32 *)outbuf;
 	u32 block[4];
-	u32 * Km; 
-	u8 * Kr;
+	u32 *Km;
+	u8 *Kr;
 
 	block[0] = be32_to_cpu(src[0]);
 	block[1] = be32_to_cpu(src[1]);
 	block[2] = be32_to_cpu(src[2]);
 	block[3] = be32_to_cpu(src[3]);
 
-	Km = c->Km[11]; Kr = c->Kr[11]; Q (block, Kr, Km);
-	Km = c->Km[10]; Kr = c->Kr[10]; Q (block, Kr, Km);
-	Km = c->Km[9]; Kr = c->Kr[9]; Q (block, Kr, Km);
-	Km = c->Km[8]; Kr = c->Kr[8]; Q (block, Kr, Km);
-	Km = c->Km[7]; Kr = c->Kr[7]; Q (block, Kr, Km);
-	Km = c->Km[6]; Kr = c->Kr[6]; Q (block, Kr, Km);
-	Km = c->Km[5]; Kr = c->Kr[5]; QBAR (block, Kr, Km);
-	Km = c->Km[4]; Kr = c->Kr[4]; QBAR (block, Kr, Km);
-	Km = c->Km[3]; Kr = c->Kr[3]; QBAR (block, Kr, Km);
-	Km = c->Km[2]; Kr = c->Kr[2]; QBAR (block, Kr, Km);
-	Km = c->Km[1]; Kr = c->Kr[1]; QBAR (block, Kr, Km);
-	Km = c->Km[0]; Kr = c->Kr[0]; QBAR (block, Kr, Km);
-	
+	Km = c->Km[11]; Kr = c->Kr[11]; Q(block, Kr, Km);
+	Km = c->Km[10]; Kr = c->Kr[10]; Q(block, Kr, Km);
+	Km = c->Km[9]; Kr = c->Kr[9]; Q(block, Kr, Km);
+	Km = c->Km[8]; Kr = c->Kr[8]; Q(block, Kr, Km);
+	Km = c->Km[7]; Kr = c->Kr[7]; Q(block, Kr, Km);
+	Km = c->Km[6]; Kr = c->Kr[6]; Q(block, Kr, Km);
+	Km = c->Km[5]; Kr = c->Kr[5]; QBAR(block, Kr, Km);
+	Km = c->Km[4]; Kr = c->Kr[4]; QBAR(block, Kr, Km);
+	Km = c->Km[3]; Kr = c->Kr[3]; QBAR(block, Kr, Km);
+	Km = c->Km[2]; Kr = c->Kr[2]; QBAR(block, Kr, Km);
+	Km = c->Km[1]; Kr = c->Kr[1]; QBAR(block, Kr, Km);
+	Km = c->Km[0]; Kr = c->Kr[0]; QBAR(block, Kr, Km);
+
 	dst[0] = cpu_to_be32(block[0]);
 	dst[1] = cpu_to_be32(block[1]);
 	dst[2] = cpu_to_be32(block[2]);
 	dst[3] = cpu_to_be32(block[3]);
-}	
+}
 
 static struct crypto_alg alg = {
 	.cra_name = "cast6",
diff --git a/crypto/cipher.c b/crypto/cipher.c
index 9a1a731..39541e0 100644
--- a/crypto/cipher.c
+++ b/crypto/cipher.c
@@ -8,7 +8,7 @@
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option) 
+ * Software Foundation; either version 2 of the License, or (at your option)
  * any later version.
  *
  */
diff --git a/crypto/compress.c b/crypto/compress.c
index 1ee3570..c33f076 100644
--- a/crypto/compress.c
+++ b/crypto/compress.c
@@ -7,7 +7,7 @@
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option) 
+ * Software Foundation; either version 2 of the License, or (at your option)
  * any later version.
  *
  */
@@ -39,7 +39,7 @@
 
 	ops->cot_compress = crypto_compress;
 	ops->cot_decompress = crypto_decompress;
-	
+
 	return 0;
 }
 
diff --git a/crypto/crc32c.c b/crypto/crc32c.c
index 973bc2c..de9e55c 100644
--- a/crypto/crc32c.c
+++ b/crypto/crc32c.c
@@ -1,4 +1,4 @@
-/* 
+/*
  * Cryptographic API.
  *
  * CRC32C chksum
@@ -30,7 +30,7 @@
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option) 
+ * Software Foundation; either version 2 of the License, or (at your option)
  * any later version.
  *
  */
@@ -142,7 +142,7 @@
 }
 
 /*
- * Steps through buffer one byte at at time, calculates reflected 
+ * Steps through buffer one byte at a time, calculates reflected
  * crc using table.
  */
 
diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c
index cb71c91..07a8a96 100644
--- a/crypto/crypto_null.c
+++ b/crypto/crypto_null.c
@@ -1,11 +1,11 @@
-/* 
+/*
  * Cryptographic API.
  *
  * Null algorithms, aka Much Ado About Nothing.
  *
  * These are needed for IPsec, and may be useful in general for
  * testing & debugging.
- * 
+ *
  * The null cipher is compliant with RFC2410.
  *
  * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
@@ -163,7 +163,7 @@
 static int __init crypto_null_mod_init(void)
 {
 	int ret = 0;
-	
+
 	ret = crypto_register_alg(&cipher_null);
 	if (ret < 0)
 		goto out;
@@ -180,7 +180,7 @@
 	if (ret < 0)
 		goto out_unregister_digest;
 
-out:	
+out:
 	return ret;
 
 out_unregister_digest:
diff --git a/crypto/deflate.c b/crypto/deflate.c
index 9128da4..463dc85 100644
--- a/crypto/deflate.c
+++ b/crypto/deflate.c
@@ -1,14 +1,14 @@
-/* 
+/*
  * Cryptographic API.
  *
  * Deflate algorithm (RFC 1951), implemented here primarily for use
  * by IPCOMP (RFC 3173 & RFC 2394).
  *
  * Copyright (c) 2003 James Morris <jmorris@intercode.com.au>
- * 
+ *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option) 
+ * Software Foundation; either version 2 of the License, or (at your option)
  * any later version.
  *
  * FIXME: deflate transforms will require up to a total of about 436k of kernel
@@ -49,7 +49,7 @@
 	struct z_stream_s *stream = &ctx->comp_stream;
 
 	stream->workspace = vmalloc(zlib_deflate_workspacesize());
-	if (!stream->workspace ) {
+	if (!stream->workspace) {
 		ret = -ENOMEM;
 		goto out;
 	}
@@ -61,7 +61,7 @@
 		ret = -EINVAL;
 		goto out_free;
 	}
-out:	
+out:
 	return ret;
 out_free:
 	vfree(stream->workspace);
@@ -74,7 +74,7 @@
 	struct z_stream_s *stream = &ctx->decomp_stream;
 
 	stream->workspace = kzalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
-	if (!stream->workspace ) {
+	if (!stream->workspace) {
 		ret = -ENOMEM;
 		goto out;
 	}
@@ -106,7 +106,7 @@
 {
 	struct deflate_ctx *ctx = crypto_tfm_ctx(tfm);
 	int ret;
-	
+
 	ret = deflate_comp_init(ctx);
 	if (ret)
 		goto out;
@@ -153,11 +153,11 @@
 out:
 	return ret;
 }
- 
+
 static int deflate_decompress(struct crypto_tfm *tfm, const u8 *src,
 			      unsigned int slen, u8 *dst, unsigned int *dlen)
 {
-	
+
 	int ret = 0;
 	struct deflate_ctx *dctx = crypto_tfm_ctx(tfm);
 	struct z_stream_s *stream = &dctx->decomp_stream;
@@ -182,7 +182,7 @@
 	if (ret == Z_OK && !stream->avail_in && stream->avail_out) {
 		u8 zerostuff = 0;
 		stream->next_in = &zerostuff;
-		stream->avail_in = 1; 
+		stream->avail_in = 1;
 		ret = zlib_inflate(stream, Z_FINISH);
 	}
 	if (ret != Z_STREAM_END) {
diff --git a/crypto/des_generic.c b/crypto/des_generic.c
index 5bd3ee3..249f903 100644
--- a/crypto/des_generic.c
+++ b/crypto/des_generic.c
@@ -869,8 +869,7 @@
 
 	if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
 		     !((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
-		     (*flags & CRYPTO_TFM_REQ_WEAK_KEY))
-	{
+		     (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
 		*flags |= CRYPTO_TFM_RES_WEAK_KEY;
 		return -EINVAL;
 	}
diff --git a/crypto/ecb.c b/crypto/ecb.c
index a46838e..935cfef 100644
--- a/crypto/ecb.c
+++ b/crypto/ecb.c
@@ -55,7 +55,7 @@
 
 		do {
 			fn(crypto_cipher_tfm(tfm), wdst, wsrc);
-	
+
 			wsrc += bsize;
 			wdst += bsize;
 		} while ((nbytes -= bsize) >= bsize);
diff --git a/crypto/fcrypt.c b/crypto/fcrypt.c
index b82d61f..c33107e 100644
--- a/crypto/fcrypt.c
+++ b/crypto/fcrypt.c
@@ -60,13 +60,13 @@
 	u32 t = lo & ((1 << n) - 1);				\
 	lo = (lo >> n) | ((hi & ((1 << n) - 1)) << (32 - n));	\
 	hi = (hi >> n) | (t << (24-n));				\
-} while(0)
+} while (0)
 
 /* Rotate right one 64 bit number as a 56 bit number */
 #define ror56_64(k, n)						\
 do {								\
 	k = (k >> n) | ((k & ((1 << n) - 1)) << (56 - n));	\
-} while(0)
+} while (0)
 
 /*
  * Sboxes for Feistel network derived from
@@ -228,7 +228,7 @@
 	union lc4 { __be32 l; u8 c[4]; } u;				\
 	u.l = sched ^ R;						\
 	L ^= sbox0[u.c[0]] ^ sbox1[u.c[1]] ^ sbox2[u.c[2]] ^ sbox3[u.c[3]]; \
-} while(0)
+} while (0)
 
 /*
  * encryptor
diff --git a/crypto/gcm.c b/crypto/gcm.c
index c654713..2f5fbba 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -37,6 +37,19 @@
 	u8 nonce[4];
 };
 
+struct crypto_rfc4543_ctx {
+	struct crypto_aead *child;
+	u8 nonce[4];
+};
+
+struct crypto_rfc4543_req_ctx {
+	u8 auth_tag[16];
+	struct scatterlist cipher[1];
+	struct scatterlist payload[2];
+	struct scatterlist assoc[2];
+	struct aead_request subreq;
+};
+
 struct crypto_gcm_ghash_ctx {
 	unsigned int cryptlen;
 	struct scatterlist *src;
@@ -1047,6 +1060,272 @@
 	.module = THIS_MODULE,
 };
 
+static inline struct crypto_rfc4543_req_ctx *crypto_rfc4543_reqctx(
+	struct aead_request *req)
+{
+	unsigned long align = crypto_aead_alignmask(crypto_aead_reqtfm(req));
+
+	return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1);
+}
+
+static int crypto_rfc4543_setkey(struct crypto_aead *parent, const u8 *key,
+				 unsigned int keylen)
+{
+	struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(parent);
+	struct crypto_aead *child = ctx->child;
+	int err;
+
+	if (keylen < 4)
+		return -EINVAL;
+
+	keylen -= 4;
+	memcpy(ctx->nonce, key + keylen, 4);
+
+	crypto_aead_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+	crypto_aead_set_flags(child, crypto_aead_get_flags(parent) &
+				     CRYPTO_TFM_REQ_MASK);
+	err = crypto_aead_setkey(child, key, keylen);
+	crypto_aead_set_flags(parent, crypto_aead_get_flags(child) &
+				      CRYPTO_TFM_RES_MASK);
+
+	return err;
+}
+
+static int crypto_rfc4543_setauthsize(struct crypto_aead *parent,
+				      unsigned int authsize)
+{
+	struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(parent);
+
+	if (authsize != 16)
+		return -EINVAL;
+
+	return crypto_aead_setauthsize(ctx->child, authsize);
+}
+
+/* this is the same as crypto_authenc_chain */
+static void crypto_rfc4543_chain(struct scatterlist *head,
+				 struct scatterlist *sg, int chain)
+{
+	if (chain) {
+		head->length += sg->length;
+		sg = scatterwalk_sg_next(sg);
+	}
+
+	if (sg)
+		scatterwalk_sg_chain(head, 2, sg);
+	else
+		sg_mark_end(head);
+}
+
+static struct aead_request *crypto_rfc4543_crypt(struct aead_request *req,
+						 int enc)
+{
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(aead);
+	struct crypto_rfc4543_req_ctx *rctx = crypto_rfc4543_reqctx(req);
+	struct aead_request *subreq = &rctx->subreq;
+	struct scatterlist *dst = req->dst;
+	struct scatterlist *cipher = rctx->cipher;
+	struct scatterlist *payload = rctx->payload;
+	struct scatterlist *assoc = rctx->assoc;
+	unsigned int authsize = crypto_aead_authsize(aead);
+	unsigned int assoclen = req->assoclen;
+	struct page *dstp;
+	u8 *vdst;
+	u8 *iv = PTR_ALIGN((u8 *)(rctx + 1) + crypto_aead_reqsize(ctx->child),
+			   crypto_aead_alignmask(ctx->child) + 1);
+
+	memcpy(iv, ctx->nonce, 4);
+	memcpy(iv + 4, req->iv, 8);
+
+	/* construct cipher/plaintext */
+	if (enc)
+		memset(rctx->auth_tag, 0, authsize);
+	else
+		scatterwalk_map_and_copy(rctx->auth_tag, dst,
+					 req->cryptlen - authsize,
+					 authsize, 0);
+
+	sg_init_one(cipher, rctx->auth_tag, authsize);
+
+	/* construct the aad */
+	dstp = sg_page(dst);
+	vdst = PageHighMem(dstp) ? NULL : page_address(dstp) + dst->offset;
+
+	sg_init_table(payload, 2);
+	sg_set_buf(payload, req->iv, 8);
+	crypto_rfc4543_chain(payload, dst, vdst == req->iv + 8);
+	assoclen += 8 + req->cryptlen - (enc ? 0 : authsize);
+
+	sg_init_table(assoc, 2);
+	sg_set_page(assoc, sg_page(req->assoc), req->assoc->length,
+		    req->assoc->offset);
+	crypto_rfc4543_chain(assoc, payload, 0);
+
+	aead_request_set_tfm(subreq, ctx->child);
+	aead_request_set_callback(subreq, req->base.flags, req->base.complete,
+				  req->base.data);
+	aead_request_set_crypt(subreq, cipher, cipher, enc ? 0 : authsize, iv);
+	aead_request_set_assoc(subreq, assoc, assoclen);
+
+	return subreq;
+}
+
+static int crypto_rfc4543_encrypt(struct aead_request *req)
+{
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct crypto_rfc4543_req_ctx *rctx = crypto_rfc4543_reqctx(req);
+	struct aead_request *subreq;
+	int err;
+
+	subreq = crypto_rfc4543_crypt(req, 1);
+	err = crypto_aead_encrypt(subreq);
+	if (err)
+		return err;
+
+	scatterwalk_map_and_copy(rctx->auth_tag, req->dst, req->cryptlen,
+				 crypto_aead_authsize(aead), 1);
+
+	return 0;
+}
+
+static int crypto_rfc4543_decrypt(struct aead_request *req)
+{
+	req = crypto_rfc4543_crypt(req, 0);
+
+	return crypto_aead_decrypt(req);
+}
+
+static int crypto_rfc4543_init_tfm(struct crypto_tfm *tfm)
+{
+	struct crypto_instance *inst = (void *)tfm->__crt_alg;
+	struct crypto_aead_spawn *spawn = crypto_instance_ctx(inst);
+	struct crypto_rfc4543_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_aead *aead;
+	unsigned long align;
+
+	aead = crypto_spawn_aead(spawn);
+	if (IS_ERR(aead))
+		return PTR_ERR(aead);
+
+	ctx->child = aead;
+
+	align = crypto_aead_alignmask(aead);
+	align &= ~(crypto_tfm_ctx_alignment() - 1);
+	tfm->crt_aead.reqsize = sizeof(struct crypto_rfc4543_req_ctx) +
+				ALIGN(crypto_aead_reqsize(aead),
+				      crypto_tfm_ctx_alignment()) +
+				align + 16;
+
+	return 0;
+}
+
+static void crypto_rfc4543_exit_tfm(struct crypto_tfm *tfm)
+{
+	struct crypto_rfc4543_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	crypto_free_aead(ctx->child);
+}
+
+static struct crypto_instance *crypto_rfc4543_alloc(struct rtattr **tb)
+{
+	struct crypto_attr_type *algt;
+	struct crypto_instance *inst;
+	struct crypto_aead_spawn *spawn;
+	struct crypto_alg *alg;
+	const char *ccm_name;
+	int err;
+
+	algt = crypto_get_attr_type(tb);
+	err = PTR_ERR(algt);
+	if (IS_ERR(algt))
+		return ERR_PTR(err);
+
+	if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
+		return ERR_PTR(-EINVAL);
+
+	ccm_name = crypto_attr_alg_name(tb[1]);
+	err = PTR_ERR(ccm_name);
+	if (IS_ERR(ccm_name))
+		return ERR_PTR(err);
+
+	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
+	if (!inst)
+		return ERR_PTR(-ENOMEM);
+
+	spawn = crypto_instance_ctx(inst);
+	crypto_set_aead_spawn(spawn, inst);
+	err = crypto_grab_aead(spawn, ccm_name, 0,
+			       crypto_requires_sync(algt->type, algt->mask));
+	if (err)
+		goto out_free_inst;
+
+	alg = crypto_aead_spawn_alg(spawn);
+
+	err = -EINVAL;
+
+	/* We only support 16-byte blocks. */
+	if (alg->cra_aead.ivsize != 16)
+		goto out_drop_alg;
+
+	/* Not a stream cipher? */
+	if (alg->cra_blocksize != 1)
+		goto out_drop_alg;
+
+	err = -ENAMETOOLONG;
+	if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
+		     "rfc4543(%s)", alg->cra_name) >= CRYPTO_MAX_ALG_NAME ||
+	    snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+		     "rfc4543(%s)", alg->cra_driver_name) >=
+	    CRYPTO_MAX_ALG_NAME)
+		goto out_drop_alg;
+
+	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
+	inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
+	inst->alg.cra_priority = alg->cra_priority;
+	inst->alg.cra_blocksize = 1;
+	inst->alg.cra_alignmask = alg->cra_alignmask;
+	inst->alg.cra_type = &crypto_nivaead_type;
+
+	inst->alg.cra_aead.ivsize = 8;
+	inst->alg.cra_aead.maxauthsize = 16;
+
+	inst->alg.cra_ctxsize = sizeof(struct crypto_rfc4543_ctx);
+
+	inst->alg.cra_init = crypto_rfc4543_init_tfm;
+	inst->alg.cra_exit = crypto_rfc4543_exit_tfm;
+
+	inst->alg.cra_aead.setkey = crypto_rfc4543_setkey;
+	inst->alg.cra_aead.setauthsize = crypto_rfc4543_setauthsize;
+	inst->alg.cra_aead.encrypt = crypto_rfc4543_encrypt;
+	inst->alg.cra_aead.decrypt = crypto_rfc4543_decrypt;
+
+	inst->alg.cra_aead.geniv = "seqiv";
+
+out:
+	return inst;
+
+out_drop_alg:
+	crypto_drop_aead(spawn);
+out_free_inst:
+	kfree(inst);
+	inst = ERR_PTR(err);
+	goto out;
+}
+
+static void crypto_rfc4543_free(struct crypto_instance *inst)
+{
+	crypto_drop_spawn(crypto_instance_ctx(inst));
+	kfree(inst);
+}
+
+static struct crypto_template crypto_rfc4543_tmpl = {
+	.name = "rfc4543",
+	.alloc = crypto_rfc4543_alloc,
+	.free = crypto_rfc4543_free,
+	.module = THIS_MODULE,
+};
+
 static int __init crypto_gcm_module_init(void)
 {
 	int err;
@@ -1067,8 +1346,14 @@
 	if (err)
 		goto out_undo_gcm;
 
+	err = crypto_register_template(&crypto_rfc4543_tmpl);
+	if (err)
+		goto out_undo_rfc4106;
+
 	return 0;
 
+out_undo_rfc4106:
+	crypto_unregister_template(&crypto_rfc4106_tmpl);
 out_undo_gcm:
 	crypto_unregister_template(&crypto_gcm_tmpl);
 out_undo_base:
@@ -1081,6 +1366,7 @@
 static void __exit crypto_gcm_module_exit(void)
 {
 	kfree(gcm_zeroes);
+	crypto_unregister_template(&crypto_rfc4543_tmpl);
 	crypto_unregister_template(&crypto_rfc4106_tmpl);
 	crypto_unregister_template(&crypto_gcm_tmpl);
 	crypto_unregister_template(&crypto_gcm_base_tmpl);
@@ -1094,3 +1380,4 @@
 MODULE_AUTHOR("Mikko Herranen <mh1@iki.fi>");
 MODULE_ALIAS("gcm_base");
 MODULE_ALIAS("rfc4106");
+MODULE_ALIAS("rfc4543");
diff --git a/crypto/md5.c b/crypto/md5.c
index 83eb529..9fda213 100644
--- a/crypto/md5.c
+++ b/crypto/md5.c
@@ -16,17 +16,13 @@
  *
  */
 #include <crypto/internal/hash.h>
+#include <crypto/md5.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/string.h>
 #include <linux/types.h>
 #include <asm/byteorder.h>
 
-#define MD5_DIGEST_SIZE		16
-#define MD5_HMAC_BLOCK_SIZE	64
-#define MD5_BLOCK_WORDS		16
-#define MD5_HASH_WORDS		4
-
 #define F1(x, y, z)	(z ^ (x & (y ^ z)))
 #define F2(x, y, z)	F1(z, x, y)
 #define F3(x, y, z)	(x ^ y ^ z)
@@ -35,12 +31,6 @@
 #define MD5STEP(f, w, x, y, z, in, s) \
 	(w += f(x, y, z) + in, w = (w<<s | w>>(32-s)) + x)
 
-struct md5_ctx {
-	u32 hash[MD5_HASH_WORDS];
-	u32 block[MD5_BLOCK_WORDS];
-	u64 byte_count;
-};
-
 static void md5_transform(u32 *hash, u32 const *in)
 {
 	u32 a, b, c, d;
@@ -141,7 +131,7 @@
 	}
 }
 
-static inline void md5_transform_helper(struct md5_ctx *ctx)
+static inline void md5_transform_helper(struct md5_state *ctx)
 {
 	le32_to_cpu_array(ctx->block, sizeof(ctx->block) / sizeof(u32));
 	md5_transform(ctx->hash, ctx->block);
@@ -149,7 +139,7 @@
 
 static int md5_init(struct shash_desc *desc)
 {
-	struct md5_ctx *mctx = shash_desc_ctx(desc);
+	struct md5_state *mctx = shash_desc_ctx(desc);
 
 	mctx->hash[0] = 0x67452301;
 	mctx->hash[1] = 0xefcdab89;
@@ -162,7 +152,7 @@
 
 static int md5_update(struct shash_desc *desc, const u8 *data, unsigned int len)
 {
-	struct md5_ctx *mctx = shash_desc_ctx(desc);
+	struct md5_state *mctx = shash_desc_ctx(desc);
 	const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f);
 
 	mctx->byte_count += len;
@@ -194,7 +184,7 @@
 
 static int md5_final(struct shash_desc *desc, u8 *out)
 {
-	struct md5_ctx *mctx = shash_desc_ctx(desc);
+	struct md5_state *mctx = shash_desc_ctx(desc);
 	const unsigned int offset = mctx->byte_count & 0x3f;
 	char *p = (char *)mctx->block + offset;
 	int padding = 56 - (offset + 1);
@@ -220,12 +210,30 @@
 	return 0;
 }
 
+static int md5_export(struct shash_desc *desc, void *out)
+{
+	struct md5_state *ctx = shash_desc_ctx(desc);
+
+	memcpy(out, ctx, sizeof(*ctx));
+	return 0;
+}
+
+static int md5_import(struct shash_desc *desc, const void *in)
+{
+	struct md5_state *ctx = shash_desc_ctx(desc);
+
+	memcpy(ctx, in, sizeof(*ctx));
+	return 0;
+}
+
 static struct shash_alg alg = {
 	.digestsize	=	MD5_DIGEST_SIZE,
 	.init		=	md5_init,
 	.update		=	md5_update,
 	.final		=	md5_final,
-	.descsize	=	sizeof(struct md5_ctx),
+	.export		=	md5_export,
+	.import		=	md5_import,
+	.descsize	=	sizeof(struct md5_state),
 	.base		=	{
 		.cra_name	=	"md5",
 		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
new file mode 100644
index 0000000..8020124
--- /dev/null
+++ b/crypto/pcrypt.c
@@ -0,0 +1,445 @@
+/*
+ * pcrypt - Parallel crypto wrapper.
+ *
+ * Copyright (C) 2009 secunet Security Networks AG
+ * Copyright (C) 2009 Steffen Klassert <steffen.klassert@secunet.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <crypto/algapi.h>
+#include <crypto/internal/aead.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <crypto/pcrypt.h>
+
+static struct padata_instance *pcrypt_enc_padata;
+static struct padata_instance *pcrypt_dec_padata;
+static struct workqueue_struct *encwq;
+static struct workqueue_struct *decwq;
+
+struct pcrypt_instance_ctx {
+	struct crypto_spawn spawn;
+	unsigned int tfm_count;
+};
+
+struct pcrypt_aead_ctx {
+	struct crypto_aead *child;
+	unsigned int cb_cpu;
+};
+
+static int pcrypt_do_parallel(struct padata_priv *padata, unsigned int *cb_cpu,
+			      struct padata_instance *pinst)
+{
+	unsigned int cpu_index, cpu, i;
+
+	cpu = *cb_cpu;
+
+	if (cpumask_test_cpu(cpu, cpu_active_mask))
+			goto out;
+
+	cpu_index = cpu % cpumask_weight(cpu_active_mask);
+
+	cpu = cpumask_first(cpu_active_mask);
+	for (i = 0; i < cpu_index; i++)
+		cpu = cpumask_next(cpu, cpu_active_mask);
+
+	*cb_cpu = cpu;
+
+out:
+	return padata_do_parallel(pinst, padata, cpu);
+}
+
+static int pcrypt_aead_setkey(struct crypto_aead *parent,
+			      const u8 *key, unsigned int keylen)
+{
+	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent);
+
+	return crypto_aead_setkey(ctx->child, key, keylen);
+}
+
+static int pcrypt_aead_setauthsize(struct crypto_aead *parent,
+				   unsigned int authsize)
+{
+	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent);
+
+	return crypto_aead_setauthsize(ctx->child, authsize);
+}
+
+static void pcrypt_aead_serial(struct padata_priv *padata)
+{
+	struct pcrypt_request *preq = pcrypt_padata_request(padata);
+	struct aead_request *req = pcrypt_request_ctx(preq);
+
+	aead_request_complete(req->base.data, padata->info);
+}
+
+static void pcrypt_aead_giv_serial(struct padata_priv *padata)
+{
+	struct pcrypt_request *preq = pcrypt_padata_request(padata);
+	struct aead_givcrypt_request *req = pcrypt_request_ctx(preq);
+
+	aead_request_complete(req->areq.base.data, padata->info);
+}
+
+static void pcrypt_aead_done(struct crypto_async_request *areq, int err)
+{
+	struct aead_request *req = areq->data;
+	struct pcrypt_request *preq = aead_request_ctx(req);
+	struct padata_priv *padata = pcrypt_request_padata(preq);
+
+	padata->info = err;
+	req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	padata_do_serial(padata);
+}
+
+static void pcrypt_aead_enc(struct padata_priv *padata)
+{
+	struct pcrypt_request *preq = pcrypt_padata_request(padata);
+	struct aead_request *req = pcrypt_request_ctx(preq);
+
+	padata->info = crypto_aead_encrypt(req);
+
+	if (padata->info == -EINPROGRESS)
+		return;
+
+	padata_do_serial(padata);
+}
+
+static int pcrypt_aead_encrypt(struct aead_request *req)
+{
+	int err;
+	struct pcrypt_request *preq = aead_request_ctx(req);
+	struct aead_request *creq = pcrypt_request_ctx(preq);
+	struct padata_priv *padata = pcrypt_request_padata(preq);
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
+	u32 flags = aead_request_flags(req);
+
+	memset(padata, 0, sizeof(struct padata_priv));
+
+	padata->parallel = pcrypt_aead_enc;
+	padata->serial = pcrypt_aead_serial;
+
+	aead_request_set_tfm(creq, ctx->child);
+	aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
+				  pcrypt_aead_done, req);
+	aead_request_set_crypt(creq, req->src, req->dst,
+			       req->cryptlen, req->iv);
+	aead_request_set_assoc(creq, req->assoc, req->assoclen);
+
+	err = pcrypt_do_parallel(padata, &ctx->cb_cpu, pcrypt_enc_padata);
+	if (err)
+		return err;
+	else
+		err = crypto_aead_encrypt(creq);
+
+	return err;
+}
+
+static void pcrypt_aead_dec(struct padata_priv *padata)
+{
+	struct pcrypt_request *preq = pcrypt_padata_request(padata);
+	struct aead_request *req = pcrypt_request_ctx(preq);
+
+	padata->info = crypto_aead_decrypt(req);
+
+	if (padata->info == -EINPROGRESS)
+		return;
+
+	padata_do_serial(padata);
+}
+
+static int pcrypt_aead_decrypt(struct aead_request *req)
+{
+	int err;
+	struct pcrypt_request *preq = aead_request_ctx(req);
+	struct aead_request *creq = pcrypt_request_ctx(preq);
+	struct padata_priv *padata = pcrypt_request_padata(preq);
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
+	u32 flags = aead_request_flags(req);
+
+	memset(padata, 0, sizeof(struct padata_priv));
+
+	padata->parallel = pcrypt_aead_dec;
+	padata->serial = pcrypt_aead_serial;
+
+	aead_request_set_tfm(creq, ctx->child);
+	aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
+				  pcrypt_aead_done, req);
+	aead_request_set_crypt(creq, req->src, req->dst,
+			       req->cryptlen, req->iv);
+	aead_request_set_assoc(creq, req->assoc, req->assoclen);
+
+	err = pcrypt_do_parallel(padata, &ctx->cb_cpu, pcrypt_dec_padata);
+	if (err)
+		return err;
+	else
+		err = crypto_aead_decrypt(creq);
+
+	return err;
+}
+
+static void pcrypt_aead_givenc(struct padata_priv *padata)
+{
+	struct pcrypt_request *preq = pcrypt_padata_request(padata);
+	struct aead_givcrypt_request *req = pcrypt_request_ctx(preq);
+
+	padata->info = crypto_aead_givencrypt(req);
+
+	if (padata->info == -EINPROGRESS)
+		return;
+
+	padata_do_serial(padata);
+}
+
+static int pcrypt_aead_givencrypt(struct aead_givcrypt_request *req)
+{
+	int err;
+	struct aead_request *areq = &req->areq;
+	struct pcrypt_request *preq = aead_request_ctx(areq);
+	struct aead_givcrypt_request *creq = pcrypt_request_ctx(preq);
+	struct padata_priv *padata = pcrypt_request_padata(preq);
+	struct crypto_aead *aead = aead_givcrypt_reqtfm(req);
+	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
+	u32 flags = aead_request_flags(areq);
+
+	memset(padata, 0, sizeof(struct padata_priv));
+
+	padata->parallel = pcrypt_aead_givenc;
+	padata->serial = pcrypt_aead_giv_serial;
+
+	aead_givcrypt_set_tfm(creq, ctx->child);
+	aead_givcrypt_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
+				   pcrypt_aead_done, areq);
+	aead_givcrypt_set_crypt(creq, areq->src, areq->dst,
+				areq->cryptlen, areq->iv);
+	aead_givcrypt_set_assoc(creq, areq->assoc, areq->assoclen);
+	aead_givcrypt_set_giv(creq, req->giv, req->seq);
+
+	err = pcrypt_do_parallel(padata, &ctx->cb_cpu, pcrypt_enc_padata);
+	if (err)
+		return err;
+	else
+		err = crypto_aead_givencrypt(creq);
+
+	return err;
+}
+
+static int pcrypt_aead_init_tfm(struct crypto_tfm *tfm)
+{
+	int cpu, cpu_index;
+	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
+	struct pcrypt_instance_ctx *ictx = crypto_instance_ctx(inst);
+	struct pcrypt_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_aead *cipher;
+
+	ictx->tfm_count++;
+
+	cpu_index = ictx->tfm_count % cpumask_weight(cpu_active_mask);
+
+	ctx->cb_cpu = cpumask_first(cpu_active_mask);
+	for (cpu = 0; cpu < cpu_index; cpu++)
+		ctx->cb_cpu = cpumask_next(ctx->cb_cpu, cpu_active_mask);
+
+	cipher = crypto_spawn_aead(crypto_instance_ctx(inst));
+
+	if (IS_ERR(cipher))
+		return PTR_ERR(cipher);
+
+	ctx->child = cipher;
+	tfm->crt_aead.reqsize = sizeof(struct pcrypt_request)
+		+ sizeof(struct aead_givcrypt_request)
+		+ crypto_aead_reqsize(cipher);
+
+	return 0;
+}
+
+static void pcrypt_aead_exit_tfm(struct crypto_tfm *tfm)
+{
+	struct pcrypt_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	crypto_free_aead(ctx->child);
+}
+
+static struct crypto_instance *pcrypt_alloc_instance(struct crypto_alg *alg)
+{
+	struct crypto_instance *inst;
+	struct pcrypt_instance_ctx *ctx;
+	int err;
+
+	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
+	if (!inst) {
+		inst = ERR_PTR(-ENOMEM);
+		goto out;
+	}
+
+	err = -ENAMETOOLONG;
+	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+		     "pcrypt(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
+		goto out_free_inst;
+
+	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
+
+	ctx = crypto_instance_ctx(inst);
+	err = crypto_init_spawn(&ctx->spawn, alg, inst,
+				CRYPTO_ALG_TYPE_MASK);
+	if (err)
+		goto out_free_inst;
+
+	inst->alg.cra_priority = alg->cra_priority + 100;
+	inst->alg.cra_blocksize = alg->cra_blocksize;
+	inst->alg.cra_alignmask = alg->cra_alignmask;
+
+out:
+	return inst;
+
+out_free_inst:
+	kfree(inst);
+	inst = ERR_PTR(err);
+	goto out;
+}
+
+static struct crypto_instance *pcrypt_alloc_aead(struct rtattr **tb)
+{
+	struct crypto_instance *inst;
+	struct crypto_alg *alg;
+	struct crypto_attr_type *algt;
+
+	algt = crypto_get_attr_type(tb);
+
+	alg = crypto_get_attr_alg(tb, algt->type,
+				  (algt->mask & CRYPTO_ALG_TYPE_MASK));
+	if (IS_ERR(alg))
+		return ERR_CAST(alg);
+
+	inst = pcrypt_alloc_instance(alg);
+	if (IS_ERR(inst))
+		goto out_put_alg;
+
+	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
+	inst->alg.cra_type = &crypto_aead_type;
+
+	inst->alg.cra_aead.ivsize = alg->cra_aead.ivsize;
+	inst->alg.cra_aead.geniv = alg->cra_aead.geniv;
+	inst->alg.cra_aead.maxauthsize = alg->cra_aead.maxauthsize;
+
+	inst->alg.cra_ctxsize = sizeof(struct pcrypt_aead_ctx);
+
+	inst->alg.cra_init = pcrypt_aead_init_tfm;
+	inst->alg.cra_exit = pcrypt_aead_exit_tfm;
+
+	inst->alg.cra_aead.setkey = pcrypt_aead_setkey;
+	inst->alg.cra_aead.setauthsize = pcrypt_aead_setauthsize;
+	inst->alg.cra_aead.encrypt = pcrypt_aead_encrypt;
+	inst->alg.cra_aead.decrypt = pcrypt_aead_decrypt;
+	inst->alg.cra_aead.givencrypt = pcrypt_aead_givencrypt;
+
+out_put_alg:
+	crypto_mod_put(alg);
+	return inst;
+}
+
+static struct crypto_instance *pcrypt_alloc(struct rtattr **tb)
+{
+	struct crypto_attr_type *algt;
+
+	algt = crypto_get_attr_type(tb);
+	if (IS_ERR(algt))
+		return ERR_CAST(algt);
+
+	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
+	case CRYPTO_ALG_TYPE_AEAD:
+		return pcrypt_alloc_aead(tb);
+	}
+
+	return ERR_PTR(-EINVAL);
+}
+
+static void pcrypt_free(struct crypto_instance *inst)
+{
+	struct pcrypt_instance_ctx *ctx = crypto_instance_ctx(inst);
+
+	crypto_drop_spawn(&ctx->spawn);
+	kfree(inst);
+}
+
+static struct crypto_template pcrypt_tmpl = {
+	.name = "pcrypt",
+	.alloc = pcrypt_alloc,
+	.free = pcrypt_free,
+	.module = THIS_MODULE,
+};
+
+static int __init pcrypt_init(void)
+{
+	encwq = create_workqueue("pencrypt");
+	if (!encwq)
+		goto err;
+
+	decwq = create_workqueue("pdecrypt");
+	if (!decwq)
+		goto err_destroy_encwq;
+
+
+	pcrypt_enc_padata = padata_alloc(cpu_possible_mask, encwq);
+	if (!pcrypt_enc_padata)
+		goto err_destroy_decwq;
+
+	pcrypt_dec_padata = padata_alloc(cpu_possible_mask, decwq);
+	if (!pcrypt_dec_padata)
+		goto err_free_padata;
+
+	padata_start(pcrypt_enc_padata);
+	padata_start(pcrypt_dec_padata);
+
+	return crypto_register_template(&pcrypt_tmpl);
+
+err_free_padata:
+	padata_free(pcrypt_enc_padata);
+
+err_destroy_decwq:
+	destroy_workqueue(decwq);
+
+err_destroy_encwq:
+	destroy_workqueue(encwq);
+
+err:
+	return -ENOMEM;
+}
+
+static void __exit pcrypt_exit(void)
+{
+	padata_stop(pcrypt_enc_padata);
+	padata_stop(pcrypt_dec_padata);
+
+	destroy_workqueue(encwq);
+	destroy_workqueue(decwq);
+
+	padata_free(pcrypt_enc_padata);
+	padata_free(pcrypt_dec_padata);
+
+	crypto_unregister_template(&pcrypt_tmpl);
+}
+
+module_init(pcrypt_init);
+module_exit(pcrypt_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
+MODULE_DESCRIPTION("Parallel crypto wrapper");
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 7620bfc..c494d76 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -1477,9 +1477,54 @@
 	return err;
 }
 
+static int alg_test_null(const struct alg_test_desc *desc,
+			     const char *driver, u32 type, u32 mask)
+{
+	return 0;
+}
+
 /* Please keep this list sorted by algorithm name. */
 static const struct alg_test_desc alg_test_descs[] = {
 	{
+		.alg = "__driver-cbc-aes-aesni",
+		.test = alg_test_null,
+		.suite = {
+			.cipher = {
+				.enc = {
+					.vecs = NULL,
+					.count = 0
+				},
+				.dec = {
+					.vecs = NULL,
+					.count = 0
+				}
+			}
+		}
+	}, {
+		.alg = "__driver-ecb-aes-aesni",
+		.test = alg_test_null,
+		.suite = {
+			.cipher = {
+				.enc = {
+					.vecs = NULL,
+					.count = 0
+				},
+				.dec = {
+					.vecs = NULL,
+					.count = 0
+				}
+			}
+		}
+	}, {
+		.alg = "__ghash-pclmulqdqni",
+		.test = alg_test_null,
+		.suite = {
+			.hash = {
+				.vecs = NULL,
+				.count = 0
+			}
+		}
+	}, {
 		.alg = "ansi_cprng",
 		.test = alg_test_cprng,
 		.fips_allowed = 1,
@@ -1623,6 +1668,30 @@
 			}
 		}
 	}, {
+		.alg = "cryptd(__driver-ecb-aes-aesni)",
+		.test = alg_test_null,
+		.suite = {
+			.cipher = {
+				.enc = {
+					.vecs = NULL,
+					.count = 0
+				},
+				.dec = {
+					.vecs = NULL,
+					.count = 0
+				}
+			}
+		}
+	}, {
+		.alg = "cryptd(__ghash-pclmulqdqni)",
+		.test = alg_test_null,
+		.suite = {
+			.hash = {
+				.vecs = NULL,
+				.count = 0
+			}
+		}
+	}, {
 		.alg = "ctr(aes)",
 		.test = alg_test_skcipher,
 		.fips_allowed = 1,
@@ -1669,6 +1738,21 @@
 			}
 		}
 	}, {
+		.alg = "ecb(__aes-aesni)",
+		.test = alg_test_null,
+		.suite = {
+			.cipher = {
+				.enc = {
+					.vecs = NULL,
+					.count = 0
+				},
+				.dec = {
+					.vecs = NULL,
+					.count = 0
+				}
+			}
+		}
+	}, {
 		.alg = "ecb(aes)",
 		.test = alg_test_skcipher,
 		.fips_allowed = 1,
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index 0bba148..4ced54f 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -76,12 +76,9 @@
  * evgpe - GPE handling and dispatch
  */
 acpi_status
-acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info,
-				u8 type);
+acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info);
 
-acpi_status
-acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info,
-		   u8 write_to_hardware);
+acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info);
 
 acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info);
 
@@ -122,9 +119,6 @@
 u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list);
 
 acpi_status
-acpi_ev_set_gpe_type(struct acpi_gpe_event_info *gpe_event_info, u8 type);
-
-acpi_status
 acpi_ev_check_for_wake_only_gpe(struct acpi_gpe_event_info *gpe_event_info);
 
 acpi_status acpi_ev_gpe_initialize(void);
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 81e64f4..13cb80c 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -426,6 +426,8 @@
 	struct acpi_gpe_register_info *register_info;	/* Backpointer to register info */
 	u8 flags;		/* Misc info about this GPE */
 	u8 gpe_number;		/* This GPE */
+	u8 runtime_count;
+	u8 wakeup_count;
 };
 
 /* Information about a GPE register pair, one per each status/enable pair in an array */
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index 64062b1..07f6e2e 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -287,8 +287,10 @@
 
 struct acpi_object_notify_handler {
 	ACPI_OBJECT_COMMON_HEADER struct acpi_namespace_node *node;	/* Parent device */
+	u32 handler_type;
 	acpi_notify_handler handler;
 	void *context;
+	struct acpi_object_notify_handler *next;
 };
 
 struct acpi_object_addr_handler {
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index afacf44..0b45346 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -54,54 +54,9 @@
 
 /*******************************************************************************
  *
- * FUNCTION:    acpi_ev_set_gpe_type
- *
- * PARAMETERS:  gpe_event_info          - GPE to set
- *              Type                    - New type
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Sets the new type for the GPE (wake, run, or wake/run)
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ev_set_gpe_type(struct acpi_gpe_event_info *gpe_event_info, u8 type)
-{
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(ev_set_gpe_type);
-
-	/* Validate type and update register enable masks */
-
-	switch (type) {
-	case ACPI_GPE_TYPE_WAKE:
-	case ACPI_GPE_TYPE_RUNTIME:
-	case ACPI_GPE_TYPE_WAKE_RUN:
-		break;
-
-	default:
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	/* Disable the GPE if currently enabled */
-
-	status = acpi_ev_disable_gpe(gpe_event_info);
-
-	/* Clear the type bits and insert the new Type */
-
-	gpe_event_info->flags &= ~ACPI_GPE_TYPE_MASK;
-	gpe_event_info->flags |= type;
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
  * FUNCTION:    acpi_ev_update_gpe_enable_masks
  *
  * PARAMETERS:  gpe_event_info          - GPE to update
- *              Type                    - What to do: ACPI_GPE_DISABLE or
- *                                        ACPI_GPE_ENABLE
  *
  * RETURN:      Status
  *
@@ -110,8 +65,7 @@
  ******************************************************************************/
 
 acpi_status
-acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info,
-				u8 type)
+acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info)
 {
 	struct acpi_gpe_register_info *gpe_register_info;
 	u8 register_bit;
@@ -127,37 +81,14 @@
 	    (1 <<
 	     (gpe_event_info->gpe_number - gpe_register_info->base_gpe_number));
 
-	/* 1) Disable case. Simply clear all enable bits */
+	ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake, register_bit);
+	ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit);
 
-	if (type == ACPI_GPE_DISABLE) {
-		ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake,
-			       register_bit);
-		ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit);
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	/* 2) Enable case. Set/Clear the appropriate enable bits */
-
-	switch (gpe_event_info->flags & ACPI_GPE_TYPE_MASK) {
-	case ACPI_GPE_TYPE_WAKE:
-		ACPI_SET_BIT(gpe_register_info->enable_for_wake, register_bit);
-		ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit);
-		break;
-
-	case ACPI_GPE_TYPE_RUNTIME:
-		ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake,
-			       register_bit);
+	if (gpe_event_info->runtime_count)
 		ACPI_SET_BIT(gpe_register_info->enable_for_run, register_bit);
-		break;
 
-	case ACPI_GPE_TYPE_WAKE_RUN:
+	if (gpe_event_info->wakeup_count)
 		ACPI_SET_BIT(gpe_register_info->enable_for_wake, register_bit);
-		ACPI_SET_BIT(gpe_register_info->enable_for_run, register_bit);
-		break;
-
-	default:
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
 
 	return_ACPI_STATUS(AE_OK);
 }
@@ -167,8 +98,6 @@
  * FUNCTION:    acpi_ev_enable_gpe
  *
  * PARAMETERS:  gpe_event_info          - GPE to enable
- *              write_to_hardware       - Enable now, or just mark data structs
- *                                        (WAKE GPEs should be deferred)
  *
  * RETURN:      Status
  *
@@ -176,9 +105,7 @@
  *
  ******************************************************************************/
 
-acpi_status
-acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info,
-		   u8 write_to_hardware)
+acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
 {
 	acpi_status status;
 
@@ -186,47 +113,20 @@
 
 	/* Make sure HW enable masks are updated */
 
-	status =
-	    acpi_ev_update_gpe_enable_masks(gpe_event_info, ACPI_GPE_ENABLE);
-	if (ACPI_FAILURE(status)) {
+	status = acpi_ev_update_gpe_enable_masks(gpe_event_info);
+	if (ACPI_FAILURE(status))
 		return_ACPI_STATUS(status);
-	}
 
 	/* Mark wake-enabled or HW enable, or both */
 
-	switch (gpe_event_info->flags & ACPI_GPE_TYPE_MASK) {
-	case ACPI_GPE_TYPE_WAKE:
+	if (gpe_event_info->runtime_count) {
+		/* Clear the GPE (of stale events), then enable it */
+		status = acpi_hw_clear_gpe(gpe_event_info);
+		if (ACPI_FAILURE(status))
+			return_ACPI_STATUS(status);
 
-		ACPI_SET_BIT(gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED);
-		break;
-
-	case ACPI_GPE_TYPE_WAKE_RUN:
-
-		ACPI_SET_BIT(gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED);
-
-		/*lint -fallthrough */
-
-	case ACPI_GPE_TYPE_RUNTIME:
-
-		ACPI_SET_BIT(gpe_event_info->flags, ACPI_GPE_RUN_ENABLED);
-
-		if (write_to_hardware) {
-
-			/* Clear the GPE (of stale events), then enable it */
-
-			status = acpi_hw_clear_gpe(gpe_event_info);
-			if (ACPI_FAILURE(status)) {
-				return_ACPI_STATUS(status);
-			}
-
-			/* Enable the requested runtime GPE */
-
-			status = acpi_hw_write_gpe_enable_reg(gpe_event_info);
-		}
-		break;
-
-	default:
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
+		/* Enable the requested runtime GPE */
+		status = acpi_hw_write_gpe_enable_reg(gpe_event_info);
 	}
 
 	return_ACPI_STATUS(AE_OK);
@@ -252,34 +152,9 @@
 
 	/* Make sure HW enable masks are updated */
 
-	status =
-	    acpi_ev_update_gpe_enable_masks(gpe_event_info, ACPI_GPE_DISABLE);
-	if (ACPI_FAILURE(status)) {
+	status = acpi_ev_update_gpe_enable_masks(gpe_event_info);
+	if (ACPI_FAILURE(status))
 		return_ACPI_STATUS(status);
-	}
-
-	/* Clear the appropriate enabled flags for this GPE */
-
-	switch (gpe_event_info->flags & ACPI_GPE_TYPE_MASK) {
-	case ACPI_GPE_TYPE_WAKE:
-		ACPI_CLEAR_BIT(gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED);
-		break;
-
-	case ACPI_GPE_TYPE_WAKE_RUN:
-		ACPI_CLEAR_BIT(gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED);
-
-		/* fallthrough */
-
-	case ACPI_GPE_TYPE_RUNTIME:
-
-		/* Disable the requested runtime GPE */
-
-		ACPI_CLEAR_BIT(gpe_event_info->flags, ACPI_GPE_RUN_ENABLED);
-		break;
-
-	default:
-		break;
-	}
 
 	/*
 	 * Even if we don't know the GPE type, make sure that we always
@@ -521,7 +396,7 @@
 
 	/* Set the GPE flags for return to enabled state */
 
-	(void)acpi_ev_enable_gpe(gpe_event_info, FALSE);
+	(void)acpi_ev_update_gpe_enable_masks(gpe_event_info);
 
 	/*
 	 * Take a snapshot of the GPE info for this level - we copy the info to
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index 2479209..3d4c4ac 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -258,7 +258,6 @@
 	u32 gpe_number;
 	char name[ACPI_NAME_SIZE + 1];
 	u8 type;
-	acpi_status status;
 
 	ACPI_FUNCTION_TRACE(ev_save_method_info);
 
@@ -325,26 +324,20 @@
 
 	/*
 	 * Now we can add this information to the gpe_event_info block for use
-	 * during dispatch of this GPE. Default type is RUNTIME, although this may
-	 * change when the _PRW methods are executed later.
+	 * during dispatch of this GPE.
 	 */
 	gpe_event_info =
 	    &gpe_block->event_info[gpe_number - gpe_block->block_base_number];
 
-	gpe_event_info->flags = (u8)
-	    (type | ACPI_GPE_DISPATCH_METHOD | ACPI_GPE_TYPE_RUNTIME);
+	gpe_event_info->flags = (u8) (type | ACPI_GPE_DISPATCH_METHOD);
 
 	gpe_event_info->dispatch.method_node =
 	    (struct acpi_namespace_node *)obj_handle;
 
-	/* Update enable mask, but don't enable the HW GPE as of yet */
-
-	status = acpi_ev_enable_gpe(gpe_event_info, FALSE);
-
 	ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
 			  "Registered GPE method %s as GPE number 0x%.2X\n",
 			  name, gpe_number));
-	return_ACPI_STATUS(status);
+	return_ACPI_STATUS(AE_OK);
 }
 
 /*******************************************************************************
@@ -454,20 +447,7 @@
 							gpe_block->
 							block_base_number];
 
-		/* Mark GPE for WAKE-ONLY but WAKE_DISABLED */
-
-		gpe_event_info->flags &=
-		    ~(ACPI_GPE_WAKE_ENABLED | ACPI_GPE_RUN_ENABLED);
-
-		status =
-		    acpi_ev_set_gpe_type(gpe_event_info, ACPI_GPE_TYPE_WAKE);
-		if (ACPI_FAILURE(status)) {
-			goto cleanup;
-		}
-
-		status =
-		    acpi_ev_update_gpe_enable_masks(gpe_event_info,
-						    ACPI_GPE_DISABLE);
+		gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
 	}
 
       cleanup:
@@ -989,7 +969,6 @@
 acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
 			     struct acpi_gpe_block_info *gpe_block)
 {
-	acpi_status status;
 	struct acpi_gpe_event_info *gpe_event_info;
 	struct acpi_gpe_walk_info gpe_info;
 	u32 wake_gpe_count;
@@ -1019,42 +998,50 @@
 		gpe_info.gpe_block = gpe_block;
 		gpe_info.gpe_device = gpe_device;
 
-		status =
-		    acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+		acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
 					   ACPI_UINT32_MAX, ACPI_NS_WALK_UNLOCK,
 					   acpi_ev_match_prw_and_gpe, NULL,
 					   &gpe_info, NULL);
 	}
 
 	/*
-	 * Enable all GPEs in this block that have these attributes:
-	 * 1) are "runtime" or "run/wake" GPEs, and
-	 * 2) have a corresponding _Lxx or _Exx method
-	 *
-	 * Any other GPEs within this block must be enabled via the
-	 * acpi_enable_gpe() external interface.
+	 * Enable all GPEs that have a corresponding method and aren't
+	 * capable of generating wakeups. Any other GPEs within this block
+	 * must be enabled via the acpi_enable_gpe() interface.
 	 */
 	wake_gpe_count = 0;
 	gpe_enabled_count = 0;
+	if (gpe_device == acpi_gbl_fadt_gpe_device)
+		gpe_device = NULL;
 
 	for (i = 0; i < gpe_block->register_count; i++) {
-		for (j = 0; j < 8; j++) {
+		for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
+			acpi_status status;
+			acpi_size gpe_index;
+			int gpe_number;
 
 			/* Get the info block for this particular GPE */
+			gpe_index = (acpi_size)i * ACPI_GPE_REGISTER_WIDTH + j;
+			gpe_event_info = &gpe_block->event_info[gpe_index];
 
-			gpe_event_info = &gpe_block->event_info[((acpi_size) i *
-								 ACPI_GPE_REGISTER_WIDTH)
-								+ j];
-
-			if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
-			     ACPI_GPE_DISPATCH_METHOD) &&
-			    (gpe_event_info->flags & ACPI_GPE_TYPE_RUNTIME)) {
-				gpe_enabled_count++;
-			}
-
-			if (gpe_event_info->flags & ACPI_GPE_TYPE_WAKE) {
+			if (gpe_event_info->flags & ACPI_GPE_CAN_WAKE) {
 				wake_gpe_count++;
+				if (acpi_gbl_leave_wake_gpes_disabled)
+					continue;
 			}
+
+			if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_METHOD))
+				continue;
+
+			gpe_number = gpe_index + gpe_block->block_base_number;
+			status = acpi_enable_gpe(gpe_device, gpe_number,
+						ACPI_GPE_TYPE_RUNTIME);
+			if (ACPI_FAILURE(status))
+				ACPI_ERROR((AE_INFO,
+						"Failed to enable GPE %02X\n",
+						gpe_number));
+			else
+				gpe_enabled_count++;
 		}
 	}
 
@@ -1062,15 +1049,7 @@
 			  "Found %u Wake, Enabled %u Runtime GPEs in this block\n",
 			  wake_gpe_count, gpe_enabled_count));
 
-	/* Enable all valid runtime GPEs found above */
-
-	status = acpi_hw_enable_runtime_gpe_block(NULL, gpe_block, NULL);
-	if (ACPI_FAILURE(status)) {
-		ACPI_ERROR((AE_INFO, "Could not enable GPEs in GpeBlock %p",
-			    gpe_block));
-	}
-
-	return_ACPI_STATUS(status);
+	return_ACPI_STATUS(AE_OK);
 }
 
 /*******************************************************************************
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index ce224e1..8f0fac6 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -259,9 +259,15 @@
 
 	handler_obj = notify_info->notify.handler_obj;
 	if (handler_obj) {
-		handler_obj->notify.handler(notify_info->notify.node,
-					    notify_info->notify.value,
-					    handler_obj->notify.context);
+		struct acpi_object_notify_handler *notifier;
+
+		notifier = &handler_obj->notify;
+		while (notifier) {
+			notifier->handler(notify_info->notify.node,
+					  notify_info->notify.value,
+					  notifier->context);
+			notifier = notifier->next;
+		}
 	}
 
 	/* All done with the info object */
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index 2fe0809..474e2ca 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -218,6 +218,72 @@
 
 /*******************************************************************************
  *
+ * FUNCTION:    acpi_populate_handler_object
+ *
+ * PARAMETERS:  handler_obj        - Handler object to populate
+ *              handler_type       - The type of handler:
+ *                                  ACPI_SYSTEM_NOTIFY: system_handler (00-7f)
+ *                                  ACPI_DEVICE_NOTIFY: driver_handler (80-ff)
+ *                                  ACPI_ALL_NOTIFY:  both system and device
+ *              handler            - Address of the handler
+ *              context            - Value passed to the handler on each notify

+ *              next               - Address of a handler object to link to
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Populate a handler object.
+ *
+ ******************************************************************************/
+static void
+acpi_populate_handler_object(struct acpi_object_notify_handler *handler_obj,
+			     u32 handler_type,
+			     acpi_notify_handler handler, void *context,
+			     struct acpi_object_notify_handler *next)
+{
+	handler_obj->handler_type = handler_type;
+	handler_obj->handler = handler;
+	handler_obj->context = context;
+	handler_obj->next = next;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_add_handler_object
+ *
+ * PARAMETERS:  parent_obj         - Parent of the new object
+ *              handler            - Address of the handler
+ *              context            - Value passed to the handler on each notify
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Create a new handler object and populate it.
+ *
+ ******************************************************************************/
+static acpi_status
+acpi_add_handler_object(struct acpi_object_notify_handler *parent_obj,
+			acpi_notify_handler handler, void *context)
+{
+	struct acpi_object_notify_handler *handler_obj;
+
+	/* The parent must not be a device notify handler object. */
+	if (parent_obj->handler_type & ACPI_DEVICE_NOTIFY)
+		return AE_BAD_PARAMETER;
+
+	handler_obj = ACPI_ALLOCATE_ZEROED(sizeof(*handler_obj));
+	if (!handler_obj)
+		return AE_NO_MEMORY;
+
+	acpi_populate_handler_object(handler_obj,
+					ACPI_SYSTEM_NOTIFY,
+					handler, context,
+					parent_obj->next);
+	parent_obj->next = handler_obj;
+
+	return AE_OK;
+}
+
+/*******************************************************************************
+ *
  * FUNCTION:    acpi_install_notify_handler
  *
  * PARAMETERS:  Device          - The device for which notifies will be handled
@@ -316,15 +382,32 @@
 		obj_desc = acpi_ns_get_attached_object(node);
 		if (obj_desc) {
 
-			/* Object exists - make sure there's no handler */
+			/* Object exists. */
 
-			if (((handler_type & ACPI_SYSTEM_NOTIFY) &&
-			     obj_desc->common_notify.system_notify) ||
-			    ((handler_type & ACPI_DEVICE_NOTIFY) &&
-			     obj_desc->common_notify.device_notify)) {
+			/* For a device notify, make sure there's no handler. */
+			if ((handler_type & ACPI_DEVICE_NOTIFY) &&
+			     obj_desc->common_notify.device_notify) {
 				status = AE_ALREADY_EXISTS;
 				goto unlock_and_exit;
 			}
+
+			/* System notifies may have more handlers installed. */
+			notify_obj = obj_desc->common_notify.system_notify;
+
+			if ((handler_type & ACPI_SYSTEM_NOTIFY) && notify_obj) {
+				struct acpi_object_notify_handler *parent_obj;
+
+				if (handler_type & ACPI_DEVICE_NOTIFY) {
+					status = AE_ALREADY_EXISTS;
+					goto unlock_and_exit;
+				}
+
+				parent_obj = &notify_obj->notify;
+				status = acpi_add_handler_object(parent_obj,
+								 handler,
+								 context);
+				goto unlock_and_exit;
+			}
 		} else {
 			/* Create a new object */
 
@@ -356,9 +439,10 @@
 			goto unlock_and_exit;
 		}
 
-		notify_obj->notify.node = node;
-		notify_obj->notify.handler = handler;
-		notify_obj->notify.context = context;
+		acpi_populate_handler_object(&notify_obj->notify,
+						handler_type,
+						handler, context,
+						NULL);
 
 		if (handler_type & ACPI_SYSTEM_NOTIFY) {
 			obj_desc->common_notify.system_notify = notify_obj;
@@ -418,6 +502,10 @@
 		goto exit;
 	}
 
+
+	/* Make sure all deferred tasks are completed */
+	acpi_os_wait_events_complete(NULL);
+
 	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
 	if (ACPI_FAILURE(status)) {
 		goto exit;
@@ -445,15 +533,6 @@
 			goto unlock_and_exit;
 		}
 
-		/* Make sure all deferred tasks are completed */
-
-		(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-		acpi_os_wait_events_complete(NULL);
-		status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
-		if (ACPI_FAILURE(status)) {
-			goto exit;
-		}
-
 		if (handler_type & ACPI_SYSTEM_NOTIFY) {
 			acpi_gbl_system_notify.node = NULL;
 			acpi_gbl_system_notify.handler = NULL;
@@ -488,28 +567,60 @@
 		/* Object exists - make sure there's an existing handler */
 
 		if (handler_type & ACPI_SYSTEM_NOTIFY) {
+			struct acpi_object_notify_handler *handler_obj;
+			struct acpi_object_notify_handler *parent_obj;
+
 			notify_obj = obj_desc->common_notify.system_notify;
 			if (!notify_obj) {
 				status = AE_NOT_EXIST;
 				goto unlock_and_exit;
 			}
 
-			if (notify_obj->notify.handler != handler) {
+			handler_obj = &notify_obj->notify;
+			parent_obj = NULL;
+			while (handler_obj->handler != handler) {
+				if (handler_obj->next) {
+					parent_obj = handler_obj;
+					handler_obj = handler_obj->next;
+				} else {
+					break;
+				}
+			}
+
+			if (handler_obj->handler != handler) {
 				status = AE_BAD_PARAMETER;
 				goto unlock_and_exit;
 			}
-			/* Make sure all deferred tasks are completed */
 
-			(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-			acpi_os_wait_events_complete(NULL);
-			status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
-			if (ACPI_FAILURE(status)) {
-				goto exit;
+			/*
+			 * Remove the handler.  There are three possible cases.
+			 * First, we may need to remove a non-embedded object.
+			 * Second, we may need to remove the embedded object's
+			 * handler data, while non-embedded objects exist.
+			 * Finally, we may need to remove the embedded object
+			 * entirely along with its container.
+			 */
+			if (parent_obj) {
+				/* Non-embedded object is being removed. */
+				parent_obj->next = handler_obj->next;
+				ACPI_FREE(handler_obj);
+			} else if (notify_obj->notify.next) {
+				/*
+				 * The handler matches the embedded object, but
+				 * there are more handler objects in the list.
+				 * Replace the embedded object's data with the
+				 * first next object's data and remove that
+				 * object.
+				 */
+				parent_obj = &notify_obj->notify;
+				handler_obj = notify_obj->notify.next;
+				*parent_obj = *handler_obj;
+				ACPI_FREE(handler_obj);
+			} else {
+				/* No more handler objects in the list. */
+				obj_desc->common_notify.system_notify = NULL;
+				acpi_ut_remove_reference(notify_obj);
 			}
-
-			/* Remove the handler */
-			obj_desc->common_notify.system_notify = NULL;
-			acpi_ut_remove_reference(notify_obj);
 		}
 
 		if (handler_type & ACPI_DEVICE_NOTIFY) {
@@ -523,14 +634,6 @@
 				status = AE_BAD_PARAMETER;
 				goto unlock_and_exit;
 			}
-			/* Make sure all deferred tasks are completed */
-
-			(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-			acpi_os_wait_events_complete(NULL);
-			status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
-			if (ACPI_FAILURE(status)) {
-				goto exit;
-			}
 
 			/* Remove the handler */
 			obj_desc->common_notify.device_notify = NULL;
@@ -617,13 +720,6 @@
 	handler->context = context;
 	handler->method_node = gpe_event_info->dispatch.method_node;
 
-	/* Disable the GPE before installing the handler */
-
-	status = acpi_ev_disable_gpe(gpe_event_info);
-	if (ACPI_FAILURE(status)) {
-		goto unlock_and_exit;
-	}
-
 	/* Install the handler */
 
 	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
@@ -707,13 +803,6 @@
 		goto unlock_and_exit;
 	}
 
-	/* Disable the GPE before removing the handler */
-
-	status = acpi_ev_disable_gpe(gpe_event_info);
-	if (ACPI_FAILURE(status)) {
-		goto unlock_and_exit;
-	}
-
 	/* Make sure all deferred tasks are completed */
 
 	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
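
With the notify handler object now carrying a next pointer, more than one system notify handler can be
installed on the same namespace node; the dispatch code in evmisc.c walks the chain and invokes each in
turn.  A minimal sketch of two subsystems registering on one device handle, assuming both only care about
system notifications (the handler bodies, function names and device handle are placeholders):

#include <linux/acpi.h>

static void first_notify(acpi_handle handle, u32 event, void *context)
{
	/* react to system notify values 0x00-0x7f */
}

static void second_notify(acpi_handle handle, u32 event, void *context)
{
	/* a second, independent consumer of the same notifications */
}

static acpi_status install_two_handlers(acpi_handle device)
{
	acpi_status status;

	status = acpi_install_notify_handler(device, ACPI_SYSTEM_NOTIFY,
					     first_notify, NULL);
	if (ACPI_FAILURE(status))
		return status;

	/* Previously this second install failed with AE_ALREADY_EXISTS;
	 * with this change it is chained onto the existing handler object. */
	return acpi_install_notify_handler(device, ACPI_SYSTEM_NOTIFY,
					   second_notify, NULL);
}
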
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index eed7a38..124c157 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -201,67 +201,25 @@
 
 /*******************************************************************************
  *
- * FUNCTION:    acpi_set_gpe_type
+ * FUNCTION:    acpi_set_gpe
  *
  * PARAMETERS:  gpe_device      - Parent GPE Device
  *              gpe_number      - GPE level within the GPE block
- *              Type            - New GPE type
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Set the type of an individual GPE
- *
- ******************************************************************************/
-acpi_status acpi_set_gpe_type(acpi_handle gpe_device, u32 gpe_number, u8 type)
-{
-	acpi_status status = AE_OK;
-	struct acpi_gpe_event_info *gpe_event_info;
-
-	ACPI_FUNCTION_TRACE(acpi_set_gpe_type);
-
-	/* Ensure that we have a valid GPE number */
-
-	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
-	if (!gpe_event_info) {
-		status = AE_BAD_PARAMETER;
-		goto unlock_and_exit;
-	}
-
-	if ((gpe_event_info->flags & ACPI_GPE_TYPE_MASK) == type) {
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	/* Set the new type (will disable GPE if currently enabled) */
-
-	status = acpi_ev_set_gpe_type(gpe_event_info, type);
-
-      unlock_and_exit:
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_set_gpe_type)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_enable_gpe
- *
- * PARAMETERS:  gpe_device      - Parent GPE Device
- *              gpe_number      - GPE level within the GPE block
- *              Flags           - Just enable, or also wake enable?
+ *              action          - Enable or disable
  *                                Called from ISR or not
  *
  * RETURN:      Status
  *
- * DESCRIPTION: Enable an ACPI event (general purpose)
+ * DESCRIPTION: Enable or disable an ACPI event (general purpose)
  *
  ******************************************************************************/
-acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number)
+acpi_status acpi_set_gpe(acpi_handle gpe_device, u32 gpe_number, u8 action)
 {
 	acpi_status status = AE_OK;
 	acpi_cpu_flags flags;
 	struct acpi_gpe_event_info *gpe_event_info;
 
-	ACPI_FUNCTION_TRACE(acpi_enable_gpe);
+	ACPI_FUNCTION_TRACE(acpi_set_gpe);
 
 	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
 
@@ -273,15 +231,90 @@
 		goto unlock_and_exit;
 	}
 
-	/* Perform the enable */
+	/* Perform the action */
 
-	status = acpi_ev_enable_gpe(gpe_event_info, TRUE);
+	switch (action) {
+	case ACPI_GPE_ENABLE:
+		status = acpi_ev_enable_gpe(gpe_event_info);
+		break;
+
+	case ACPI_GPE_DISABLE:
+		status = acpi_ev_disable_gpe(gpe_event_info);
+		break;
+
+	default:
+		ACPI_ERROR((AE_INFO, "Invalid action\n"));
+		status = AE_BAD_PARAMETER;
+		break;
+	}
 
       unlock_and_exit:
 	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
 	return_ACPI_STATUS(status);
 }
 
+ACPI_EXPORT_SYMBOL(acpi_set_gpe)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_enable_gpe
+ *
+ * PARAMETERS:  gpe_device      - Parent GPE Device
+ *              gpe_number      - GPE level within the GPE block
+ *              type            - Purpose the GPE will be used for
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Take a reference to a GPE and enable it if necessary
+ *
+ ******************************************************************************/
+acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 type)
+{
+	acpi_status status = AE_OK;
+	acpi_cpu_flags flags;
+	struct acpi_gpe_event_info *gpe_event_info;
+
+	ACPI_FUNCTION_TRACE(acpi_enable_gpe);
+
+	if (type & ~ACPI_GPE_TYPE_WAKE_RUN)
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+
+	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+	/* Ensure that we have a valid GPE number */
+
+	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
+	if (!gpe_event_info) {
+		status = AE_BAD_PARAMETER;
+		goto unlock_and_exit;
+	}
+
+	if (type & ACPI_GPE_TYPE_RUNTIME) {
+		if (++gpe_event_info->runtime_count == 1) {
+			status = acpi_ev_enable_gpe(gpe_event_info);
+			if (ACPI_FAILURE(status))
+				gpe_event_info->runtime_count--;
+		}
+	}
+
+	if (type & ACPI_GPE_TYPE_WAKE) {
+		if (!(gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
+			status = AE_BAD_PARAMETER;
+			goto unlock_and_exit;
+		}
+
+		/*
+		 * Wake-up GPEs are only enabled right prior to putting the
+		 * system into a sleep state.
+		 */
+		if (++gpe_event_info->wakeup_count == 1)
+			acpi_ev_update_gpe_enable_masks(gpe_event_info);
+	}
+
+unlock_and_exit:
+	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+	return_ACPI_STATUS(status);
+}
 ACPI_EXPORT_SYMBOL(acpi_enable_gpe)
 
 /*******************************************************************************
@@ -290,15 +323,14 @@
  *
  * PARAMETERS:  gpe_device      - Parent GPE Device
  *              gpe_number      - GPE level within the GPE block
- *              Flags           - Just disable, or also wake disable?
- *                                Called from ISR or not
+ *              type            - Purpose the GPE won't be used for any more
  *
  * RETURN:      Status
  *
- * DESCRIPTION: Disable an ACPI event (general purpose)
+ * DESCRIPTION: Release a reference to a GPE and disable it if necessary
  *
  ******************************************************************************/
-acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number)
+acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 type)
 {
 	acpi_status status = AE_OK;
 	acpi_cpu_flags flags;
@@ -306,6 +338,9 @@
 
 	ACPI_FUNCTION_TRACE(acpi_disable_gpe);
 
+	if (type & ~ACPI_GPE_TYPE_WAKE_RUN)
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+
 	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
 	/* Ensure that we have a valid GPE number */
 
@@ -315,13 +350,24 @@
 		goto unlock_and_exit;
 	}
 
-	status = acpi_ev_disable_gpe(gpe_event_info);
+	if ((type & ACPI_GPE_TYPE_RUNTIME) && gpe_event_info->runtime_count) {
+		if (--gpe_event_info->runtime_count == 0)
+			status = acpi_ev_disable_gpe(gpe_event_info);
+	}
+
+	if ((type & ACPI_GPE_TYPE_WAKE) && gpe_event_info->wakeup_count) {
+		/*
+		 * Wake-up GPEs are not enabled after leaving system sleep
+		 * states, so we don't need to disable them here.
+		 */
+		if (--gpe_event_info->wakeup_count == 0)
+			acpi_ev_update_gpe_enable_masks(gpe_event_info);
+	}
 
 unlock_and_exit:
 	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
 	return_ACPI_STATUS(status);
 }
-
 ACPI_EXPORT_SYMBOL(acpi_disable_gpe)
 
 /*******************************************************************************
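
Under the new scheme acpi_enable_gpe() and acpi_disable_gpe() are reference counted per type, while
acpi_set_gpe() toggles the hardware unconditionally.  A minimal sketch of how a driver might share a
runtime GPE with the rest of the kernel under these semantics (the function name and GPE number are
placeholders; NULL selects the FADT GPE block, as in the EC driver below):

#include <linux/types.h>
#include <linux/acpi.h>

static acpi_status use_shared_runtime_gpe(u32 gpe_number)
{
	acpi_status status;

	/* Take a runtime reference; the GPE is enabled on the 0 -> 1 transition. */
	status = acpi_enable_gpe(NULL, gpe_number, ACPI_GPE_TYPE_RUNTIME);
	if (ACPI_FAILURE(status))
		return status;

	/* ... handle events ... */

	/* Drop the reference; the GPE is disabled only when the count reaches 0. */
	return acpi_disable_gpe(NULL, gpe_number, ACPI_GPE_TYPE_RUNTIME);
}

acpi_set_gpe() remains available for paths such as the EC storm workaround further down, where the event
must be masked at the hardware level without disturbing the reference count.
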
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 8a95e83..f53fbe3 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -422,11 +422,10 @@
 
 	if (device->wakeup.flags.valid) {
 		/* Button's GPE is run-wake GPE */
-		acpi_set_gpe_type(device->wakeup.gpe_device,
-				  device->wakeup.gpe_number,
-				  ACPI_GPE_TYPE_WAKE_RUN);
 		acpi_enable_gpe(device->wakeup.gpe_device,
-				device->wakeup.gpe_number);
+				device->wakeup.gpe_number,
+				ACPI_GPE_TYPE_WAKE_RUN);
+		device->wakeup.run_wake_count++;
 		device->wakeup.state.enabled = 1;
 	}
 
@@ -446,6 +445,14 @@
 {
 	struct acpi_button *button = acpi_driver_data(device);
 
+	if (device->wakeup.flags.valid) {
+		acpi_disable_gpe(device->wakeup.gpe_device,
+				device->wakeup.gpe_number,
+				ACPI_GPE_TYPE_WAKE_RUN);
+		device->wakeup.run_wake_count--;
+		device->wakeup.state.enabled = 0;
+	}
+
 	acpi_button_remove_fs(device);
 	input_unregister_device(button->input);
 	kfree(button);
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index d6471bb..27e0b92 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -307,7 +307,11 @@
 	pr_debug(PREFIX "transaction start\n");
 	/* disable GPE during transaction if storm is detected */
 	if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
-		acpi_disable_gpe(NULL, ec->gpe);
+		/*
+		 * It has to be disabled at the hardware level regardless of the
+		 * GPE reference counting, so that it doesn't trigger.
+		 */
+		acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);
 	}
 
 	status = acpi_ec_transaction_unlocked(ec, t);
@@ -316,8 +320,12 @@
 	ec_check_sci_sync(ec, acpi_ec_read_status(ec));
 	if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
 		msleep(1);
-		/* it is safe to enable GPE outside of transaction */
-		acpi_enable_gpe(NULL, ec->gpe);
+		/*
+		 * It is safe to enable the GPE outside of the transaction.  Use
+		 * acpi_set_gpe() for that, since we used it to disable the GPE
+		 * above.
+		 */
+		acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);
 	} else if (t->irq_count > ACPI_EC_STORM_THRESHOLD) {
 		pr_info(PREFIX "GPE storm detected, "
 			"transactions will use polling mode\n");
@@ -788,8 +796,8 @@
 				  &acpi_ec_gpe_handler, ec);
 	if (ACPI_FAILURE(status))
 		return -ENODEV;
-	acpi_set_gpe_type(NULL, ec->gpe, ACPI_GPE_TYPE_RUNTIME);
-	acpi_enable_gpe(NULL, ec->gpe);
+
+	acpi_enable_gpe(NULL, ec->gpe, ACPI_GPE_TYPE_RUNTIME);
 	status = acpi_install_address_space_handler(ec->handle,
 						    ACPI_ADR_SPACE_EC,
 						    &acpi_ec_space_handler,
@@ -806,6 +814,7 @@
 		} else {
 			acpi_remove_gpe_handler(NULL, ec->gpe,
 				&acpi_ec_gpe_handler);
+			acpi_disable_gpe(NULL, ec->gpe, ACPI_GPE_TYPE_RUNTIME);
 			return -ENODEV;
 		}
 	}
@@ -816,6 +825,7 @@
 
 static void ec_remove_handlers(struct acpi_ec *ec)
 {
+	acpi_disable_gpe(NULL, ec->gpe, ACPI_GPE_TYPE_RUNTIME);
 	if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
 				ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
 		pr_err(PREFIX "failed to remove space handler\n");
@@ -1057,16 +1067,16 @@
 static int acpi_ec_suspend(struct acpi_device *device, pm_message_t state)
 {
 	struct acpi_ec *ec = acpi_driver_data(device);
-	/* Stop using GPE */
-	acpi_disable_gpe(NULL, ec->gpe);
+	/* Stop using the GPE, but keep it reference counted. */
+	acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);
 	return 0;
 }
 
 static int acpi_ec_resume(struct acpi_device *device)
 {
 	struct acpi_ec *ec = acpi_driver_data(device);
-	/* Enable use of GPE back */
-	acpi_enable_gpe(NULL, ec->gpe);
+	/* Enable the GPE again, but don't reference count it once more. */
+	acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);
 	return 0;
 }
 
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index cb28e05..9c4c962 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -36,8 +36,6 @@
 int acpi_power_init(void);
 int acpi_device_sleep_wake(struct acpi_device *dev,
                            int enable, int sleep_state, int dev_state);
-int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state);
-int acpi_disable_wakeup_device_power(struct acpi_device *dev);
 int acpi_power_get_inferred_state(struct acpi_device *device);
 int acpi_power_transition(struct acpi_device *device, int state);
 extern int acpi_power_nocheck;
diff --git a/drivers/acpi/pci_bind.c b/drivers/acpi/pci_bind.c
index a5a77b7..2ef0409 100644
--- a/drivers/acpi/pci_bind.c
+++ b/drivers/acpi/pci_bind.c
@@ -26,7 +26,9 @@
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/pci.h>
+#include <linux/pci-acpi.h>
 #include <linux/acpi.h>
+#include <linux/pm_runtime.h>
 #include <acpi/acpi_bus.h>
 #include <acpi/acpi_drivers.h>
 
@@ -38,7 +40,13 @@
 	struct pci_dev *dev;
 
 	dev = acpi_get_pci_dev(device->handle);
-	if (!dev || !dev->subordinate)
+	if (!dev)
+		goto out;
+
+	device_set_run_wake(&dev->dev, false);
+	pci_acpi_remove_pm_notifier(device);
+
+	if (!dev->subordinate)
 		goto out;
 
 	acpi_pci_irq_del_prt(dev->subordinate);
@@ -62,6 +70,10 @@
 	if (!dev)
 		return 0;
 
+	pci_acpi_add_pm_notifier(device, dev);
+	if (device->wakeup.flags.run_wake)
+		device_set_run_wake(&dev->dev, true);
+
 	/*
 	 * Install the 'bind' function to facilitate callbacks for
 	 * children of the P2P bridge.
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 64f55b6..d724736 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -30,6 +30,7 @@
 #include <linux/proc_fs.h>
 #include <linux/spinlock.h>
 #include <linux/pm.h>
+#include <linux/pm_runtime.h>
 #include <linux/pci.h>
 #include <linux/pci-acpi.h>
 #include <linux/acpi.h>
@@ -528,6 +529,10 @@
 	if (flags != base_flags)
 		acpi_pci_osc_support(root, flags);
 
+	pci_acpi_add_bus_pm_notifier(device, root->bus);
+	if (device->wakeup.flags.run_wake)
+		device_set_run_wake(root->bus->bridge, true);
+
 	return 0;
 
 end:
@@ -549,6 +554,9 @@
 {
 	struct acpi_pci_root *root = acpi_driver_data(device);
 
+	device_set_run_wake(root->bus->bridge, false);
+	pci_acpi_remove_bus_pm_notifier(device);
+
 	kfree(root);
 	return 0;
 }
@@ -558,6 +566,7 @@
 	if (acpi_pci_disabled)
 		return 0;
 
+	pci_acpi_crs_quirks();
 	if (acpi_bus_register_driver(&acpi_pci_root_driver) < 0)
 		return -ENODEV;
 
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 3e00967..fb7fc24 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -741,19 +741,40 @@
 	return AE_OK;
 }
 
-static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
+static void acpi_bus_set_run_wake_flags(struct acpi_device *device)
 {
-	acpi_status status = 0;
-	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
-	union acpi_object *package = NULL;
-	int psw_error;
-
 	struct acpi_device_id button_device_ids[] = {
 		{"PNP0C0D", 0},
 		{"PNP0C0C", 0},
 		{"PNP0C0E", 0},
 		{"", 0},
 	};
+	acpi_status status;
+	acpi_event_status event_status;
+
+	device->wakeup.run_wake_count = 0;
+	device->wakeup.flags.notifier_present = 0;
+
+	/* Power button, Lid switch always enable wakeup */
+	if (!acpi_match_device_ids(device, button_device_ids)) {
+		device->wakeup.flags.run_wake = 1;
+		device->wakeup.flags.always_enabled = 1;
+		return;
+	}
+
+	status = acpi_get_gpe_status(NULL, device->wakeup.gpe_number,
+					ACPI_NOT_ISR, &event_status);
+	if (status == AE_OK)
+		device->wakeup.flags.run_wake =
+				!!(event_status & ACPI_EVENT_FLAG_HANDLE);
+}
+
+static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
+{
+	acpi_status status = 0;
+	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+	union acpi_object *package = NULL;
+	int psw_error;
 
 	/* _PRW */
 	status = acpi_evaluate_object(device->handle, "_PRW", NULL, &buffer);
@@ -773,6 +794,7 @@
 
 	device->wakeup.flags.valid = 1;
 	device->wakeup.prepare_count = 0;
+	acpi_bus_set_run_wake_flags(device);
 	/* Call _PSW/_DSW object to disable its ability to wake the sleeping
 	 * system for the ACPI device with the _PRW object.
 	 * The _PSW object is depreciated in ACPI 3.0 and is replaced by _DSW.
@@ -784,10 +806,6 @@
 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 				"error in _DSW or _PSW evaluation\n"));
 
-	/* Power button, Lid switch always enable wakeup */
-	if (!acpi_match_device_ids(device, button_device_ids))
-		device->wakeup.flags.run_wake = 1;
-
 end:
 	if (ACPI_FAILURE(status))
 		device->flags.wake_capable = 0;
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 79d33d9..3bde594 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -745,9 +745,18 @@
 		return -ENODEV;
 	}
 
-	error = enable ?
-		acpi_enable_wakeup_device_power(adev, acpi_target_sleep_state) :
-		acpi_disable_wakeup_device_power(adev);
+	if (enable) {
+		error = acpi_enable_wakeup_device_power(adev,
+						acpi_target_sleep_state);
+		if (!error)
+			acpi_enable_gpe(adev->wakeup.gpe_device,
+					adev->wakeup.gpe_number,
+					ACPI_GPE_TYPE_WAKE);
+	} else {
+		acpi_disable_gpe(adev->wakeup.gpe_device, adev->wakeup.gpe_number,
+				ACPI_GPE_TYPE_WAKE);
+		error = acpi_disable_wakeup_device_power(adev);
+	}
 	if (!error)
 		dev_info(dev, "wake-up capability %s by ACPI\n",
 				enable ? "enabled" : "disabled");
diff --git a/drivers/acpi/system.c b/drivers/acpi/system.c
index d112829..a206a12 100644
--- a/drivers/acpi/system.c
+++ b/drivers/acpi/system.c
@@ -387,10 +387,10 @@
 	if (index < num_gpes) {
 		if (!strcmp(buf, "disable\n") &&
 				(status & ACPI_EVENT_FLAG_ENABLED))
-			result = acpi_disable_gpe(handle, index);
+			result = acpi_set_gpe(handle, index, ACPI_GPE_DISABLE);
 		else if (!strcmp(buf, "enable\n") &&
 				!(status & ACPI_EVENT_FLAG_ENABLED))
-			result = acpi_enable_gpe(handle, index);
+			result = acpi_set_gpe(handle, index, ACPI_GPE_ENABLE);
 		else if (!strcmp(buf, "clear\n") &&
 				(status & ACPI_EVENT_FLAG_SET))
 			result = acpi_clear_gpe(handle, index, ACPI_NOT_ISR);
diff --git a/drivers/acpi/wakeup.c b/drivers/acpi/wakeup.c
index e0ee0c0..4b9d339 100644
--- a/drivers/acpi/wakeup.c
+++ b/drivers/acpi/wakeup.c
@@ -21,12 +21,12 @@
 ACPI_MODULE_NAME("wakeup_devices")
 
 /**
- * acpi_enable_wakeup_device_prep - prepare wakeup devices
- *	@sleep_state:	ACPI state
- * Enable all wakup devices power if the devices' wakeup level
- * is higher than requested sleep level
+ * acpi_enable_wakeup_device_prep - Prepare wake-up devices.
+ * @sleep_state: ACPI system sleep state.
+ *
+ * Enable all wake-up devices' power, unless the requested system sleep state is
+ * too deep.
  */
-
 void acpi_enable_wakeup_device_prep(u8 sleep_state)
 {
 	struct list_head *node, *next;
@@ -36,9 +36,8 @@
 						       struct acpi_device,
 						       wakeup_list);
 
-		if (!dev->wakeup.flags.valid ||
-		    !dev->wakeup.state.enabled ||
-		    (sleep_state > (u32) dev->wakeup.sleep_state))
+		if (!dev->wakeup.flags.valid || !dev->wakeup.state.enabled
+		    || (sleep_state > (u32) dev->wakeup.sleep_state))
 			continue;
 
 		acpi_enable_wakeup_device_power(dev, sleep_state);
@@ -46,9 +45,12 @@
 }
 
 /**
- * acpi_enable_wakeup_device - enable wakeup devices
- *	@sleep_state:	ACPI state
- * Enable all wakup devices's GPE
+ * acpi_enable_wakeup_device - Enable wake-up device GPEs.
+ * @sleep_state: ACPI system sleep state.
+ *
+ * Enable all wake-up devices' GPEs, with the assumption that
+ * acpi_disable_all_gpes() was executed before, so we don't need to disable any
+ * GPEs here.
  */
 void acpi_enable_wakeup_device(u8 sleep_state)
 {
@@ -65,29 +67,22 @@
 		if (!dev->wakeup.flags.valid)
 			continue;
 
-		/* If users want to disable run-wake GPE,
-		 * we only disable it for wake and leave it for runtime
-		 */
 		if ((!dev->wakeup.state.enabled && !dev->wakeup.prepare_count)
-		    || sleep_state > (u32) dev->wakeup.sleep_state) {
-			if (dev->wakeup.flags.run_wake) {
-				/* set_gpe_type will disable GPE, leave it like that */
-				acpi_set_gpe_type(dev->wakeup.gpe_device,
-						  dev->wakeup.gpe_number,
-						  ACPI_GPE_TYPE_RUNTIME);
-			}
+		    || sleep_state > (u32) dev->wakeup.sleep_state)
 			continue;
-		}
-		if (!dev->wakeup.flags.run_wake)
-			acpi_enable_gpe(dev->wakeup.gpe_device,
-					dev->wakeup.gpe_number);
+
+		/* The wake-up power should have been enabled already. */
+		acpi_set_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number,
+				ACPI_GPE_ENABLE);
 	}
 }
 
 /**
- * acpi_disable_wakeup_device - disable devices' wakeup capability
- *	@sleep_state:	ACPI state
- * Disable all wakup devices's GPE and wakeup capability
+ * acpi_disable_wakeup_device - Disable devices' wakeup capability.
+ * @sleep_state: ACPI system sleep state.
+ *
+ * This function only affects devices with wakeup.state.enabled set, which means
+ * that it reverses the changes made by acpi_enable_wakeup_device_prep().
  */
 void acpi_disable_wakeup_device(u8 sleep_state)
 {
@@ -97,30 +92,11 @@
 		struct acpi_device *dev =
 			container_of(node, struct acpi_device, wakeup_list);
 
-		if (!dev->wakeup.flags.valid)
+		if (!dev->wakeup.flags.valid || !dev->wakeup.state.enabled
+		    || (sleep_state > (u32) dev->wakeup.sleep_state))
 			continue;
 
-		if ((!dev->wakeup.state.enabled && !dev->wakeup.prepare_count)
-		    || sleep_state > (u32) dev->wakeup.sleep_state) {
-			if (dev->wakeup.flags.run_wake) {
-				acpi_set_gpe_type(dev->wakeup.gpe_device,
-						  dev->wakeup.gpe_number,
-						  ACPI_GPE_TYPE_WAKE_RUN);
-				/* Re-enable it, since set_gpe_type will disable it */
-				acpi_enable_gpe(dev->wakeup.gpe_device,
-						dev->wakeup.gpe_number);
-			}
-			continue;
-		}
-
 		acpi_disable_wakeup_device_power(dev);
-		/* Never disable run-wake GPE */
-		if (!dev->wakeup.flags.run_wake) {
-			acpi_disable_gpe(dev->wakeup.gpe_device,
-					 dev->wakeup.gpe_number);
-			acpi_clear_gpe(dev->wakeup.gpe_device,
-				       dev->wakeup.gpe_number, ACPI_NOT_ISR);
-		}
 	}
 }
 
@@ -134,13 +110,11 @@
 						       struct acpi_device,
 						       wakeup_list);
 		/* In case user doesn't load button driver */
-		if (!dev->wakeup.flags.run_wake || dev->wakeup.state.enabled)
+		if (!dev->wakeup.flags.always_enabled ||
+		    dev->wakeup.state.enabled)
 			continue;
-		acpi_set_gpe_type(dev->wakeup.gpe_device,
-				  dev->wakeup.gpe_number,
-				  ACPI_GPE_TYPE_WAKE_RUN);
-		acpi_enable_gpe(dev->wakeup.gpe_device,
-				dev->wakeup.gpe_number);
+		acpi_enable_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number,
+				ACPI_GPE_TYPE_WAKE);
 		dev->wakeup.state.enabled = 1;
 	}
 	mutex_unlock(&acpi_device_lock);
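
The wakeup hunks above drop the acpi_set_gpe_type()/acpi_enable_gpe() pairing in
favour of the reference-counted GPE calls.  A condensed, illustrative sketch of
the per-device logic after this change (sketch_arm_wakeup() is hypothetical and
only restates what the hunks do; it is not a function added by the patch):

#include <linux/acpi.h>

static void sketch_arm_wakeup(struct acpi_device *dev, u8 sleep_state)
{
	if (!dev->wakeup.flags.valid || !dev->wakeup.state.enabled ||
	    sleep_state > (u32) dev->wakeup.sleep_state)
		return;

	/*
	 * Wake-up power is still turned on earlier, in
	 * acpi_enable_wakeup_device_prep(); here only the GPE is armed,
	 * and a single acpi_set_gpe(..., ACPI_GPE_ENABLE) now stands in
	 * for the old set-type-then-enable sequence.
	 */
	acpi_set_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number,
		     ACPI_GPE_ENABLE);
}
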
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index b343903..a6a736a 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -3082,8 +3082,16 @@
 	ahci_save_initial_config(pdev, hpriv);
 
 	/* prepare host */
-	if (hpriv->cap & HOST_CAP_NCQ)
-		pi.flags |= ATA_FLAG_NCQ | ATA_FLAG_FPDMA_AA;
+	if (hpriv->cap & HOST_CAP_NCQ) {
+		pi.flags |= ATA_FLAG_NCQ;
+		/* Auto-activate optimization is supposed to be supported on
+		   all AHCI controllers indicating NCQ support, but it seems
+		   to be broken at least on some NVIDIA MCP79 chipsets.
+		   Until we get info on which NVIDIA chipsets don't have this
+		   issue, if any, disable AA on all NVIDIA AHCIs. */
+		if (pdev->vendor != PCI_VENDOR_ID_NVIDIA)
+			pi.flags |= ATA_FLAG_FPDMA_AA;
+	}
 
 	if (hpriv->cap & HOST_CAP_PMP)
 		pi.flags |= ATA_FLAG_PMP;
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 51042f0ba7..7eff828 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -243,10 +243,12 @@
 static int __devinit virtblk_probe(struct virtio_device *vdev)
 {
 	struct virtio_blk *vblk;
+	struct request_queue *q;
 	int err;
 	u64 cap;
-	u32 v;
-	u32 blk_size, sg_elems;
+	u32 v, blk_size, sg_elems, opt_io_size;
+	u16 min_io_size;
+	u8 physical_block_exp, alignment_offset;
 
 	if (index_to_minor(index) >= 1 << MINORBITS)
 		return -ENOSPC;
@@ -293,13 +295,13 @@
 		goto out_mempool;
 	}
 
-	vblk->disk->queue = blk_init_queue(do_virtblk_request, &vblk->lock);
-	if (!vblk->disk->queue) {
+	q = vblk->disk->queue = blk_init_queue(do_virtblk_request, &vblk->lock);
+	if (!q) {
 		err = -ENOMEM;
 		goto out_put_disk;
 	}
 
-	vblk->disk->queue->queuedata = vblk;
+	q->queuedata = vblk;
 
 	if (index < 26) {
 		sprintf(vblk->disk->disk_name, "vd%c", 'a' + index % 26);
@@ -323,10 +325,10 @@
 
 	/* If barriers are supported, tell block layer that queue is ordered */
 	if (virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH))
-		blk_queue_ordered(vblk->disk->queue, QUEUE_ORDERED_DRAIN_FLUSH,
+		blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH,
 				  virtblk_prepare_flush);
 	else if (virtio_has_feature(vdev, VIRTIO_BLK_F_BARRIER))
-		blk_queue_ordered(vblk->disk->queue, QUEUE_ORDERED_TAG, NULL);
+		blk_queue_ordered(q, QUEUE_ORDERED_TAG, NULL);
 
 	/* If disk is read-only in the host, the guest should obey */
 	if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
@@ -345,14 +347,14 @@
 	set_capacity(vblk->disk, cap);
 
 	/* We can handle whatever the host told us to handle. */
-	blk_queue_max_phys_segments(vblk->disk->queue, vblk->sg_elems-2);
-	blk_queue_max_hw_segments(vblk->disk->queue, vblk->sg_elems-2);
+	blk_queue_max_phys_segments(q, vblk->sg_elems-2);
+	blk_queue_max_hw_segments(q, vblk->sg_elems-2);
 
 	/* No need to bounce any requests */
-	blk_queue_bounce_limit(vblk->disk->queue, BLK_BOUNCE_ANY);
+	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
 
 	/* No real sector limit. */
-	blk_queue_max_sectors(vblk->disk->queue, -1U);
+	blk_queue_max_sectors(q, -1U);
 
 	/* Host can optionally specify maximum segment size and number of
 	 * segments. */
@@ -360,16 +362,45 @@
 				offsetof(struct virtio_blk_config, size_max),
 				&v);
 	if (!err)
-		blk_queue_max_segment_size(vblk->disk->queue, v);
+		blk_queue_max_segment_size(q, v);
 	else
-		blk_queue_max_segment_size(vblk->disk->queue, -1U);
+		blk_queue_max_segment_size(q, -1U);
 
 	/* Host can optionally specify the block size of the device */
 	err = virtio_config_val(vdev, VIRTIO_BLK_F_BLK_SIZE,
 				offsetof(struct virtio_blk_config, blk_size),
 				&blk_size);
 	if (!err)
-		blk_queue_logical_block_size(vblk->disk->queue, blk_size);
+		blk_queue_logical_block_size(q, blk_size);
+	else
+		blk_size = queue_logical_block_size(q);
+
+	/* Use topology information if available */
+	err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
+			offsetof(struct virtio_blk_config, physical_block_exp),
+			&physical_block_exp);
+	if (!err && physical_block_exp)
+		blk_queue_physical_block_size(q,
+				blk_size * (1 << physical_block_exp));
+
+	err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
+			offsetof(struct virtio_blk_config, alignment_offset),
+			&alignment_offset);
+	if (!err && alignment_offset)
+		blk_queue_alignment_offset(q, blk_size * alignment_offset);
+
+	err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
+			offsetof(struct virtio_blk_config, min_io_size),
+			&min_io_size);
+	if (!err && min_io_size)
+		blk_queue_io_min(q, blk_size * min_io_size);
+
+	err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
+			offsetof(struct virtio_blk_config, opt_io_size),
+			&opt_io_size);
+	if (!err && opt_io_size)
+		blk_queue_io_opt(q, blk_size * opt_io_size);
+
 
 	add_disk(vblk->disk);
 	return 0;
@@ -412,7 +443,7 @@
 static unsigned int features[] = {
 	VIRTIO_BLK_F_BARRIER, VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX,
 	VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
-	VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_FLUSH
+	VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY
 };
 
 /*
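
The VIRTIO_BLK_F_TOPOLOGY hunks above read four optional config fields and scale
each one by the logical block size before handing it to the block layer.  A
standalone sketch of that arithmetic, using made-up example values (only the
formulas mirror the virtblk_probe() code):

#include <stdio.h>

int main(void)
{
	unsigned int blk_size = 512;		/* logical block size */
	unsigned char physical_block_exp = 3;	/* 2^3 logical blocks */
	unsigned char alignment_offset = 1;
	unsigned short min_io_size = 8;
	unsigned int opt_io_size = 256;

	printf("physical block size: %u\n",
	       blk_size * (1 << physical_block_exp));
	printf("alignment offset:    %u\n", blk_size * alignment_offset);
	printf("io_min:              %u\n", blk_size * min_io_size);
	printf("io_opt:              %u\n", blk_size * opt_io_size);
	return 0;
}
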
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index e023682..3141dd3 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -666,6 +666,14 @@
 	help
 	  Virtio console for use with lguest and other hypervisors.
 
+	  Also serves as a general-purpose serial device for data
+	  transfer between the guest and host.  Character devices at
+	  /dev/vportNpn will be created when corresponding ports are
+	  found, where N is the device number and n is the port number
+	  within that device.  If specified by the host, a sysfs
+	  attribute called 'name' will be populated with a name for
+	  the port which can be used by udev scripts to create a
+	  symlink to the device.
 
 config HVCS
 	tristate "IBM Hypervisor Virtual Console Server support"
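
The help text above describes the /dev/vportNpn character devices created for
non-console ports.  A minimal userspace sketch of how such a port might be
exercised; the path /dev/vport0p1 is only an example, and the read blocks until
the host side sends data (or returns 0 if the host is not connected):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char reply[256];
	ssize_t n;
	int fd = open("/dev/vport0p1", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "hello host\n", 11) < 0)
		perror("write");
	n = read(fd, reply, sizeof(reply) - 1);
	if (n > 0) {
		reply[n] = '\0';
		printf("host said: %s", reply);
	}
	close(fd);
	return 0;
}
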
diff --git a/drivers/char/hvc_beat.c b/drivers/char/hvc_beat.c
index 0afc8b8..5fe4631 100644
--- a/drivers/char/hvc_beat.c
+++ b/drivers/char/hvc_beat.c
@@ -84,7 +84,7 @@
 	return cnt;
 }
 
-static struct hv_ops hvc_beat_get_put_ops = {
+static const struct hv_ops hvc_beat_get_put_ops = {
 	.get_chars = hvc_beat_get_chars,
 	.put_chars = hvc_beat_put_chars,
 };
@@ -99,7 +99,7 @@
 
 static int __init hvc_beat_console_init(void)
 {
-	if (hvc_beat_useit && machine_is_compatible("Beat")) {
+	if (hvc_beat_useit && of_machine_is_compatible("Beat")) {
 		hvc_instantiate(0, 0, &hvc_beat_get_put_ops);
 	}
 	return 0;
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index 416d342..d8dac58 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -125,7 +125,7 @@
  * console interfaces but can still be used as a tty device.  This has to be
  * static because kmalloc will not work during early console init.
  */
-static struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
+static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
 static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] =
 	{[0 ... MAX_NR_HVC_CONSOLES - 1] = -1};
 
@@ -247,7 +247,7 @@
  * vty adapters do NOT get an hvc_instantiate() callback since they
  * appear after early console init.
  */
-int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops)
+int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
 {
 	struct hvc_struct *hp;
 
@@ -749,7 +749,8 @@
 };
 
 struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data,
-					struct hv_ops *ops, int outbuf_size)
+				       const struct hv_ops *ops,
+				       int outbuf_size)
 {
 	struct hvc_struct *hp;
 	int i;
diff --git a/drivers/char/hvc_console.h b/drivers/char/hvc_console.h
index 10950ca..52ddf4d 100644
--- a/drivers/char/hvc_console.h
+++ b/drivers/char/hvc_console.h
@@ -55,7 +55,7 @@
 	int outbuf_size;
 	int n_outbuf;
 	uint32_t vtermno;
-	struct hv_ops *ops;
+	const struct hv_ops *ops;
 	int irq_requested;
 	int data;
 	struct winsize ws;
@@ -76,11 +76,12 @@
 };
 
 /* Register a vterm and a slot index for use as a console (console_init) */
-extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops);
+extern int hvc_instantiate(uint32_t vtermno, int index,
+			   const struct hv_ops *ops);
 
 /* register a vterm for hvc tty operation (module_init or hotplug add) */
 extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data,
-				struct hv_ops *ops, int outbuf_size);
+				const struct hv_ops *ops, int outbuf_size);
 /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
 extern int hvc_remove(struct hvc_struct *hp);
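
With hv_ops passed as const everywhere, a backend's ops table can itself live in
read-only data.  A minimal, hypothetical backend against the prototypes above
(my_get_chars()/my_put_chars() are placeholders, not part of any real driver):

#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include "hvc_console.h"

static int my_get_chars(uint32_t vtermno, char *buf, int count)
{
	return 0;			/* nothing buffered in this sketch */
}

static int my_put_chars(uint32_t vtermno, const char *buf, int count)
{
	return count;			/* pretend everything was written */
}

/* The ops table can now be const, so it ends up in rodata. */
static const struct hv_ops my_hv_ops = {
	.get_chars = my_get_chars,
	.put_chars = my_put_chars,
};

static int __init my_hvc_init(void)
{
	/* vterm number 0, no notifier data, 256-byte output buffer */
	struct hvc_struct *hp = hvc_alloc(0, 0, &my_hv_ops, 256);

	return IS_ERR(hp) ? PTR_ERR(hp) : 0;
}
module_init(my_hvc_init);
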
 
diff --git a/drivers/char/hvc_iseries.c b/drivers/char/hvc_iseries.c
index 936d05b..fd02426 100644
--- a/drivers/char/hvc_iseries.c
+++ b/drivers/char/hvc_iseries.c
@@ -197,7 +197,7 @@
 	return sent;
 }
 
-static struct hv_ops hvc_get_put_ops = {
+static const struct hv_ops hvc_get_put_ops = {
 	.get_chars = get_chars,
 	.put_chars = put_chars,
 	.notifier_add = notifier_add_irq,
diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c
index fe62bd0..21681a8 100644
--- a/drivers/char/hvc_iucv.c
+++ b/drivers/char/hvc_iucv.c
@@ -922,7 +922,7 @@
 
 
 /* HVC operations */
-static struct hv_ops hvc_iucv_ops = {
+static const struct hv_ops hvc_iucv_ops = {
 	.get_chars = hvc_iucv_get_chars,
 	.put_chars = hvc_iucv_put_chars,
 	.notifier_add = hvc_iucv_notifier_add,
diff --git a/drivers/char/hvc_rtas.c b/drivers/char/hvc_rtas.c
index 88590d0..61c4a61 100644
--- a/drivers/char/hvc_rtas.c
+++ b/drivers/char/hvc_rtas.c
@@ -71,7 +71,7 @@
 	return i;
 }
 
-static struct hv_ops hvc_rtas_get_put_ops = {
+static const struct hv_ops hvc_rtas_get_put_ops = {
 	.get_chars = hvc_rtas_read_console,
 	.put_chars = hvc_rtas_write_console,
 };
diff --git a/drivers/char/hvc_udbg.c b/drivers/char/hvc_udbg.c
index bd63ba8..b0957e6 100644
--- a/drivers/char/hvc_udbg.c
+++ b/drivers/char/hvc_udbg.c
@@ -58,7 +58,7 @@
 	return i;
 }
 
-static struct hv_ops hvc_udbg_ops = {
+static const struct hv_ops hvc_udbg_ops = {
 	.get_chars = hvc_udbg_get,
 	.put_chars = hvc_udbg_put,
 };
diff --git a/drivers/char/hvc_vio.c b/drivers/char/hvc_vio.c
index 10be343..27370e9 100644
--- a/drivers/char/hvc_vio.c
+++ b/drivers/char/hvc_vio.c
@@ -77,7 +77,7 @@
 	return got;
 }
 
-static struct hv_ops hvc_get_put_ops = {
+static const struct hv_ops hvc_get_put_ops = {
 	.get_chars = filtered_get_chars,
 	.put_chars = hvc_put_chars,
 	.notifier_add = notifier_add_irq,
diff --git a/drivers/char/hvc_xen.c b/drivers/char/hvc_xen.c
index b1a7163..60446f8 100644
--- a/drivers/char/hvc_xen.c
+++ b/drivers/char/hvc_xen.c
@@ -122,7 +122,7 @@
 	return recv;
 }
 
-static struct hv_ops hvc_ops = {
+static const struct hv_ops hvc_ops = {
 	.get_chars = read_console,
 	.put_chars = write_console,
 	.notifier_add = notifier_add_irq,
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 8706026..6ea1014 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -186,3 +186,15 @@
 	  module will be called mxc-rnga.
 
 	  If unsure, say Y.
+
+config HW_RANDOM_NOMADIK
+	tristate "ST-Ericsson Nomadik Random Number Generator support"
+	depends on HW_RANDOM && PLAT_NOMADIK
+	---help---
+	  This driver provides kernel-side support for the Random Number
+	  Generator hardware found on ST-Ericsson SoCs (8815 and 8500).
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called nomadik-rng.
+
+	  If unsure, say Y.
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index 5eeb130..4273308 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -18,3 +18,4 @@
 obj-$(CONFIG_HW_RANDOM_TX4939) += tx4939-rng.o
 obj-$(CONFIG_HW_RANDOM_MXC_RNGA) += mxc-rnga.o
 obj-$(CONFIG_HW_RANDOM_OCTEON) += octeon-rng.o
+obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o
diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c
new file mode 100644
index 0000000..a8b4c40
--- /dev/null
+++ b/drivers/char/hw_random/nomadik-rng.c
@@ -0,0 +1,103 @@
+/*
+ * Nomadik RNG support
+ *  Copyright 2009 Alessandro Rubini
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/amba/bus.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+
+static int nmk_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+	void __iomem *base = (void __iomem *)rng->priv;
+
+	/*
+	 * The register is 32 bits and gives 16 random bits (low half).
+	 * A subsequent read will delay the core for 400ns, so we just read
+	 * once and accept the very unlikely very small delay, even if wait==0.
+	 */
+	*(u16 *)data = __raw_readl(base + 8) & 0xffff;
+	return 2;
+}
+
+/* we have at most one RNG per machine, granted */
+static struct hwrng nmk_rng = {
+	.name		= "nomadik",
+	.read		= nmk_rng_read,
+};
+
+static int nmk_rng_probe(struct amba_device *dev, struct amba_id *id)
+{
+	void __iomem *base;
+	int ret;
+
+	ret = amba_request_regions(dev, dev->dev.init_name);
+	if (ret)
+		return ret;
+	ret = -ENOMEM;
+	base = ioremap(dev->res.start, resource_size(&dev->res));
+	if (!base)
+		goto out_release;
+	nmk_rng.priv = (unsigned long)base;
+	ret = hwrng_register(&nmk_rng);
+	if (ret)
+		goto out_unmap;
+	return 0;
+
+out_unmap:
+	iounmap(base);
+out_release:
+	amba_release_regions(dev);
+	return ret;
+}
+
+static int nmk_rng_remove(struct amba_device *dev)
+{
+	void __iomem *base = (void __iomem *)nmk_rng.priv;
+	hwrng_unregister(&nmk_rng);
+	iounmap(base);
+	amba_release_regions(dev);
+	return 0;
+}
+
+static struct amba_id nmk_rng_ids[] = {
+	{
+		.id	= 0x000805e1,
+		.mask	= 0x000fffff, /* top bits are rev and cfg: accept all */
+	},
+	{0, 0},
+};
+
+static struct amba_driver nmk_rng_driver = {
+	.drv = {
+		.owner = THIS_MODULE,
+		.name = "rng",
+		},
+	.probe = nmk_rng_probe,
+	.remove = nmk_rng_remove,
+	.id_table = nmk_rng_ids,
+};
+
+static int __init nmk_rng_init(void)
+{
+	return amba_driver_register(&nmk_rng_driver);
+}
+
+static void __devexit nmk_rng_exit(void)
+{
+	amba_driver_unregister(&nmk_rng_driver);
+}
+
+module_init(nmk_rng_init);
+module_exit(nmk_rng_exit);
+
+MODULE_LICENSE("GPL");
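
Once the driver above registers with the hw_random core, entropy is normally
consumed through the core's /dev/hwrng character device rather than by poking
the hardware directly.  A small userspace sketch; it assumes the hw_random
character device is enabled and some backend, such as this one, is bound:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[16];
	ssize_t n, i;
	int fd = open("/dev/hwrng", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/hwrng");
		return 1;
	}
	n = read(fd, buf, sizeof(buf));
	for (i = 0; i < n; i++)
		printf("%02x", buf[i]);
	printf("\n");
	close(fd);
	return 0;
}
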
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index a035ae3..213373b 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1,18 +1,6 @@
-/*D:300
- * The Guest console driver
- *
- * Writing console drivers is one of the few remaining Dark Arts in Linux.
- * Fortunately for us, the path of virtual consoles has been well-trodden by
- * the PowerPC folks, who wrote "hvc_console.c" to generically support any
- * virtual console.  We use that infrastructure which only requires us to write
- * the basic put_chars and get_chars functions and call the right register
- * functions.
- :*/
-
-/*M:002 The console can be flooded: while the Guest is processing input the
- * Host can send more.  Buffering in the Host could alleviate this, but it is a
- * difficult problem in general. :*/
-/* Copyright (C) 2006, 2007 Rusty Russell, IBM Corporation
+/*
+ * Copyright (C) 2006, 2007, 2009 Rusty Russell, IBM Corporation
+ * Copyright (C) 2009, 2010 Red Hat, Inc.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -28,142 +16,694 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
+#include <linux/cdev.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
 #include <linux/err.h>
+#include <linux/fs.h>
 #include <linux/init.h>
+#include <linux/list.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
 #include <linux/virtio.h>
 #include <linux/virtio_console.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
 #include "hvc_console.h"
 
-/*D:340 These represent our input and output console queues, and the virtio
- * operations for them. */
-static struct virtqueue *in_vq, *out_vq;
-static struct virtio_device *vdev;
-
-/* This is our input buffer, and how much data is left in it. */
-static unsigned int in_len;
-static char *in, *inbuf;
-
-/* The operations for our console. */
-static struct hv_ops virtio_cons;
-
-/* The hvc device */
-static struct hvc_struct *hvc;
-
-/*D:310 The put_chars() callback is pretty straightforward.
+/*
+ * This is a global struct for storing common data for all the devices
+ * this driver handles.
  *
- * We turn the characters into a scatter-gather list, add it to the output
- * queue and then kick the Host.  Then we sit here waiting for it to finish:
- * inefficient in theory, but in practice implementations will do it
- * immediately (lguest's Launcher does). */
-static int put_chars(u32 vtermno, const char *buf, int count)
+ * Mainly, it has a linked list for all the consoles in one place so
+ * that callbacks from hvc for get_chars(), put_chars() work properly
+ * across multiple devices and multiple ports per device.
+ */
+struct ports_driver_data {
+	/* Used for registering chardevs */
+	struct class *class;
+
+	/* Used for exporting per-port information to debugfs */
+	struct dentry *debugfs_dir;
+
+	/* Number of devices this driver is handling */
+	unsigned int index;
+
+	/*
+	 * This is used to keep track of the number of hvc consoles
+	 * spawned by this driver.  This number is given as the first
+	 * argument to hvc_alloc().  To correctly map an initial
+	 * console spawned via hvc_instantiate to the console being
+	 * hooked up via hvc_alloc, we need to pass the same vtermno.
+	 *
+	 * We also just assume the first console being initialised was
+	 * the first one that got used as the initial console.
+	 */
+	unsigned int next_vtermno;
+
+	/* All the console devices handled by this driver */
+	struct list_head consoles;
+};
+static struct ports_driver_data pdrvdata;
+
+DEFINE_SPINLOCK(pdrvdata_lock);
+
+/* This struct holds information that's relevant only for console ports */
+struct console {
+	/* We'll place all consoles in a list in the pdrvdata struct */
+	struct list_head list;
+
+	/* The hvc device associated with this console port */
+	struct hvc_struct *hvc;
+
+	/*
+	 * This is the vtermno that we used to register with hvc in
+	 * hvc_instantiate() and hvc_alloc(); it is the number the hvc
+	 * callbacks pass back to us to differentiate between the
+	 * console ports handled by this driver.
+	 */
+	u32 vtermno;
+};
+
+struct port_buffer {
+	char *buf;
+
+	/* size of the buffer in *buf above */
+	size_t size;
+
+	/* used length of the buffer */
+	size_t len;
+	/* offset in the buf from which to consume data */
+	size_t offset;
+};
+
+/*
+ * This is a per-device struct that stores data common to all the
+ * ports for that device (vdev->priv).
+ */
+struct ports_device {
+	/*
+	 * Workqueue handlers where we process deferred work after
+	 * notification
+	 */
+	struct work_struct control_work;
+	struct work_struct config_work;
+
+	struct list_head ports;
+
+	/* To protect the list of ports */
+	spinlock_t ports_lock;
+
+	/* To protect the vq operations for the control channel */
+	spinlock_t cvq_lock;
+
+	/* The current config space is stored here */
+	struct virtio_console_config config;
+
+	/* The virtio device we're associated with */
+	struct virtio_device *vdev;
+
+	/*
+	 * A couple of virtqueues for the control channel: one for
+	 * guest->host transfers, one for host->guest transfers
+	 */
+	struct virtqueue *c_ivq, *c_ovq;
+
+	/* Array of per-port IO virtqueues */
+	struct virtqueue **in_vqs, **out_vqs;
+
+	/* Used for numbering devices for sysfs and debugfs */
+	unsigned int drv_index;
+
+	/* Major number for this device.  Ports will be created as minors. */
+	int chr_major;
+};
+
+/* This struct holds the per-port data */
+struct port {
+	/* Next port in the list, head is in the ports_device */
+	struct list_head list;
+
+	/* Pointer to the parent virtio_console device */
+	struct ports_device *portdev;
+
+	/* The current buffer from which data has to be fed to readers */
+	struct port_buffer *inbuf;
+
+	/*
+	 * To protect the operations on the in_vq associated with this
+	 * port.  Has to be a spinlock because it can be called from
+	 * interrupt context (get_char()).
+	 */
+	spinlock_t inbuf_lock;
+
+	/* The IO vqs for this port */
+	struct virtqueue *in_vq, *out_vq;
+
+	/* File in the debugfs directory that exposes this port's information */
+	struct dentry *debugfs_file;
+
+	/*
+	 * The entries in this struct will be valid if this port is
+	 * hooked up to an hvc console
+	 */
+	struct console cons;
+
+	/* Each port associates with a separate char device */
+	struct cdev cdev;
+	struct device *dev;
+
+	/* A waitqueue for poll() or blocking read operations */
+	wait_queue_head_t waitqueue;
+
+	/* The 'name' of the port that we expose via sysfs properties */
+	char *name;
+
+	/* The 'id' to identify the port with the Host */
+	u32 id;
+
+	/* Is the host device open */
+	bool host_connected;
+
+	/* We should allow only one process to open a port */
+	bool guest_connected;
+};
+
+/* This is the very early arch-specified put chars function. */
+static int (*early_put_chars)(u32, const char *, int);
+
+static struct port *find_port_by_vtermno(u32 vtermno)
 {
-	struct scatterlist sg[1];
+	struct port *port;
+	struct console *cons;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pdrvdata_lock, flags);
+	list_for_each_entry(cons, &pdrvdata.consoles, list) {
+		if (cons->vtermno == vtermno) {
+			port = container_of(cons, struct port, cons);
+			goto out;
+		}
+	}
+	port = NULL;
+out:
+	spin_unlock_irqrestore(&pdrvdata_lock, flags);
+	return port;
+}
+
+static struct port *find_port_by_id(struct ports_device *portdev, u32 id)
+{
+	struct port *port;
+	unsigned long flags;
+
+	spin_lock_irqsave(&portdev->ports_lock, flags);
+	list_for_each_entry(port, &portdev->ports, list)
+		if (port->id == id)
+			goto out;
+	port = NULL;
+out:
+	spin_unlock_irqrestore(&portdev->ports_lock, flags);
+
+	return port;
+}
+
+static struct port *find_port_by_vq(struct ports_device *portdev,
+				    struct virtqueue *vq)
+{
+	struct port *port;
+	unsigned long flags;
+
+	spin_lock_irqsave(&portdev->ports_lock, flags);
+	list_for_each_entry(port, &portdev->ports, list)
+		if (port->in_vq == vq || port->out_vq == vq)
+			goto out;
+	port = NULL;
+out:
+	spin_unlock_irqrestore(&portdev->ports_lock, flags);
+	return port;
+}
+
+static bool is_console_port(struct port *port)
+{
+	if (port->cons.hvc)
+		return true;
+	return false;
+}
+
+static inline bool use_multiport(struct ports_device *portdev)
+{
+	/*
+	 * This condition can be true when put_chars is called from
+	 * early_init
+	 */
+	if (!portdev->vdev)
+		return 0;
+	return portdev->vdev->features[0] & (1 << VIRTIO_CONSOLE_F_MULTIPORT);
+}
+
+static void free_buf(struct port_buffer *buf)
+{
+	kfree(buf->buf);
+	kfree(buf);
+}
+
+static struct port_buffer *alloc_buf(size_t buf_size)
+{
+	struct port_buffer *buf;
+
+	buf = kmalloc(sizeof(*buf), GFP_KERNEL);
+	if (!buf)
+		goto fail;
+	buf->buf = kzalloc(buf_size, GFP_KERNEL);
+	if (!buf->buf)
+		goto free_buf;
+	buf->len = 0;
+	buf->offset = 0;
+	buf->size = buf_size;
+	return buf;
+
+free_buf:
+	kfree(buf);
+fail:
+	return NULL;
+}
+
+/* Callers should take appropriate locks */
+static void *get_inbuf(struct port *port)
+{
+	struct port_buffer *buf;
+	struct virtqueue *vq;
 	unsigned int len;
 
-	/* This is a convenient routine to initialize a single-elem sg list */
-	sg_init_one(sg, buf, count);
-
-	/* add_buf wants a token to identify this buffer: we hand it any
-	 * non-NULL pointer, since there's only ever one buffer. */
-	if (out_vq->vq_ops->add_buf(out_vq, sg, 1, 0, (void *)1) >= 0) {
-		/* Tell Host to go! */
-		out_vq->vq_ops->kick(out_vq);
-		/* Chill out until it's done with the buffer. */
-		while (!out_vq->vq_ops->get_buf(out_vq, &len))
-			cpu_relax();
+	vq = port->in_vq;
+	buf = vq->vq_ops->get_buf(vq, &len);
+	if (buf) {
+		buf->len = len;
+		buf->offset = 0;
 	}
-
-	/* We're expected to return the amount of data we wrote: all of it. */
-	return count;
+	return buf;
 }
 
-/* Create a scatter-gather list representing our input buffer and put it in the
- * queue. */
-static void add_inbuf(void)
+/*
+ * Create a scatter-gather list representing our input buffer and put
+ * it in the queue.
+ *
+ * Callers should take appropriate locks.
+ */
+static int add_inbuf(struct virtqueue *vq, struct port_buffer *buf)
 {
 	struct scatterlist sg[1];
-	sg_init_one(sg, inbuf, PAGE_SIZE);
+	int ret;
 
-	/* We should always be able to add one buffer to an empty queue. */
-	if (in_vq->vq_ops->add_buf(in_vq, sg, 0, 1, inbuf) < 0)
-		BUG();
-	in_vq->vq_ops->kick(in_vq);
+	sg_init_one(sg, buf->buf, buf->size);
+
+	ret = vq->vq_ops->add_buf(vq, sg, 0, 1, buf);
+	vq->vq_ops->kick(vq);
+	return ret;
 }
 
-/*D:350 get_chars() is the callback from the hvc_console infrastructure when
- * an interrupt is received.
+/* Discard any unread data this port has.  Callers take appropriate locks. */
+static void discard_port_data(struct port *port)
+{
+	struct port_buffer *buf;
+	struct virtqueue *vq;
+	unsigned int len;
+	int ret;
+
+	vq = port->in_vq;
+	if (port->inbuf)
+		buf = port->inbuf;
+	else
+		buf = vq->vq_ops->get_buf(vq, &len);
+
+	ret = 0;
+	while (buf) {
+		if (add_inbuf(vq, buf) < 0) {
+			ret++;
+			free_buf(buf);
+		}
+		buf = vq->vq_ops->get_buf(vq, &len);
+	}
+	port->inbuf = NULL;
+	if (ret)
+		dev_warn(port->dev, "Errors adding %d buffers back to vq\n",
+			 ret);
+}
+
+static bool port_has_data(struct port *port)
+{
+	unsigned long flags;
+	bool ret;
+
+	spin_lock_irqsave(&port->inbuf_lock, flags);
+	if (port->inbuf) {
+		ret = true;
+		goto out;
+	}
+	port->inbuf = get_inbuf(port);
+	if (port->inbuf) {
+		ret = true;
+		goto out;
+	}
+	ret = false;
+out:
+	spin_unlock_irqrestore(&port->inbuf_lock, flags);
+	return ret;
+}
+
+static ssize_t send_control_msg(struct port *port, unsigned int event,
+				unsigned int value)
+{
+	struct scatterlist sg[1];
+	struct virtio_console_control cpkt;
+	struct virtqueue *vq;
+	int len;
+
+	if (!use_multiport(port->portdev))
+		return 0;
+
+	cpkt.id = port->id;
+	cpkt.event = event;
+	cpkt.value = value;
+
+	vq = port->portdev->c_ovq;
+
+	sg_init_one(sg, &cpkt, sizeof(cpkt));
+	if (vq->vq_ops->add_buf(vq, sg, 1, 0, &cpkt) >= 0) {
+		vq->vq_ops->kick(vq);
+		while (!vq->vq_ops->get_buf(vq, &len))
+			cpu_relax();
+	}
+	return 0;
+}
+
+static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count)
+{
+	struct scatterlist sg[1];
+	struct virtqueue *out_vq;
+	ssize_t ret;
+	unsigned int len;
+
+	out_vq = port->out_vq;
+
+	sg_init_one(sg, in_buf, in_count);
+	ret = out_vq->vq_ops->add_buf(out_vq, sg, 1, 0, in_buf);
+
+	/* Tell Host to go! */
+	out_vq->vq_ops->kick(out_vq);
+
+	if (ret < 0) {
+		len = 0;
+		goto fail;
+	}
+
+	/*
+	 * Wait till the host acknowledges it pushed out the data we
+	 * sent. Also ensure we return to userspace the number of
+	 * bytes that were successfully consumed by the host.
+	 */
+	while (!out_vq->vq_ops->get_buf(out_vq, &len))
+		cpu_relax();
+fail:
+	/* We're expected to return the amount of data we wrote */
+	return len;
+}
+
+/*
+ * Give out the data that's requested from the buffer that we have
+ * queued up.
+ */
+static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
+			    bool to_user)
+{
+	struct port_buffer *buf;
+	unsigned long flags;
+
+	if (!out_count || !port_has_data(port))
+		return 0;
+
+	buf = port->inbuf;
+	out_count = min(out_count, buf->len - buf->offset);
+
+	if (to_user) {
+		ssize_t ret;
+
+		ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
+		if (ret)
+			return -EFAULT;
+	} else {
+		memcpy(out_buf, buf->buf + buf->offset, out_count);
+	}
+
+	buf->offset += out_count;
+
+	if (buf->offset == buf->len) {
+		/*
+		 * We're done using all the data in this buffer.
+		 * Re-queue so that the Host can send us more data.
+		 */
+		spin_lock_irqsave(&port->inbuf_lock, flags);
+		port->inbuf = NULL;
+
+		if (add_inbuf(port->in_vq, buf) < 0)
+			dev_warn(port->dev, "failed add_buf\n");
+
+		spin_unlock_irqrestore(&port->inbuf_lock, flags);
+	}
+	/* Return the number of bytes actually copied */
+	return out_count;
+}
+
+/* The condition that must be true for polling to end */
+static bool wait_is_over(struct port *port)
+{
+	return port_has_data(port) || !port->host_connected;
+}
+
+static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
+			      size_t count, loff_t *offp)
+{
+	struct port *port;
+	ssize_t ret;
+
+	port = filp->private_data;
+
+	if (!port_has_data(port)) {
+		/*
+		 * If nothing's connected on the host side, just return
+		 * 0; this tells the userspace app that there's no
+		 * connection.
+		 */
+		if (!port->host_connected)
+			return 0;
+		if (filp->f_flags & O_NONBLOCK)
+			return -EAGAIN;
+
+		ret = wait_event_interruptible(port->waitqueue,
+					       wait_is_over(port));
+		if (ret < 0)
+			return ret;
+	}
+	/*
+	 * We could've received a disconnection message while we were
+	 * waiting for more data.
+	 *
+	 * This check is not folded into the if() statement above: we
+	 * might have received some data, and the host could also have
+	 * disconnected, after we were woken up from our wait.  So we
+	 * really want to hand over whatever data we have and only then
+	 * check for host_connected.
+	 */
+	if (!port_has_data(port) && !port->host_connected)
+		return 0;
+
+	return fill_readbuf(port, ubuf, count, true);
+}
+
+static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
+			       size_t count, loff_t *offp)
+{
+	struct port *port;
+	char *buf;
+	ssize_t ret;
+
+	port = filp->private_data;
+
+	count = min((size_t)(32 * 1024), count);
+
+	buf = kmalloc(count, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	ret = copy_from_user(buf, ubuf, count);
+	if (ret) {
+		ret = -EFAULT;
+		goto free_buf;
+	}
+
+	ret = send_buf(port, buf, count);
+free_buf:
+	kfree(buf);
+	return ret;
+}
+
+static unsigned int port_fops_poll(struct file *filp, poll_table *wait)
+{
+	struct port *port;
+	unsigned int ret;
+
+	port = filp->private_data;
+	poll_wait(filp, &port->waitqueue, wait);
+
+	ret = 0;
+	if (port->inbuf)
+		ret |= POLLIN | POLLRDNORM;
+	if (port->host_connected)
+		ret |= POLLOUT;
+	if (!port->host_connected)
+		ret |= POLLHUP;
+
+	return ret;
+}
+
+static int port_fops_release(struct inode *inode, struct file *filp)
+{
+	struct port *port;
+
+	port = filp->private_data;
+
+	/* Notify host of port being closed */
+	send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 0);
+
+	spin_lock_irq(&port->inbuf_lock);
+	port->guest_connected = false;
+
+	discard_port_data(port);
+
+	spin_unlock_irq(&port->inbuf_lock);
+
+	return 0;
+}
+
+static int port_fops_open(struct inode *inode, struct file *filp)
+{
+	struct cdev *cdev = inode->i_cdev;
+	struct port *port;
+
+	port = container_of(cdev, struct port, cdev);
+	filp->private_data = port;
+
+	/*
+	 * Don't allow opening of console port devices -- that's done
+	 * via /dev/hvc
+	 */
+	if (is_console_port(port))
+		return -ENXIO;
+
+	/* Allow only one process to open a particular port at a time */
+	spin_lock_irq(&port->inbuf_lock);
+	if (port->guest_connected) {
+		spin_unlock_irq(&port->inbuf_lock);
+		return -EMFILE;
+	}
+
+	port->guest_connected = true;
+	spin_unlock_irq(&port->inbuf_lock);
+
+	/* Notify host of port being opened */
+	send_control_msg(filp->private_data, VIRTIO_CONSOLE_PORT_OPEN, 1);
+
+	return 0;
+}
+
+/*
+ * The file operations that we support: programs in the guest can open
+ * a console device, read from it, write to it, poll for data and
+ * close it.  The devices are at
+ *   /dev/vport<device number>p<port number>
+ */
+static const struct file_operations port_fops = {
+	.owner = THIS_MODULE,
+	.open  = port_fops_open,
+	.read  = port_fops_read,
+	.write = port_fops_write,
+	.poll  = port_fops_poll,
+	.release = port_fops_release,
+};
+
+/*
+ * The put_chars() callback is pretty straightforward.
  *
- * Most of the code deals with the fact that the hvc_console() infrastructure
- * only asks us for 16 bytes at a time.  We keep in_offset and in_used fields
- * for partially-filled buffers. */
+ * We turn the characters into a scatter-gather list, add it to the
+ * output queue and then kick the Host.  Then we sit here waiting for
+ * it to finish: inefficient in theory, but in practice
+ * implementations will do it immediately (lguest's Launcher does).
+ */
+static int put_chars(u32 vtermno, const char *buf, int count)
+{
+	struct port *port;
+
+	port = find_port_by_vtermno(vtermno);
+	if (!port)
+		return 0;
+
+	if (unlikely(early_put_chars))
+		return early_put_chars(vtermno, buf, count);
+
+	return send_buf(port, (void *)buf, count);
+}
+
+/*
+ * get_chars() is the callback from the hvc_console infrastructure
+ * when an interrupt is received.
+ *
+ * We call out to fill_readbuf that gets us the required data from the
+ * buffers that are queued up.
+ */
 static int get_chars(u32 vtermno, char *buf, int count)
 {
+	struct port *port;
+
+	port = find_port_by_vtermno(vtermno);
+	if (!port)
+		return 0;
+
 	/* If we don't have an input queue yet, we can't get input. */
-	BUG_ON(!in_vq);
+	BUG_ON(!port->in_vq);
 
-	/* No buffer?  Try to get one. */
-	if (!in_len) {
-		in = in_vq->vq_ops->get_buf(in_vq, &in_len);
-		if (!in)
-			return 0;
-	}
-
-	/* You want more than we have to give?  Well, try wanting less! */
-	if (in_len < count)
-		count = in_len;
-
-	/* Copy across to their buffer and increment offset. */
-	memcpy(buf, in, count);
-	in += count;
-	in_len -= count;
-
-	/* Finished?  Re-register buffer so Host will use it again. */
-	if (in_len == 0)
-		add_inbuf();
-
-	return count;
-}
-/*:*/
-
-/*D:320 Console drivers are initialized very early so boot messages can go out,
- * so we do things slightly differently from the generic virtio initialization
- * of the net and block drivers.
- *
- * At this stage, the console is output-only.  It's too early to set up a
- * virtqueue, so we let the drivers do some boutique early-output thing. */
-int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
-{
-	virtio_cons.put_chars = put_chars;
-	return hvc_instantiate(0, 0, &virtio_cons);
+	return fill_readbuf(port, buf, count, false);
 }
 
-/*
- * virtio console configuration. This supports:
- * - console resize
- */
-static void virtcons_apply_config(struct virtio_device *dev)
+static void resize_console(struct port *port)
 {
+	struct virtio_device *vdev;
 	struct winsize ws;
 
-	if (virtio_has_feature(dev, VIRTIO_CONSOLE_F_SIZE)) {
-		dev->config->get(dev,
-				 offsetof(struct virtio_console_config, cols),
-				 &ws.ws_col, sizeof(u16));
-		dev->config->get(dev,
-				 offsetof(struct virtio_console_config, rows),
-				 &ws.ws_row, sizeof(u16));
-		hvc_resize(hvc, ws);
+	vdev = port->portdev->vdev;
+	if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE)) {
+		vdev->config->get(vdev,
+				  offsetof(struct virtio_console_config, cols),
+				  &ws.ws_col, sizeof(u16));
+		vdev->config->get(vdev,
+				  offsetof(struct virtio_console_config, rows),
+				  &ws.ws_row, sizeof(u16));
+		hvc_resize(port->cons.hvc, ws);
 	}
 }
 
-/*
- * we support only one console, the hvc struct is a global var
- * We set the configuration at this point, since we now have a tty
- */
+/* We set the configuration at this point, since we now have a tty */
 static int notifier_add_vio(struct hvc_struct *hp, int data)
 {
+	struct port *port;
+
+	port = find_port_by_vtermno(hp->vtermno);
+	if (!port)
+		return -EINVAL;
+
 	hp->irq_requested = 1;
-	virtcons_apply_config(vdev);
+	resize_console(port);
 
 	return 0;
 }
@@ -173,79 +713,797 @@
 	hp->irq_requested = 0;
 }
 
-static void hvc_handle_input(struct virtqueue *vq)
+/* The operations for console ports. */
+static const struct hv_ops hv_ops = {
+	.get_chars = get_chars,
+	.put_chars = put_chars,
+	.notifier_add = notifier_add_vio,
+	.notifier_del = notifier_del_vio,
+	.notifier_hangup = notifier_del_vio,
+};
+
+/*
+ * Console drivers are initialized very early so boot messages can go
+ * out, so we do things slightly differently from the generic virtio
+ * initialization of the net and block drivers.
+ *
+ * At this stage, the console is output-only.  It's too early to set
+ * up a virtqueue, so we let the drivers do some boutique early-output
+ * thing.
+ */
+int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
 {
-	if (hvc_poll(hvc))
+	early_put_chars = put_chars;
+	return hvc_instantiate(0, 0, &hv_ops);
+}
+
+int init_port_console(struct port *port)
+{
+	int ret;
+
+	/*
+	 * The Host's telling us this port is a console port.  Hook it
+	 * up with an hvc console.
+	 *
+	 * To set up and manage our virtual console, we call
+	 * hvc_alloc().
+	 *
+	 * The first argument of hvc_alloc() is the virtual console
+	 * number.  The second argument is the parameter for the
+	 * notification mechanism (like irq number).  We currently
+	 * leave this as zero, virtqueues have implicit notifications.
+	 *
+	 * The third argument is a "struct hv_ops" containing the
+	 * put_chars(), get_chars(), notifier_add() and notifier_del()
+	 * pointers.  The final argument is the output buffer size: we
+	 * can do any size, so we put PAGE_SIZE here.
+	 */
+	port->cons.vtermno = pdrvdata.next_vtermno;
+
+	port->cons.hvc = hvc_alloc(port->cons.vtermno, 0, &hv_ops, PAGE_SIZE);
+	if (IS_ERR(port->cons.hvc)) {
+		ret = PTR_ERR(port->cons.hvc);
+		dev_err(port->dev,
+			"error %d allocating hvc for port\n", ret);
+		port->cons.hvc = NULL;
+		return ret;
+	}
+	spin_lock_irq(&pdrvdata_lock);
+	pdrvdata.next_vtermno++;
+	list_add_tail(&port->cons.list, &pdrvdata.consoles);
+	spin_unlock_irq(&pdrvdata_lock);
+	port->guest_connected = true;
+
+	/* Notify host of port being opened */
+	send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1);
+
+	return 0;
+}
+
+static ssize_t show_port_name(struct device *dev,
+			      struct device_attribute *attr, char *buffer)
+{
+	struct port *port;
+
+	port = dev_get_drvdata(dev);
+
+	return sprintf(buffer, "%s\n", port->name);
+}
+
+static DEVICE_ATTR(name, S_IRUGO, show_port_name, NULL);
+
+static struct attribute *port_sysfs_entries[] = {
+	&dev_attr_name.attr,
+	NULL
+};
+
+static struct attribute_group port_attribute_group = {
+	.name = NULL,		/* put in device directory */
+	.attrs = port_sysfs_entries,
+};
+
+static int debugfs_open(struct inode *inode, struct file *filp)
+{
+	filp->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t debugfs_read(struct file *filp, char __user *ubuf,
+			    size_t count, loff_t *offp)
+{
+	struct port *port;
+	char *buf;
+	ssize_t ret, out_offset, out_count;
+
+	out_count = 1024;
+	buf = kmalloc(out_count, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	port = filp->private_data;
+	out_offset = 0;
+	out_offset += snprintf(buf + out_offset, out_count,
+			       "name: %s\n", port->name ? port->name : "");
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "guest_connected: %d\n", port->guest_connected);
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "host_connected: %d\n", port->host_connected);
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "is_console: %s\n",
+			       is_console_port(port) ? "yes" : "no");
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "console_vtermno: %u\n", port->cons.vtermno);
+
+	ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
+	kfree(buf);
+	return ret;
+}
+
+static const struct file_operations port_debugfs_ops = {
+	.owner = THIS_MODULE,
+	.open  = debugfs_open,
+	.read  = debugfs_read,
+};
+
+/* Remove all port-specific data. */
+static int remove_port(struct port *port)
+{
+	struct port_buffer *buf;
+
+	spin_lock_irq(&port->portdev->ports_lock);
+	list_del(&port->list);
+	spin_unlock_irq(&port->portdev->ports_lock);
+
+	if (is_console_port(port)) {
+		spin_lock_irq(&pdrvdata_lock);
+		list_del(&port->cons.list);
+		spin_unlock_irq(&pdrvdata_lock);
+		hvc_remove(port->cons.hvc);
+	}
+	if (port->guest_connected)
+		send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 0);
+
+	sysfs_remove_group(&port->dev->kobj, &port_attribute_group);
+	device_destroy(pdrvdata.class, port->dev->devt);
+	cdev_del(&port->cdev);
+
+	/* Remove unused data this port might have received. */
+	discard_port_data(port);
+
+	/* Remove buffers we queued up for the Host to send us data in. */
+	while ((buf = port->in_vq->vq_ops->detach_unused_buf(port->in_vq)))
+		free_buf(buf);
+
+	kfree(port->name);
+
+	debugfs_remove(port->debugfs_file);
+
+	kfree(port);
+	return 0;
+}
+
+/* Any private messages that the Host and Guest want to share */
+static void handle_control_message(struct ports_device *portdev,
+				   struct port_buffer *buf)
+{
+	struct virtio_console_control *cpkt;
+	struct port *port;
+	size_t name_size;
+	int err;
+
+	cpkt = (struct virtio_console_control *)(buf->buf + buf->offset);
+
+	port = find_port_by_id(portdev, cpkt->id);
+	if (!port) {
+		/* No port with this id: drop the control packet. */
+		dev_dbg(&portdev->vdev->dev,
+			"Invalid index %u in control packet\n", cpkt->id);
+		return;
+	}
+
+	switch (cpkt->event) {
+	case VIRTIO_CONSOLE_CONSOLE_PORT:
+		if (!cpkt->value)
+			break;
+		if (is_console_port(port))
+			break;
+
+		init_port_console(port);
+		/*
+		 * Could remove the port here in case init fails - but
+		 * have to notify the host first.
+		 */
+		break;
+	case VIRTIO_CONSOLE_RESIZE:
+		if (!is_console_port(port))
+			break;
+		port->cons.hvc->irq_requested = 1;
+		resize_console(port);
+		break;
+	case VIRTIO_CONSOLE_PORT_OPEN:
+		port->host_connected = cpkt->value;
+		wake_up_interruptible(&port->waitqueue);
+		break;
+	case VIRTIO_CONSOLE_PORT_NAME:
+		/*
+		 * Skip the size of the header and the cpkt to get the size
+		 * of the name that was sent
+		 */
+		name_size = buf->len - buf->offset - sizeof(*cpkt) + 1;
+
+		port->name = kmalloc(name_size, GFP_KERNEL);
+		if (!port->name) {
+			dev_err(port->dev,
+				"Not enough space to store port name\n");
+			break;
+		}
+		strncpy(port->name, buf->buf + buf->offset + sizeof(*cpkt),
+			name_size - 1);
+		port->name[name_size - 1] = 0;
+
+		/*
+		 * Since we only have one sysfs attribute, 'name',
+		 * create it only if we have a name for the port.
+		 */
+		err = sysfs_create_group(&port->dev->kobj,
+					 &port_attribute_group);
+		if (err)
+			dev_err(port->dev,
+				"Error %d creating sysfs device attributes\n",
+				err);
+
+		break;
+	case VIRTIO_CONSOLE_PORT_REMOVE:
+		/*
+		 * Hot unplug the port.  We don't decrement nr_ports
+		 * since we don't want to deal with extra complexities
+		 * of using the lowest-available port id: We can just
+		 * pick up the nr_ports number as the id and not have
+		 * userspace send it to us.  This helps us in two
+		 * ways:
+		 *
+		 * - We don't need to have a 'port_id' field in the
+		 *   config space when a port is hot-added.  This is a
+		 *   good thing as we might queue up multiple hotplug
+		 *   requests issued in our workqueue.
+		 *
+		 * - Another way to deal with this would have been to
+		 *   use a bitmap of the active ports and select the
+		 *   lowest non-active port from that map.  That
+		 *   bloats the already tight config space and we
+		 *   would end up artificially limiting the
+		 *   max. number of ports to sizeof(bitmap).  Right
+		 *   now we can support 2^32 ports (as the port id is
+		 *   stored in a u32 type).
+		 *
+		 */
+		remove_port(port);
+		break;
+	}
+}
+
+static void control_work_handler(struct work_struct *work)
+{
+	struct ports_device *portdev;
+	struct virtqueue *vq;
+	struct port_buffer *buf;
+	unsigned int len;
+
+	portdev = container_of(work, struct ports_device, control_work);
+	vq = portdev->c_ivq;
+
+	spin_lock(&portdev->cvq_lock);
+	while ((buf = vq->vq_ops->get_buf(vq, &len))) {
+		spin_unlock(&portdev->cvq_lock);
+
+		buf->len = len;
+		buf->offset = 0;
+
+		handle_control_message(portdev, buf);
+
+		spin_lock(&portdev->cvq_lock);
+		if (add_inbuf(portdev->c_ivq, buf) < 0) {
+			dev_warn(&portdev->vdev->dev,
+				 "Error adding buffer to queue\n");
+			free_buf(buf);
+		}
+	}
+	spin_unlock(&portdev->cvq_lock);
+}
+
+static void in_intr(struct virtqueue *vq)
+{
+	struct port *port;
+	unsigned long flags;
+
+	port = find_port_by_vq(vq->vdev->priv, vq);
+	if (!port)
+		return;
+
+	spin_lock_irqsave(&port->inbuf_lock, flags);
+	if (!port->inbuf)
+		port->inbuf = get_inbuf(port);
+
+	/*
+	 * Don't queue up data when port is closed.  This condition
+	 * can be reached when a console port is not yet connected (no
+	 * tty is spawned) and the host sends out data to console
+	 * ports.  For generic serial ports, the host won't
+	 * (shouldn't) send data till the guest is connected.
+	 */
+	if (!port->guest_connected)
+		discard_port_data(port);
+
+	spin_unlock_irqrestore(&port->inbuf_lock, flags);
+
+	wake_up_interruptible(&port->waitqueue);
+
+	if (is_console_port(port) && hvc_poll(port->cons.hvc))
 		hvc_kick();
 }
 
-/*D:370 Once we're further in boot, we get probed like any other virtio device.
- * At this stage we set up the output virtqueue.
- *
- * To set up and manage our virtual console, we call hvc_alloc().  Since we
- * never remove the console device we never need this pointer again.
- *
- * Finally we put our input buffer in the input queue, ready to receive. */
-static int __devinit virtcons_probe(struct virtio_device *dev)
+static void control_intr(struct virtqueue *vq)
 {
-	vq_callback_t *callbacks[] = { hvc_handle_input, NULL};
-	const char *names[] = { "input", "output" };
-	struct virtqueue *vqs[2];
+	struct ports_device *portdev;
+
+	portdev = vq->vdev->priv;
+	schedule_work(&portdev->control_work);
+}
+
+static void config_intr(struct virtio_device *vdev)
+{
+	struct ports_device *portdev;
+
+	portdev = vdev->priv;
+	if (use_multiport(portdev)) {
+		/* Handle port hot-add */
+		schedule_work(&portdev->config_work);
+	}
+	/*
+	 * We'll use this way of resizing only for legacy support.
+	 * For newer userspace (VIRTIO_CONSOLE_F_MULTIPORT+), use
+	 * control messages to indicate console size changes so that
+	 * it can be done per-port
+	 */
+	resize_console(find_port_by_id(portdev, 0));
+}
+
+static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
+{
+	struct port_buffer *buf;
+	unsigned int ret;
 	int err;
 
-	vdev = dev;
+	ret = 0;
+	do {
+		buf = alloc_buf(PAGE_SIZE);
+		if (!buf)
+			break;
 
-	/* This is the scratch page we use to receive console input */
-	inbuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
-	if (!inbuf) {
+		spin_lock_irq(lock);
+		err = add_inbuf(vq, buf);
+		if (err < 0) {
+			spin_unlock_irq(lock);
+			free_buf(buf);
+			break;
+		}
+		ret++;
+		spin_unlock_irq(lock);
+	} while (err > 0);
+
+	return ret;
+}
+
+static int add_port(struct ports_device *portdev, u32 id)
+{
+	char debugfs_name[16];
+	struct port *port;
+	struct port_buffer *buf;
+	dev_t devt;
+	int err;
+
+	port = kmalloc(sizeof(*port), GFP_KERNEL);
+	if (!port) {
 		err = -ENOMEM;
 		goto fail;
 	}
 
-	/* Find the queues. */
-	/* FIXME: This is why we want to wean off hvc: we do nothing
-	 * when input comes in. */
-	err = vdev->config->find_vqs(vdev, 2, vqs, callbacks, names);
-	if (err)
-		goto free;
+	port->portdev = portdev;
+	port->id = id;
 
-	in_vq = vqs[0];
-	out_vq = vqs[1];
+	port->name = NULL;
+	port->inbuf = NULL;
+	port->cons.hvc = NULL;
 
-	/* Start using the new console output. */
-	virtio_cons.get_chars = get_chars;
-	virtio_cons.put_chars = put_chars;
-	virtio_cons.notifier_add = notifier_add_vio;
-	virtio_cons.notifier_del = notifier_del_vio;
-	virtio_cons.notifier_hangup = notifier_del_vio;
+	port->host_connected = port->guest_connected = false;
 
-	/* The first argument of hvc_alloc() is the virtual console number, so
-	 * we use zero.  The second argument is the parameter for the
-	 * notification mechanism (like irq number). We currently leave this
-	 * as zero, virtqueues have implicit notifications.
-	 *
-	 * The third argument is a "struct hv_ops" containing the put_chars()
-	 * get_chars(), notifier_add() and notifier_del() pointers.
-	 * The final argument is the output buffer size: we can do any size,
-	 * so we put PAGE_SIZE here. */
-	hvc = hvc_alloc(0, 0, &virtio_cons, PAGE_SIZE);
-	if (IS_ERR(hvc)) {
-		err = PTR_ERR(hvc);
-		goto free_vqs;
+	port->in_vq = portdev->in_vqs[port->id];
+	port->out_vq = portdev->out_vqs[port->id];
+
+	cdev_init(&port->cdev, &port_fops);
+
+	devt = MKDEV(portdev->chr_major, id);
+	err = cdev_add(&port->cdev, devt, 1);
+	if (err < 0) {
+		dev_err(&port->portdev->vdev->dev,
+			"Error %d adding cdev for port %u\n", err, id);
+		goto free_port;
+	}
+	port->dev = device_create(pdrvdata.class, &port->portdev->vdev->dev,
+				  devt, port, "vport%up%u",
+				  port->portdev->drv_index, id);
+	if (IS_ERR(port->dev)) {
+		err = PTR_ERR(port->dev);
+		dev_err(&port->portdev->vdev->dev,
+			"Error %d creating device for port %u\n",
+			err, id);
+		goto free_cdev;
 	}
 
-	/* Register the input buffer the first time. */
-	add_inbuf();
+	spin_lock_init(&port->inbuf_lock);
+	init_waitqueue_head(&port->waitqueue);
+
+	/* Fill the in_vq with buffers so the host can send us data. */
+	err = fill_queue(port->in_vq, &port->inbuf_lock);
+	if (!err) {
+		dev_err(port->dev, "Error allocating inbufs\n");
+		err = -ENOMEM;
+		goto free_device;
+	}
+
+	/*
+	 * If we're not using multiport support, this has to be a console port
+	 */
+	if (!use_multiport(port->portdev)) {
+		err = init_port_console(port);
+		if (err)
+			goto free_inbufs;
+	}
+
+	spin_lock_irq(&portdev->ports_lock);
+	list_add_tail(&port->list, &port->portdev->ports);
+	spin_unlock_irq(&portdev->ports_lock);
+
+	/*
+	 * Tell the Host we're set so that it can send us various
+	 * configuration parameters for this port (e.g., port name,
+	 * caching, whether this is a console port, etc.)
+	 */
+	send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
+
+	if (pdrvdata.debugfs_dir) {
+		/*
+		 * Finally, create the debugfs file that we can use to
+		 * inspect a port's state at any time
+		 */
+		sprintf(debugfs_name, "vport%up%u",
+			port->portdev->drv_index, id);
+		port->debugfs_file = debugfs_create_file(debugfs_name, 0444,
+							 pdrvdata.debugfs_dir,
+							 port,
+							 &port_debugfs_ops);
+	}
+	return 0;
+
+free_inbufs:
+	while ((buf = port->in_vq->vq_ops->detach_unused_buf(port->in_vq)))
+		free_buf(buf);
+free_device:
+	device_destroy(pdrvdata.class, port->dev->devt);
+free_cdev:
+	cdev_del(&port->cdev);
+free_port:
+	kfree(port);
+fail:
+	return err;
+}
+
+/*
+ * The workhandler for config-space updates.
+ *
+ * This is called when ports are hot-added.
+ */
+static void config_work_handler(struct work_struct *work)
+{
+	struct virtio_console_config virtconconf;
+	struct ports_device *portdev;
+	struct virtio_device *vdev;
+	int err;
+
+	portdev = container_of(work, struct ports_device, config_work);
+
+	vdev = portdev->vdev;
+	vdev->config->get(vdev,
+			  offsetof(struct virtio_console_config, nr_ports),
+			  &virtconconf.nr_ports,
+			  sizeof(virtconconf.nr_ports));
+
+	if (portdev->config.nr_ports == virtconconf.nr_ports) {
+		/*
+		 * Port 0 got hot-added.  Since we already did all the
+		 * other initialisation for it, just tell the Host
+		 * that the port is ready if we find the port.  In
+		 * case the port was hot-removed earlier, we call
+		 * add_port to add the port.
+		 */
+		struct port *port;
+
+		port = find_port_by_id(portdev, 0);
+		if (!port)
+			add_port(portdev, 0);
+		else
+			send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
+		return;
+	}
+	if (virtconconf.nr_ports > portdev->config.max_nr_ports) {
+		dev_warn(&vdev->dev,
+			 "More ports specified (%u) than allowed (%u)",
+			 portdev->config.nr_ports + 1,
+			 portdev->config.max_nr_ports);
+		return;
+	}
+	if (virtconconf.nr_ports < portdev->config.nr_ports)
+		return;
+
+	/* Hot-add ports */
+	while (virtconconf.nr_ports - portdev->config.nr_ports) {
+		err = add_port(portdev, portdev->config.nr_ports);
+		if (err)
+			break;
+		portdev->config.nr_ports++;
+	}
+}
+
+static int init_vqs(struct ports_device *portdev)
+{
+	vq_callback_t **io_callbacks;
+	char **io_names;
+	struct virtqueue **vqs;
+	u32 i, j, nr_ports, nr_queues;
+	int err;
+
+	nr_ports = portdev->config.max_nr_ports;
+	nr_queues = use_multiport(portdev) ? (nr_ports + 1) * 2 : 2;
+
+	vqs = kmalloc(nr_queues * sizeof(struct virtqueue *), GFP_KERNEL);
+	if (!vqs) {
+		err = -ENOMEM;
+		goto fail;
+	}
+	io_callbacks = kmalloc(nr_queues * sizeof(vq_callback_t *), GFP_KERNEL);
+	if (!io_callbacks) {
+		err = -ENOMEM;
+		goto free_vqs;
+	}
+	io_names = kmalloc(nr_queues * sizeof(char *), GFP_KERNEL);
+	if (!io_names) {
+		err = -ENOMEM;
+		goto free_callbacks;
+	}
+	portdev->in_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *),
+				  GFP_KERNEL);
+	if (!portdev->in_vqs) {
+		err = -ENOMEM;
+		goto free_names;
+	}
+	portdev->out_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *),
+				   GFP_KERNEL);
+	if (!portdev->out_vqs) {
+		err = -ENOMEM;
+		goto free_invqs;
+	}
+
+	/*
+	 * For backward compat (newer host but older guest), the host
+	 * spawns a console port first and also inits the vqs for port
+	 * 0 before others.
+	 */
+	j = 0;
+	io_callbacks[j] = in_intr;
+	io_callbacks[j + 1] = NULL;
+	io_names[j] = "input";
+	io_names[j + 1] = "output";
+	j += 2;
+
+	if (use_multiport(portdev)) {
+		io_callbacks[j] = control_intr;
+		io_callbacks[j + 1] = NULL;
+		io_names[j] = "control-i";
+		io_names[j + 1] = "control-o";
+
+		for (i = 1; i < nr_ports; i++) {
+			j += 2;
+			io_callbacks[j] = in_intr;
+			io_callbacks[j + 1] = NULL;
+			io_names[j] = "input";
+			io_names[j + 1] = "output";
+		}
+	}
+	/* Find the queues. */
+	err = portdev->vdev->config->find_vqs(portdev->vdev, nr_queues, vqs,
+					      io_callbacks,
+					      (const char **)io_names);
+	if (err)
+		goto free_outvqs;
+
+	j = 0;
+	portdev->in_vqs[0] = vqs[0];
+	portdev->out_vqs[0] = vqs[1];
+	j += 2;
+	if (use_multiport(portdev)) {
+		portdev->c_ivq = vqs[j];
+		portdev->c_ovq = vqs[j + 1];
+
+		for (i = 1; i < nr_ports; i++) {
+			j += 2;
+			portdev->in_vqs[i] = vqs[j];
+			portdev->out_vqs[i] = vqs[j + 1];
+		}
+	}
+	kfree(io_callbacks);
+	kfree(io_names);
+	kfree(vqs);
+
+	return 0;
+
+free_names:
+	kfree(io_names);
+free_callbacks:
+	kfree(io_callbacks);
+free_outvqs:
+	kfree(portdev->out_vqs);
+free_invqs:
+	kfree(portdev->in_vqs);
+free_vqs:
+	kfree(vqs);
+fail:
+	return err;
+}
+
+static const struct file_operations portdev_fops = {
+	.owner = THIS_MODULE,
+};
+
+/*
+ * Once we're further in boot, we get probed like any other virtio
+ * device.
+ *
+ * If the host also supports multiple console ports, we check the
+ * config space to see how many ports the host has spawned.  We
+ * initialize each port found.
+ */
+static int __devinit virtcons_probe(struct virtio_device *vdev)
+{
+	struct ports_device *portdev;
+	u32 i;
+	int err;
+	bool multiport;
+
+	portdev = kmalloc(sizeof(*portdev), GFP_KERNEL);
+	if (!portdev) {
+		err = -ENOMEM;
+		goto fail;
+	}
+
+	/* Attach this portdev to this virtio_device, and vice-versa. */
+	portdev->vdev = vdev;
+	vdev->priv = portdev;
+
+	spin_lock_irq(&pdrvdata_lock);
+	portdev->drv_index = pdrvdata.index++;
+	spin_unlock_irq(&pdrvdata_lock);
+
+	portdev->chr_major = register_chrdev(0, "virtio-portsdev",
+					     &portdev_fops);
+	if (portdev->chr_major < 0) {
+		dev_err(&vdev->dev,
+			"Error %d registering chrdev for device %u\n",
+			portdev->chr_major, portdev->drv_index);
+		err = portdev->chr_major;
+		goto free;
+	}
+
+	multiport = false;
+	portdev->config.nr_ports = 1;
+	portdev->config.max_nr_ports = 1;
+	if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT)) {
+		multiport = true;
+		vdev->features[0] |= 1 << VIRTIO_CONSOLE_F_MULTIPORT;
+
+		vdev->config->get(vdev, offsetof(struct virtio_console_config,
+						 nr_ports),
+				  &portdev->config.nr_ports,
+				  sizeof(portdev->config.nr_ports));
+		vdev->config->get(vdev, offsetof(struct virtio_console_config,
+						 max_nr_ports),
+				  &portdev->config.max_nr_ports,
+				  sizeof(portdev->config.max_nr_ports));
+		if (portdev->config.nr_ports > portdev->config.max_nr_ports) {
+			dev_warn(&vdev->dev,
+				 "More ports (%u) specified than allowed (%u). Will init %u ports.",
+				 portdev->config.nr_ports,
+				 portdev->config.max_nr_ports,
+				 portdev->config.max_nr_ports);
+
+			portdev->config.nr_ports = portdev->config.max_nr_ports;
+		}
+	}
+
+	/* Let the Host know we support multiple ports. */
+	vdev->config->finalize_features(vdev);
+
+	err = init_vqs(portdev);
+	if (err < 0) {
+		dev_err(&vdev->dev, "Error %d initializing vqs\n", err);
+		goto free_chrdev;
+	}
+
+	spin_lock_init(&portdev->ports_lock);
+	INIT_LIST_HEAD(&portdev->ports);
+
+	if (multiport) {
+		spin_lock_init(&portdev->cvq_lock);
+		INIT_WORK(&portdev->control_work, &control_work_handler);
+		INIT_WORK(&portdev->config_work, &config_work_handler);
+
+		err = fill_queue(portdev->c_ivq, &portdev->cvq_lock);
+		if (!err) {
+			dev_err(&vdev->dev,
+				"Error allocating buffers for control queue\n");
+			err = -ENOMEM;
+			goto free_vqs;
+		}
+	}
+
+	for (i = 0; i < portdev->config.nr_ports; i++)
+		add_port(portdev, i);
+
+	/* Start using the new console output. */
+	early_put_chars = NULL;
 	return 0;
 
 free_vqs:
 	vdev->config->del_vqs(vdev);
+	kfree(portdev->in_vqs);
+	kfree(portdev->out_vqs);
+free_chrdev:
+	unregister_chrdev(portdev->chr_major, "virtio-portsdev");
 free:
-	kfree(inbuf);
+	kfree(portdev);
 fail:
 	return err;
 }
 
+static void virtcons_remove(struct virtio_device *vdev)
+{
+	struct ports_device *portdev;
+	struct port *port, *port2;
+	struct port_buffer *buf;
+	unsigned int len;
+
+	portdev = vdev->priv;
+
+	cancel_work_sync(&portdev->control_work);
+	cancel_work_sync(&portdev->config_work);
+
+	list_for_each_entry_safe(port, port2, &portdev->ports, list)
+		remove_port(port);
+
+	unregister_chrdev(portdev->chr_major, "virtio-portsdev");
+
+	while ((buf = portdev->c_ivq->vq_ops->get_buf(portdev->c_ivq, &len)))
+		free_buf(buf);
+
+	while ((buf = portdev->c_ivq->vq_ops->detach_unused_buf(portdev->c_ivq)))
+		free_buf(buf);
+
+	vdev->config->del_vqs(vdev);
+	kfree(portdev->in_vqs);
+	kfree(portdev->out_vqs);
+
+	kfree(portdev);
+}
+
 static struct virtio_device_id id_table[] = {
 	{ VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID },
 	{ 0 },
@@ -253,6 +1511,7 @@
 
 static unsigned int features[] = {
 	VIRTIO_CONSOLE_F_SIZE,
+	VIRTIO_CONSOLE_F_MULTIPORT,
 };
 
 static struct virtio_driver virtio_console = {
@@ -262,14 +1521,41 @@
 	.driver.owner =	THIS_MODULE,
 	.id_table =	id_table,
 	.probe =	virtcons_probe,
-	.config_changed = virtcons_apply_config,
+	.remove =	virtcons_remove,
+	.config_changed = config_intr,
 };
 
 static int __init init(void)
 {
+	int err;
+
+	pdrvdata.class = class_create(THIS_MODULE, "virtio-ports");
+	if (IS_ERR(pdrvdata.class)) {
+		err = PTR_ERR(pdrvdata.class);
+		pr_err("Error %d creating virtio-ports class\n", err);
+		return err;
+	}
+
+	pdrvdata.debugfs_dir = debugfs_create_dir("virtio-ports", NULL);
+	if (!pdrvdata.debugfs_dir) {
+		pr_warning("Error %ld creating debugfs dir for virtio-ports\n",
+			   PTR_ERR(pdrvdata.debugfs_dir));
+	}
+	INIT_LIST_HEAD(&pdrvdata.consoles);
+
 	return register_virtio_driver(&virtio_console);
 }
+
+static void __exit fini(void)
+{
+	unregister_virtio_driver(&virtio_console);
+
+	class_destroy(pdrvdata.class);
+	if (pdrvdata.debugfs_dir)
+		debugfs_remove_recursive(pdrvdata.debugfs_dir);
+}
 module_init(init);
+module_exit(fini);
 
 MODULE_DEVICE_TABLE(virtio, id_table);
 MODULE_DESCRIPTION("Virtio console driver");
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 6b3e0c2..6fe4f77 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -603,18 +603,13 @@
 	p->irqaction.handler = sh_cmt_interrupt;
 	p->irqaction.dev_id = p;
 	p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL;
-	ret = setup_irq(irq, &p->irqaction);
-	if (ret) {
-		pr_err("sh_cmt: failed to request irq %d\n", irq);
-		goto err1;
-	}
 
 	/* get hold of clock */
 	p->clk = clk_get(&p->pdev->dev, cfg->clk);
 	if (IS_ERR(p->clk)) {
 		pr_err("sh_cmt: cannot get clock \"%s\"\n", cfg->clk);
 		ret = PTR_ERR(p->clk);
-		goto err2;
+		goto err1;
 	}
 
 	if (resource_size(res) == 6) {
@@ -627,14 +622,25 @@
 		p->clear_bits = ~0xc000;
 	}
 
-	return sh_cmt_register(p, cfg->name,
-			       cfg->clockevent_rating,
-			       cfg->clocksource_rating);
- err2:
-	remove_irq(irq, &p->irqaction);
- err1:
+	ret = sh_cmt_register(p, cfg->name,
+			      cfg->clockevent_rating,
+			      cfg->clocksource_rating);
+	if (ret) {
+		pr_err("sh_cmt: registration failed\n");
+		goto err1;
+	}
+
+	ret = setup_irq(irq, &p->irqaction);
+	if (ret) {
+		pr_err("sh_cmt: failed to request irq %d\n", irq);
+		goto err1;
+	}
+
+	return 0;
+
+err1:
 	iounmap(p->mapbase);
- err0:
+err0:
 	return ret;
 }
 
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index 973e714..4c8a759 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -221,15 +221,15 @@
 	ced->cpumask = cpumask_of(0);
 	ced->set_mode = sh_mtu2_clock_event_mode;
 
+	pr_info("sh_mtu2: %s used for clock events\n", ced->name);
+	clockevents_register_device(ced);
+
 	ret = setup_irq(p->irqaction.irq, &p->irqaction);
 	if (ret) {
 		pr_err("sh_mtu2: failed to request irq %d\n",
 		       p->irqaction.irq);
 		return;
 	}
-
-	pr_info("sh_mtu2: %s used for clock events\n", ced->name);
-	clockevents_register_device(ced);
 }
 
 static int sh_mtu2_register(struct sh_mtu2_priv *p, char *name,
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index 93c2322..961f5b5 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -323,15 +323,15 @@
 	ced->set_next_event = sh_tmu_clock_event_next;
 	ced->set_mode = sh_tmu_clock_event_mode;
 
+	pr_info("sh_tmu: %s used for clock events\n", ced->name);
+	clockevents_register_device(ced);
+
 	ret = setup_irq(p->irqaction.irq, &p->irqaction);
 	if (ret) {
 		pr_err("sh_tmu: failed to request irq %d\n",
 		       p->irqaction.irq);
 		return;
 	}
-
-	pr_info("sh_tmu: %s used for clock events\n", ced->name);
-	clockevents_register_device(ced);
 }
 
 static int sh_tmu_register(struct sh_tmu_priv *p, char *name,
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 46e899a..1c3849f 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -1274,7 +1274,7 @@
 	return 0;
 }
 
-static struct of_device_id crypto4xx_match[] = {
+static const struct of_device_id crypto4xx_match[] = {
 	{ .compatible      = "amcc,ppc4xx-crypto",},
 	{ },
 };
diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
index 4801162..c7a5a43 100644
--- a/drivers/crypto/geode-aes.c
+++ b/drivers/crypto/geode-aes.c
@@ -135,13 +135,13 @@
 	/*
 	 * The requested key size is not supported by HW, do a fallback
 	 */
-	op->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
-	op->fallback.blk->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);
+	op->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+	op->fallback.cip->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);
 
 	ret = crypto_cipher_setkey(op->fallback.cip, key, len);
 	if (ret) {
 		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
-		tfm->crt_flags |= (op->fallback.blk->base.crt_flags & CRYPTO_TFM_RES_MASK);
+		tfm->crt_flags |= (op->fallback.cip->base.crt_flags & CRYPTO_TFM_RES_MASK);
 	}
 	return ret;
 }
@@ -263,7 +263,7 @@
 
 	if (IS_ERR(op->fallback.cip)) {
 		printk(KERN_ERR "Error allocating fallback algo %s\n", name);
-		return PTR_ERR(op->fallback.blk);
+		return PTR_ERR(op->fallback.cip);
 	}
 
 	return 0;
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index c47ffe8..fd529d6 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -1958,7 +1958,7 @@
 	return err;
 }
 
-static struct of_device_id talitos_match[] = {
+static const struct of_device_id talitos_match[] = {
 	{
 		.compatible = "fsl,sec2.0",
 	},
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index d10cc89..b75ce8b 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -48,23 +48,20 @@
  */
 #define RS_DEFAULT  (RS_DUAL)
 
+/* A bitmask with enough bits for all values of enum sh_dmae_slave_chan_id */
+static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SHDMA_SLAVE_NUMBER)];
+
 static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);
 
 #define SH_DMAC_CHAN_BASE(id) (dma_base_addr[id])
 static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
 {
-	ctrl_outl(data, (SH_DMAC_CHAN_BASE(sh_dc->id) + reg));
+	ctrl_outl(data, SH_DMAC_CHAN_BASE(sh_dc->id) + reg);
 }
 
 static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
 {
-	return ctrl_inl((SH_DMAC_CHAN_BASE(sh_dc->id) + reg));
-}
-
-static void dmae_init(struct sh_dmae_chan *sh_chan)
-{
-	u32 chcr = RS_DEFAULT; /* default is DUAL mode */
-	sh_dmae_writel(sh_chan, chcr, CHCR);
+	return ctrl_inl(SH_DMAC_CHAN_BASE(sh_dc->id) + reg);
 }
 
 /*
@@ -95,27 +92,30 @@
 	return 0;
 }
 
-static int dmae_is_busy(struct sh_dmae_chan *sh_chan)
+static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
 {
 	u32 chcr = sh_dmae_readl(sh_chan, CHCR);
-	if (chcr & CHCR_DE) {
-		if (!(chcr & CHCR_TE))
-			return -EBUSY; /* working */
-	}
-	return 0; /* waiting */
+
+	if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
+		return true; /* working */
+
+	return false; /* waiting */
 }
 
-static inline unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan)
+static unsigned int ts_shift[] = TS_SHIFT;
+static inline unsigned int calc_xmit_shift(u32 chcr)
 {
-	u32 chcr = sh_dmae_readl(sh_chan, CHCR);
-	return ts_shift[(chcr & CHCR_TS_MASK) >> CHCR_TS_SHIFT];
+	int cnt = ((chcr & CHCR_TS_LOW_MASK) >> CHCR_TS_LOW_SHIFT) |
+		((chcr & CHCR_TS_HIGH_MASK) >> CHCR_TS_HIGH_SHIFT);
+
+	return ts_shift[cnt];
 }
 
 static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
 {
 	sh_dmae_writel(sh_chan, hw->sar, SAR);
 	sh_dmae_writel(sh_chan, hw->dar, DAR);
-	sh_dmae_writel(sh_chan, hw->tcr >> calc_xmit_shift(sh_chan), TCR);
+	sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
 }
 
 static void dmae_start(struct sh_dmae_chan *sh_chan)
@@ -123,7 +123,7 @@
 	u32 chcr = sh_dmae_readl(sh_chan, CHCR);
 
 	chcr |= CHCR_DE | CHCR_IE;
-	sh_dmae_writel(sh_chan, chcr, CHCR);
+	sh_dmae_writel(sh_chan, chcr & ~CHCR_TE, CHCR);
 }
 
 static void dmae_halt(struct sh_dmae_chan *sh_chan)
@@ -134,55 +134,50 @@
 	sh_dmae_writel(sh_chan, chcr, CHCR);
 }
 
+static void dmae_init(struct sh_dmae_chan *sh_chan)
+{
+	u32 chcr = RS_DEFAULT; /* default is DUAL mode */
+	sh_chan->xmit_shift = calc_xmit_shift(chcr);
+	sh_dmae_writel(sh_chan, chcr, CHCR);
+}
+
 static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
 {
-	int ret = dmae_is_busy(sh_chan);
 	/* When DMA was working, can not set data to CHCR */
-	if (ret)
-		return ret;
+	if (dmae_is_busy(sh_chan))
+		return -EBUSY;
 
+	sh_chan->xmit_shift = calc_xmit_shift(val);
 	sh_dmae_writel(sh_chan, val, CHCR);
+
 	return 0;
 }
 
-#define DMARS1_ADDR	0x04
-#define DMARS2_ADDR	0x08
-#define DMARS_SHIFT 8
-#define DMARS_CHAN_MSK 0x01
+#define DMARS_SHIFT	8
+#define DMARS_CHAN_MSK	0x01
 static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
 {
 	u32 addr;
 	int shift = 0;
-	int ret = dmae_is_busy(sh_chan);
-	if (ret)
-		return ret;
+
+	if (dmae_is_busy(sh_chan))
+		return -EBUSY;
 
 	if (sh_chan->id & DMARS_CHAN_MSK)
 		shift = DMARS_SHIFT;
 
-	switch (sh_chan->id) {
-	/* DMARS0 */
-	case 0:
-	case 1:
-		addr = SH_DMARS_BASE;
-		break;
-	/* DMARS1 */
-	case 2:
-	case 3:
-		addr = (SH_DMARS_BASE + DMARS1_ADDR);
-		break;
-	/* DMARS2 */
-	case 4:
-	case 5:
-		addr = (SH_DMARS_BASE + DMARS2_ADDR);
-		break;
-	default:
+	if (sh_chan->id < 6)
+		/* DMA0RS0 - DMA0RS2 */
+		addr = SH_DMARS_BASE0 + (sh_chan->id / 2) * 4;
+#ifdef SH_DMARS_BASE1
+	else if (sh_chan->id < 12)
+		/* DMA1RS0 - DMA1RS2 */
+		addr = SH_DMARS_BASE1 + ((sh_chan->id - 6) / 2) * 4;
+#endif
+	else
 		return -EINVAL;
-	}
 
-	ctrl_outw((val << shift) |
-		(ctrl_inw(addr) & (shift ? 0xFF00 : 0x00FF)),
-		addr);
+	ctrl_outw((val << shift) | (ctrl_inw(addr) & (0xFF00 >> shift)), addr);
 
 	return 0;
 }
@@ -250,10 +245,53 @@
 	return NULL;
 }
 
+static struct sh_dmae_slave_config *sh_dmae_find_slave(
+	struct sh_dmae_chan *sh_chan, enum sh_dmae_slave_chan_id slave_id)
+{
+	struct dma_device *dma_dev = sh_chan->common.device;
+	struct sh_dmae_device *shdev = container_of(dma_dev,
+					struct sh_dmae_device, common);
+	struct sh_dmae_pdata *pdata = &shdev->pdata;
+	int i;
+
+	if ((unsigned)slave_id >= SHDMA_SLAVE_NUMBER)
+		return NULL;
+
+	for (i = 0; i < pdata->config_num; i++)
+		if (pdata->config[i].slave_id == slave_id)
+			return pdata->config + i;
+
+	return NULL;
+}
+
 static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
 {
 	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
 	struct sh_desc *desc;
+	struct sh_dmae_slave *param = chan->private;
+
+	/*
+	 * This relies on the guarantee from dmaengine that alloc_chan_resources
+	 * never runs concurrently with itself or free_chan_resources.
+	 */
+	if (param) {
+		struct sh_dmae_slave_config *cfg;
+
+		cfg = sh_dmae_find_slave(sh_chan, param->slave_id);
+		if (!cfg)
+			return -EINVAL;
+
+		if (test_and_set_bit(param->slave_id, sh_dmae_slave_used))
+			return -EBUSY;
+
+		param->config = cfg;
+
+		dmae_set_dmars(sh_chan, cfg->mid_rid);
+		dmae_set_chcr(sh_chan, cfg->chcr);
+	} else {
+		if ((sh_dmae_readl(sh_chan, CHCR) & 0x700) != 0x400)
+			dmae_set_chcr(sh_chan, RS_DEFAULT);
+	}
 
 	spin_lock_bh(&sh_chan->desc_lock);
 	while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
@@ -286,10 +324,18 @@
 	struct sh_desc *desc, *_desc;
 	LIST_HEAD(list);
 
+	dmae_halt(sh_chan);
+
 	/* Prepared and not submitted descriptors can still be on the queue */
 	if (!list_empty(&sh_chan->ld_queue))
 		sh_dmae_chan_ld_cleanup(sh_chan, true);
 
+	if (chan->private) {
+		/* The caller is holding dma_list_mutex */
+		struct sh_dmae_slave *param = chan->private;
+		clear_bit(param->slave_id, sh_dmae_slave_used);
+	}
+
 	spin_lock_bh(&sh_chan->desc_lock);
 
 	list_splice_init(&sh_chan->ld_free, &list);
@@ -301,23 +347,97 @@
 		kfree(desc);
 }
 
-static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
-	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
-	size_t len, unsigned long flags)
+/**
+ * sh_dmae_add_desc - get, set up and return one transfer descriptor
+ * @sh_chan:	DMA channel
+ * @flags:	DMA transfer flags
+ * @dest:	destination DMA address, incremented when direction equals
+ *		DMA_FROM_DEVICE or DMA_BIDIRECTIONAL
+ * @src:	source DMA address, incremented when direction equals
+ *		DMA_TO_DEVICE or DMA_BIDIRECTIONAL
+ * @len:	DMA transfer length
+ * @first:	if NULL, set to the current descriptor and cookie set to -EBUSY
+ * @direction:	needed for slave DMA to decide which address to keep constant,
+ *		equals DMA_BIDIRECTIONAL for MEMCPY
+ * Returns the allocated descriptor or NULL on failure
+ * Locks: called with desc_lock held
+ */
+static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
+	unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len,
+	struct sh_desc **first, enum dma_data_direction direction)
 {
-	struct sh_dmae_chan *sh_chan;
-	struct sh_desc *first = NULL, *prev = NULL, *new;
+	struct sh_desc *new;
 	size_t copy_size;
+
+	if (!*len)
+		return NULL;
+
+	/* Allocate the link descriptor from the free list */
+	new = sh_dmae_get_desc(sh_chan);
+	if (!new) {
+		dev_err(sh_chan->dev, "No free link descriptor available\n");
+		return NULL;
+	}
+
+	copy_size = min(*len, (size_t)SH_DMA_TCR_MAX + 1);
+
+	new->hw.sar = *src;
+	new->hw.dar = *dest;
+	new->hw.tcr = copy_size;
+
+	if (!*first) {
+		/* First desc */
+		new->async_tx.cookie = -EBUSY;
+		*first = new;
+	} else {
+		/* Other desc - invisible to the user */
+		new->async_tx.cookie = -EINVAL;
+	}
+
+	dev_dbg(sh_chan->dev,
+		"chaining (%u/%u)@%x -> %x with %p, cookie %d, shift %d\n",
+		copy_size, *len, *src, *dest, &new->async_tx,
+		new->async_tx.cookie, sh_chan->xmit_shift);
+
+	new->mark = DESC_PREPARED;
+	new->async_tx.flags = flags;
+	new->direction = direction;
+
+	*len -= copy_size;
+	if (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE)
+		*src += copy_size;
+	if (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE)
+		*dest += copy_size;
+
+	return new;
+}
+
+/*
+ * sh_dmae_prep_sg - prepare transfer descriptors from an SG list
+ *
+ * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
+ * converted to scatter-gather to guarantee consistent locking and a correct
+ * list manipulation. For slave DMA direction carries the usual meaning, and,
+ * logically, the SG list is RAM and the addr variable contains slave address,
+ * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_BIDIRECTIONAL
+ * and the SG list contains only one element and points at the source buffer.
+ */
+static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan,
+	struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
+	enum dma_data_direction direction, unsigned long flags)
+{
+	struct scatterlist *sg;
+	struct sh_desc *first = NULL, *new = NULL /* compiler... */;
 	LIST_HEAD(tx_list);
-	int chunks = (len + SH_DMA_TCR_MAX) / (SH_DMA_TCR_MAX + 1);
+	int chunks = 0;
+	int i;
 
-	if (!chan)
+	if (!sg_len)
 		return NULL;
 
-	if (!len)
-		return NULL;
-
-	sh_chan = to_sh_chan(chan);
+	for_each_sg(sgl, sg, sg_len, i)
+		chunks += (sg_dma_len(sg) + SH_DMA_TCR_MAX) /
+			(SH_DMA_TCR_MAX + 1);
 
 	/* Have to lock the whole loop to protect against concurrent release */
 	spin_lock_bh(&sh_chan->desc_lock);
@@ -333,49 +453,32 @@
 	 *	only during this function, then they are immediately spliced
 	 *	back onto the free list in form of a chain
 	 */
-	do {
-		/* Allocate the link descriptor from the free list */
-		new = sh_dmae_get_desc(sh_chan);
-		if (!new) {
-			dev_err(sh_chan->dev,
-				"No free memory for link descriptor\n");
-			list_for_each_entry(new, &tx_list, node)
-				new->mark = DESC_IDLE;
-			list_splice(&tx_list, &sh_chan->ld_free);
-			spin_unlock_bh(&sh_chan->desc_lock);
-			return NULL;
-		}
+	for_each_sg(sgl, sg, sg_len, i) {
+		dma_addr_t sg_addr = sg_dma_address(sg);
+		size_t len = sg_dma_len(sg);
 
-		copy_size = min(len, (size_t)SH_DMA_TCR_MAX + 1);
+		if (!len)
+			goto err_get_desc;
 
-		new->hw.sar = dma_src;
-		new->hw.dar = dma_dest;
-		new->hw.tcr = copy_size;
-		if (!first) {
-			/* First desc */
-			new->async_tx.cookie = -EBUSY;
-			first = new;
-		} else {
-			/* Other desc - invisible to the user */
-			new->async_tx.cookie = -EINVAL;
-		}
+		do {
+			dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n",
+				i, sg, len, (unsigned long long)sg_addr);
 
-		dev_dbg(sh_chan->dev,
-			"chaining %u of %u with %p, dst %x, cookie %d\n",
-			copy_size, len, &new->async_tx, dma_dest,
-			new->async_tx.cookie);
+			if (direction == DMA_FROM_DEVICE)
+				new = sh_dmae_add_desc(sh_chan, flags,
+						&sg_addr, addr, &len, &first,
+						direction);
+			else
+				new = sh_dmae_add_desc(sh_chan, flags,
+						addr, &sg_addr, &len, &first,
+						direction);
+			if (!new)
+				goto err_get_desc;
 
-		new->mark = DESC_PREPARED;
-		new->async_tx.flags = flags;
-		new->chunks = chunks--;
-
-		prev = new;
-		len -= copy_size;
-		dma_src += copy_size;
-		dma_dest += copy_size;
-		/* Insert the link descriptor to the LD ring */
-		list_add_tail(&new->node, &tx_list);
-	} while (len);
+			new->chunks = chunks--;
+			list_add_tail(&new->node, &tx_list);
+		} while (len);
+	}
 
 	if (new != first)
 		new->async_tx.cookie = -ENOSPC;
@@ -386,6 +489,77 @@
 	spin_unlock_bh(&sh_chan->desc_lock);
 
 	return &first->async_tx;
+
+err_get_desc:
+	list_for_each_entry(new, &tx_list, node)
+		new->mark = DESC_IDLE;
+	list_splice(&tx_list, &sh_chan->ld_free);
+
+	spin_unlock_bh(&sh_chan->desc_lock);
+
+	return NULL;
+}
+
+static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
+	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
+	size_t len, unsigned long flags)
+{
+	struct sh_dmae_chan *sh_chan;
+	struct scatterlist sg;
+
+	if (!chan || !len)
+		return NULL;
+
+	chan->private = NULL;
+
+	sh_chan = to_sh_chan(chan);
+
+	sg_init_table(&sg, 1);
+	sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
+		    offset_in_page(dma_src));
+	sg_dma_address(&sg) = dma_src;
+	sg_dma_len(&sg) = len;
+
+	return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_BIDIRECTIONAL,
+			       flags);
+}
+
+static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
+	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
+	enum dma_data_direction direction, unsigned long flags)
+{
+	struct sh_dmae_slave *param;
+	struct sh_dmae_chan *sh_chan;
+
+	if (!chan)
+		return NULL;
+
+	sh_chan = to_sh_chan(chan);
+	param = chan->private;
+
+	/* Someone calling slave DMA on a public channel? */
+	if (!param || !sg_len) {
+		dev_warn(sh_chan->dev, "%s: bad parameter: %p, %d, %d\n",
+			 __func__, param, sg_len, param ? param->slave_id : -1);
+		return NULL;
+	}
+
+	/*
+	 * if (param != NULL), this is a successfully requested slave channel,
+	 * therefore param->config != NULL too.
+	 */
+	return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &param->config->addr,
+			       direction, flags);
+}
+
+static void sh_dmae_terminate_all(struct dma_chan *chan)
+{
+	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
+
+	if (!chan)
+		return;
+
+	sh_dmae_chan_ld_cleanup(sh_chan, true);
 }
 
 static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
@@ -419,7 +593,11 @@
 			cookie = tx->cookie;
 
 		if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
-			BUG_ON(sh_chan->completed_cookie != desc->cookie - 1);
+			if (sh_chan->completed_cookie != desc->cookie - 1)
+				dev_dbg(sh_chan->dev,
+					"Completing cookie %d, expected %d\n",
+					desc->cookie,
+					sh_chan->completed_cookie + 1);
 			sh_chan->completed_cookie = desc->cookie;
 		}
 
@@ -492,7 +670,7 @@
 		return;
 	}
 
-	/* Find the first un-transfer desciptor */
+	/* Find the first not yet transferred descriptor */
 	list_for_each_entry(sd, &sh_chan->ld_queue, node)
 		if (sd->mark == DESC_SUBMITTED) {
 			/* Get the ld start address from ld_queue */
@@ -559,7 +737,7 @@
 
 	/* IRQ Multi */
 	if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
-		int cnt = 0;
+		int __maybe_unused cnt = 0;
 		switch (irq) {
 #if defined(DMTE6_IRQ) && defined(DMAE1_IRQ)
 		case DMTE6_IRQ:
@@ -596,11 +774,14 @@
 	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
 	struct sh_desc *desc;
 	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
+	u32 dar_buf = sh_dmae_readl(sh_chan, DAR);
 
 	spin_lock(&sh_chan->desc_lock);
 	list_for_each_entry(desc, &sh_chan->ld_queue, node) {
-		if ((desc->hw.sar + desc->hw.tcr) == sar_buf &&
-		    desc->mark == DESC_SUBMITTED) {
+		if (desc->mark == DESC_SUBMITTED &&
+		    ((desc->direction == DMA_FROM_DEVICE &&
+		      (desc->hw.dar + desc->hw.tcr) == dar_buf) ||
+		     (desc->hw.sar + desc->hw.tcr) == sar_buf)) {
 			dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
 				desc->async_tx.cookie, &desc->async_tx,
 				desc->hw.dar);
@@ -673,7 +854,7 @@
 	}
 
 	snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
-			"sh-dmae%d", new_sh_chan->id);
+		 "sh-dmae%d", new_sh_chan->id);
 
 	/* set up channel irq */
 	err = request_irq(irq, &sh_dmae_interrupt, irqflags,
@@ -684,11 +865,6 @@
 		goto err_no_irq;
 	}
 
-	/* CHCR register control function */
-	new_sh_chan->set_chcr = dmae_set_chcr;
-	/* DMARS register control function */
-	new_sh_chan->set_dmars = dmae_set_dmars;
-
 	shdev->chan[id] = new_sh_chan;
 	return 0;
 
@@ -759,12 +935,19 @@
 	INIT_LIST_HEAD(&shdev->common.channels);
 
 	dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
+	dma_cap_set(DMA_SLAVE, shdev->common.cap_mask);
+
 	shdev->common.device_alloc_chan_resources
 		= sh_dmae_alloc_chan_resources;
 	shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources;
 	shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy;
 	shdev->common.device_is_tx_complete = sh_dmae_is_complete;
 	shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;
+
+	/* Fields required for the DMA_SLAVE capability */
+	shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
+	shdev->common.device_terminate_all = sh_dmae_terminate_all;
+
 	shdev->common.dev = &pdev->dev;
 	/* Default transfer size of 32 bytes requires 32-byte alignment */
 	shdev->common.copy_align = 5;
diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h
index 108f1cf..7e227f3 100644
--- a/drivers/dma/shdma.h
+++ b/drivers/dma/shdma.h
@@ -29,6 +29,7 @@
 	struct sh_dmae_regs hw;
 	struct list_head node;
 	struct dma_async_tx_descriptor async_tx;
+	enum dma_data_direction direction;
 	dma_cookie_t cookie;
 	int chunks;
 	int mark;
@@ -45,13 +46,9 @@
 	struct device *dev;		/* Channel device */
 	struct tasklet_struct tasklet;	/* Tasklet */
 	int descs_allocated;		/* desc count */
+	int xmit_shift;			/* log_2(bytes_per_xfer) */
 	int id;				/* Raw id of this channel */
 	char dev_id[16];		/* unique name per DMAC of channel */
-
-	/* Set chcr */
-	int (*set_chcr)(struct sh_dmae_chan *sh_chan, u32 regs);
-	/* Set DMA resource */
-	int (*set_dmars)(struct sh_dmae_chan *sh_chan, u16 res);
 };
 
 struct sh_dmae_device {
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index e7b1944..22d4761 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -1279,47 +1279,47 @@
 	rdev->mode_info.connector_table = radeon_connector_table;
 	if (rdev->mode_info.connector_table == CT_NONE) {
 #ifdef CONFIG_PPC_PMAC
-		if (machine_is_compatible("PowerBook3,3")) {
+		if (of_machine_is_compatible("PowerBook3,3")) {
 			/* powerbook with VGA */
 			rdev->mode_info.connector_table = CT_POWERBOOK_VGA;
-		} else if (machine_is_compatible("PowerBook3,4") ||
-			   machine_is_compatible("PowerBook3,5")) {
+		} else if (of_machine_is_compatible("PowerBook3,4") ||
+			   of_machine_is_compatible("PowerBook3,5")) {
 			/* powerbook with internal tmds */
 			rdev->mode_info.connector_table = CT_POWERBOOK_INTERNAL;
-		} else if (machine_is_compatible("PowerBook5,1") ||
-			   machine_is_compatible("PowerBook5,2") ||
-			   machine_is_compatible("PowerBook5,3") ||
-			   machine_is_compatible("PowerBook5,4") ||
-			   machine_is_compatible("PowerBook5,5")) {
+		} else if (of_machine_is_compatible("PowerBook5,1") ||
+			   of_machine_is_compatible("PowerBook5,2") ||
+			   of_machine_is_compatible("PowerBook5,3") ||
+			   of_machine_is_compatible("PowerBook5,4") ||
+			   of_machine_is_compatible("PowerBook5,5")) {
 			/* powerbook with external single link tmds (sil164) */
 			rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL;
-		} else if (machine_is_compatible("PowerBook5,6")) {
+		} else if (of_machine_is_compatible("PowerBook5,6")) {
 			/* powerbook with external dual or single link tmds */
 			rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL;
-		} else if (machine_is_compatible("PowerBook5,7") ||
-			   machine_is_compatible("PowerBook5,8") ||
-			   machine_is_compatible("PowerBook5,9")) {
+		} else if (of_machine_is_compatible("PowerBook5,7") ||
+			   of_machine_is_compatible("PowerBook5,8") ||
+			   of_machine_is_compatible("PowerBook5,9")) {
 			/* PowerBook6,2 ? */
 			/* powerbook with external dual link tmds (sil1178?) */
 			rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL;
-		} else if (machine_is_compatible("PowerBook4,1") ||
-			   machine_is_compatible("PowerBook4,2") ||
-			   machine_is_compatible("PowerBook4,3") ||
-			   machine_is_compatible("PowerBook6,3") ||
-			   machine_is_compatible("PowerBook6,5") ||
-			   machine_is_compatible("PowerBook6,7")) {
+		} else if (of_machine_is_compatible("PowerBook4,1") ||
+			   of_machine_is_compatible("PowerBook4,2") ||
+			   of_machine_is_compatible("PowerBook4,3") ||
+			   of_machine_is_compatible("PowerBook6,3") ||
+			   of_machine_is_compatible("PowerBook6,5") ||
+			   of_machine_is_compatible("PowerBook6,7")) {
 			/* ibook */
 			rdev->mode_info.connector_table = CT_IBOOK;
-		} else if (machine_is_compatible("PowerMac4,4")) {
+		} else if (of_machine_is_compatible("PowerMac4,4")) {
 			/* emac */
 			rdev->mode_info.connector_table = CT_EMAC;
-		} else if (machine_is_compatible("PowerMac10,1")) {
+		} else if (of_machine_is_compatible("PowerMac10,1")) {
 			/* mini with internal tmds */
 			rdev->mode_info.connector_table = CT_MINI_INTERNAL;
-		} else if (machine_is_compatible("PowerMac10,2")) {
+		} else if (of_machine_is_compatible("PowerMac10,2")) {
 			/* mini with external tmds */
 			rdev->mode_info.connector_table = CT_MINI_EXTERNAL;
-		} else if (machine_is_compatible("PowerMac12,1")) {
+		} else if (of_machine_is_compatible("PowerMac12,1")) {
 			/* PowerMac8,1 ? */
 			/* imac g5 isight */
 			rdev->mode_info.connector_table = CT_IMAC_G5_ISIGHT;
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 24d90ea..71d4c07 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -55,6 +55,12 @@
 menu "Special HID drivers"
 	depends on HID
 
+config HID_3M_PCT
+	tristate "3M PCT"
+	depends on USB_HID
+	---help---
+	Support for 3M PCT touch screens.
+
 config HID_A4TECH
 	tristate "A4 tech" if EMBEDDED
 	depends on USB_HID
@@ -183,6 +189,23 @@
 	  Say Y here if you want to enable force feedback support for Logitech
 	  Rumblepad 2 devices.
 
+config LOGIG940_FF
+	bool "Logitech Flight System G940 force feedback support"
+	depends on HID_LOGITECH
+	select INPUT_FF_MEMLESS
+	help
+	  Say Y here if you want to enable force feedback support for Logitech
+	  Flight System G940 devices.
+
+config HID_MAGICMOUSE
+	tristate "Apple MagicMouse multi-touch support"
+	depends on BT_HIDP
+	---help---
+	Support for the Apple Magic Mouse multi-touch.
+
+	Say Y here if you want support for the multi-touch features of the
+	Apple Wireless "Magic" Mouse.
+
 config HID_MICROSOFT
 	tristate "Microsoft" if EMBEDDED
 	depends on USB_HID
@@ -190,6 +213,12 @@
 	---help---
 	Support for Microsoft devices that are not fully compliant with HID standard.
 
+config HID_MOSART
+	tristate "MosArt"
+	depends on USB_HID
+	---help---
+	Support for MosArt dual-touch panels.
+
 config HID_MONTEREY
 	tristate "Monterey" if EMBEDDED
 	depends on USB_HID
@@ -198,11 +227,17 @@
 	Support for Monterey Genius KB29E.
 
 config HID_NTRIG
-	tristate "NTrig" if EMBEDDED
+	tristate "NTrig"
+	depends on USB_HID
+	---help---
+	Support for N-Trig touch screen.
+
+config HID_ORTEK
+	tristate "Ortek" if EMBEDDED
 	depends on USB_HID
 	default !EMBEDDED
 	---help---
-	Support for N-Trig touch screen.
+	Support for Ortek WKB-2000 wireless keyboard + mouse trackpad.
 
 config HID_PANTHERLORD
 	tristate "Pantherlord support" if EMBEDDED
@@ -227,6 +262,12 @@
 	---help---
 	Support for Petalynx Maxter remote control.
 
+config HID_QUANTA
+	tristate "Quanta Optical Touch"
+	depends on USB_HID
+	---help---
+	Support for Quanta Optical Touch dual-touch panels.
+
 config HID_SAMSUNG
 	tristate "Samsung" if EMBEDDED
 	depends on USB_HID
@@ -241,6 +282,12 @@
 	---help---
 	Support for Sony PS3 controller.
 
+config HID_STANTUM
+	tristate "Stantum"
+	depends on USB_HID
+	---help---
+	Support for Stantum multitouch panel.
+
 config HID_SUNPLUS
 	tristate "Sunplus" if EMBEDDED
 	depends on USB_HID
@@ -305,9 +352,8 @@
 	  Rumble Force or Force Feedback Wheel.
 
 config HID_WACOM
-	tristate "Wacom Bluetooth devices support" if EMBEDDED
+	tristate "Wacom Bluetooth devices support"
 	depends on BT_HIDP
-	default !EMBEDDED
 	---help---
 	Support for Wacom Graphire Bluetooth tablet.
 
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 0de2dff..0b2618f 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -18,7 +18,11 @@
 ifdef CONFIG_LOGIRUMBLEPAD2_FF
 	hid-logitech-objs	+= hid-lg2ff.o
 endif
+ifdef CONFIG_LOGIG940_FF
+	hid-logitech-objs	+= hid-lg3ff.o
+endif
 
+obj-$(CONFIG_HID_3M_PCT)	+= hid-3m-pct.o
 obj-$(CONFIG_HID_A4TECH)	+= hid-a4tech.o
 obj-$(CONFIG_HID_APPLE)		+= hid-apple.o
 obj-$(CONFIG_HID_BELKIN)	+= hid-belkin.o
@@ -31,14 +35,19 @@
 obj-$(CONFIG_HID_KENSINGTON)	+= hid-kensington.o
 obj-$(CONFIG_HID_KYE)		+= hid-kye.o
 obj-$(CONFIG_HID_LOGITECH)	+= hid-logitech.o
+obj-$(CONFIG_HID_MAGICMOUSE)	+= hid-magicmouse.o
 obj-$(CONFIG_HID_MICROSOFT)	+= hid-microsoft.o
 obj-$(CONFIG_HID_MONTEREY)	+= hid-monterey.o
+obj-$(CONFIG_HID_MOSART)	+= hid-mosart.o
 obj-$(CONFIG_HID_NTRIG)		+= hid-ntrig.o
+obj-$(CONFIG_HID_ORTEK)		+= hid-ortek.o
+obj-$(CONFIG_HID_QUANTA)	+= hid-quanta.o
 obj-$(CONFIG_HID_PANTHERLORD)	+= hid-pl.o
 obj-$(CONFIG_HID_PETALYNX)	+= hid-petalynx.o
 obj-$(CONFIG_HID_SAMSUNG)	+= hid-samsung.o
 obj-$(CONFIG_HID_SMARTJOYPLUS)	+= hid-sjoy.o
 obj-$(CONFIG_HID_SONY)		+= hid-sony.o
+obj-$(CONFIG_HID_STANTUM)	+= hid-stantum.o
 obj-$(CONFIG_HID_SUNPLUS)	+= hid-sunplus.o
 obj-$(CONFIG_HID_GREENASIA)	+= hid-gaff.o
 obj-$(CONFIG_HID_THRUSTMASTER)	+= hid-tmff.o
diff --git a/drivers/hid/hid-3m-pct.c b/drivers/hid/hid-3m-pct.c
new file mode 100644
index 0000000..2370aef
--- /dev/null
+++ b/drivers/hid/hid-3m-pct.c
@@ -0,0 +1,290 @@
+/*
+ *  HID driver for 3M PCT multitouch panels
+ *
+ *  Copyright (c) 2009 Stephane Chatty <chatty@enac.fr>
+ *
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+
+MODULE_AUTHOR("Stephane Chatty <chatty@enac.fr>");
+MODULE_DESCRIPTION("3M PCT multitouch panels");
+MODULE_LICENSE("GPL");
+
+#include "hid-ids.h"
+
+struct mmm_finger {
+	__s32 x, y;
+	__u8 rank;
+	bool touch, valid;
+};
+
+struct mmm_data {
+	struct mmm_finger f[10];
+	__u8 curid, num;
+	bool touch, valid;
+};
+
+static int mmm_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+		struct hid_field *field, struct hid_usage *usage,
+		unsigned long **bit, int *max)
+{
+	switch (usage->hid & HID_USAGE_PAGE) {
+
+	case HID_UP_BUTTON:
+		return -1;
+
+	case HID_UP_GENDESK:
+		switch (usage->hid) {
+		case HID_GD_X:
+			hid_map_usage(hi, usage, bit, max,
+					EV_ABS, ABS_MT_POSITION_X);
+			/* touchscreen emulation */
+			input_set_abs_params(hi->input, ABS_X,
+						field->logical_minimum,
+						field->logical_maximum, 0, 0);
+			return 1;
+		case HID_GD_Y:
+			hid_map_usage(hi, usage, bit, max,
+					EV_ABS, ABS_MT_POSITION_Y);
+			/* touchscreen emulation */
+			input_set_abs_params(hi->input, ABS_Y,
+						field->logical_minimum,
+						field->logical_maximum, 0, 0);
+			return 1;
+		}
+		return 0;
+
+	case HID_UP_DIGITIZER:
+		switch (usage->hid) {
+		/* we do not want to map these: no input-oriented meaning */
+		case 0x14:
+		case 0x23:
+		case HID_DG_INPUTMODE:
+		case HID_DG_DEVICEINDEX:
+		case HID_DG_CONTACTCOUNT:
+		case HID_DG_CONTACTMAX:
+		case HID_DG_INRANGE:
+		case HID_DG_CONFIDENCE:
+			return -1;
+		case HID_DG_TIPSWITCH:
+			/* touchscreen emulation */
+			hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH);
+			return 1;
+		case HID_DG_CONTACTID:
+			hid_map_usage(hi, usage, bit, max,
+					EV_ABS, ABS_MT_TRACKING_ID);
+			return 1;
+		}
+		/* let hid-input decide for the others */
+		return 0;
+
+	case 0xff000000:
+		/* we do not want to map these: no input-oriented meaning */
+		return -1;
+	}
+
+	return 0;
+}
+
+static int mmm_input_mapped(struct hid_device *hdev, struct hid_input *hi,
+		struct hid_field *field, struct hid_usage *usage,
+		unsigned long **bit, int *max)
+{
+	if (usage->type == EV_KEY || usage->type == EV_ABS)
+		clear_bit(usage->code, *bit);
+
+	return 0;
+}
+
+/*
+ * this function is called when a whole packet has been received and processed,
+ * so that it can decide what to send to the input layer.
+ */
+static void mmm_filter_event(struct mmm_data *md, struct input_dev *input)
+{
+	struct mmm_finger *oldest = NULL;
+	bool pressed = false, released = false;
+	int i;
+
+	/*
+	 * we need to iterate over all fingers to decide if we have a press
+	 * or a release event in our touchscreen emulation.
+	 */
+	for (i = 0; i < 10; ++i) {
+		struct mmm_finger *f = &md->f[i];
+		if (!f->valid) {
+			/* this finger is just placeholder data, ignore */
+		} else if (f->touch) {
+			/* this finger is on the screen */
+			input_event(input, EV_ABS, ABS_MT_TRACKING_ID, i);
+			input_event(input, EV_ABS, ABS_MT_POSITION_X, f->x);
+			input_event(input, EV_ABS, ABS_MT_POSITION_Y, f->y);
+			input_mt_sync(input);
+			/*
+			 * touchscreen emulation: maintain the age rank
+			 * of this finger, decide if we have a press
+			 */
+			if (f->rank == 0) {
+				f->rank = ++(md->num);
+				if (f->rank == 1)
+					pressed = true;
+			}
+			if (f->rank == 1)
+				oldest = f;
+		} else {
+			/* this finger took off the screen */
+			/* touchscreen emulation: maintain age rank of others */
+			int j;
+
+			for (j = 0; j < 10; ++j) {
+				struct mmm_finger *g = &md->f[j];
+				if (g->rank > f->rank) {
+					g->rank--;
+					if (g->rank == 1)
+						oldest = g;
+				}
+			}
+			f->rank = 0;
+			--(md->num);
+			if (md->num == 0)
+				released = true;
+		}
+		f->valid = 0;
+	}
+
+	/* touchscreen emulation */
+	if (oldest) {
+		if (pressed)
+			input_event(input, EV_KEY, BTN_TOUCH, 1);
+		input_event(input, EV_ABS, ABS_X, oldest->x);
+		input_event(input, EV_ABS, ABS_Y, oldest->y);
+	} else if (released) {
+		input_event(input, EV_KEY, BTN_TOUCH, 0);
+	}
+}
+
+/*
+ * this function is called for every report,
+ * so that we can accumulate contact point information
+ * and call input_mt_sync after each point.
+ */
+static int mmm_event(struct hid_device *hid, struct hid_field *field,
+				struct hid_usage *usage, __s32 value)
+{
+	struct mmm_data *md = hid_get_drvdata(hid);
+	/*
+	 * strangely, this function can be called before
+	 * field->hidinput is initialized!
+	 */
+	if (hid->claimed & HID_CLAIMED_INPUT) {
+		struct input_dev *input = field->hidinput->input;
+		switch (usage->hid) {
+		case HID_DG_TIPSWITCH:
+			md->touch = value;
+			break;
+		case HID_DG_CONFIDENCE:
+			md->valid = value;
+			break;
+		case HID_DG_CONTACTID:
+			if (md->valid) {
+				md->curid = value;
+				md->f[value].touch = md->touch;
+				md->f[value].valid = 1;
+			}
+			break;
+		case HID_GD_X:
+			if (md->valid)
+				md->f[md->curid].x = value;
+			break;
+		case HID_GD_Y:
+			if (md->valid)
+				md->f[md->curid].y = value;
+			break;
+		case HID_DG_CONTACTCOUNT:
+			mmm_filter_event(md, input);
+			break;
+		}
+	}
+
+	/* we have handled the hidinput part, now remains hiddev */
+	if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event)
+		hid->hiddev_hid_event(hid, field, usage, value);
+
+	return 1;
+}
+
+static int mmm_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+	int ret;
+	struct mmm_data *md;
+
+	md = kzalloc(sizeof(struct mmm_data), GFP_KERNEL);
+	if (!md) {
+		dev_err(&hdev->dev, "cannot allocate 3M data\n");
+		return -ENOMEM;
+	}
+	hid_set_drvdata(hdev, md);
+
+	ret = hid_parse(hdev);
+	if (!ret)
+		ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+
+	if (ret)
+		kfree(md);
+	return ret;
+}
+
+static void mmm_remove(struct hid_device *hdev)
+{
+	hid_hw_stop(hdev);
+	kfree(hid_get_drvdata(hdev));
+	hid_set_drvdata(hdev, NULL);
+}
+
+static const struct hid_device_id mmm_devices[] = {
+	{ HID_USB_DEVICE(USB_VENDOR_ID_3M, USB_DEVICE_ID_3M1968) },
+	{ }
+};
+MODULE_DEVICE_TABLE(hid, mmm_devices);
+
+static const struct hid_usage_id mmm_grabbed_usages[] = {
+	{ HID_ANY_ID, HID_ANY_ID, HID_ANY_ID },
+	{ HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1}
+};
+
+static struct hid_driver mmm_driver = {
+	.name = "3m-pct",
+	.id_table = mmm_devices,
+	.probe = mmm_probe,
+	.remove = mmm_remove,
+	.input_mapping = mmm_input_mapping,
+	.input_mapped = mmm_input_mapped,
+	.usage_table = mmm_grabbed_usages,
+	.event = mmm_event,
+};
+
+static int __init mmm_init(void)
+{
+	return hid_register_driver(&mmm_driver);
+}
+
+static void __exit mmm_exit(void)
+{
+	hid_unregister_driver(&mmm_driver);
+}
+
+module_init(mmm_init);
+module_exit(mmm_exit);
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index 5b4d66d..78286b1 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -40,6 +40,11 @@
 MODULE_PARM_DESC(fnmode, "Mode of fn key on Apple keyboards (0 = disabled, "
 		"[1] = fkeyslast, 2 = fkeysfirst)");
 
+static unsigned int iso_layout = 1;
+module_param(iso_layout, uint, 0644);
+MODULE_PARM_DESC(iso_layout, "Enable/Disable hardcoded ISO-layout of the keyboard. "
+		"(0 = disabled, [1] = enabled)");
+
 struct apple_sc {
 	unsigned long quirks;
 	unsigned int fn_on;
@@ -199,11 +204,13 @@
 		}
 	}
 
-	if (asc->quirks & APPLE_ISO_KEYBOARD) {
-		trans = apple_find_translation(apple_iso_keyboard, usage->code);
-		if (trans) {
-			input_event(input, usage->type, trans->to, value);
-			return 1;
+	if (iso_layout) {
+		if (asc->quirks & APPLE_ISO_KEYBOARD) {
+			trans = apple_find_translation(apple_iso_keyboard, usage->code);
+			if (trans) {
+				input_event(input, usage->type, trans->to, value);
+				return 1;
+			}
 		}
 	}
 
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index eabe5f8..368fbb0 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -4,7 +4,7 @@
  *  Copyright (c) 1999 Andreas Gal
  *  Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
  *  Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
- *  Copyright (c) 2006-2007 Jiri Kosina
+ *  Copyright (c) 2006-2010 Jiri Kosina
  */
 
 /*
@@ -51,7 +51,7 @@
  * Register a new report for a device.
  */
 
-static struct hid_report *hid_register_report(struct hid_device *device, unsigned type, unsigned id)
+struct hid_report *hid_register_report(struct hid_device *device, unsigned type, unsigned id)
 {
 	struct hid_report_enum *report_enum = device->report_enum + type;
 	struct hid_report *report;
@@ -75,6 +75,7 @@
 
 	return report;
 }
+EXPORT_SYMBOL_GPL(hid_register_report);
 
 /*
  * Register a new field for this report.
@@ -387,7 +388,8 @@
 	__u32 data;
 	unsigned n;
 
-	if (item->size == 0) {
+	/* Local delimiter could have value 0, which allows size to be 0 */
+	if (item->size == 0 && item->tag != HID_LOCAL_ITEM_TAG_DELIMITER) {
 		dbg_hid("item data expected for local item\n");
 		return -1;
 	}
@@ -1248,11 +1250,13 @@
 
 /* a list of devices for which there is a specialized driver on HID bus */
 static const struct hid_device_id hid_blacklist[] = {
+	{ HID_USB_DEVICE(USB_VENDOR_ID_3M, USB_DEVICE_ID_3M1968) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ATV_IRCONTROL) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE) },
+	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICMOUSE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI) },
@@ -1324,6 +1328,7 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_F3D) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG ) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FORCE3D_PRO) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL) },
@@ -1337,10 +1342,15 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
+	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_STANTUM, USB_DEVICE_ID_MTP) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304) },
@@ -1543,8 +1553,9 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_24) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_AIRCABLE, USB_DEVICE_ID_AIRCABLE1) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ALCOR, USB_DEVICE_ID_ALCOR_USBRS232) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_LCM)},
-	{ HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_LCM2)},
+	{ HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT)},
+	{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM)},
+	{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM2)},
 	{ HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_BERKSHIRE, USB_DEVICE_ID_BERKSHIRE_PCWD) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CIDC, 0x0103) },
@@ -1661,8 +1672,6 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0004) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_PHILIPS, USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_POWERCOM, USB_DEVICE_ID_POWERCOM_UPS) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_TENX, USB_DEVICE_ID_TENX_IBUDDY1) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_TENX, USB_DEVICE_ID_TENX_IBUDDY2) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_LABPRO) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_GOTEMP) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_SKIP) },
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 6abd036..cd4ece6 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -864,13 +864,13 @@
 	[EV_SND] = sounds,			[EV_REP] = repeats,
 };
 
-void hid_resolv_event(__u8 type, __u16 code, struct seq_file *f) {
-
+static void hid_resolv_event(__u8 type, __u16 code, struct seq_file *f)
+{
 	seq_printf(f, "%s.%s", events[type] ? events[type] : "?",
 		names[type] ? (names[type][code] ? names[type][code] : "?") : "?");
 }
 
-void hid_dump_input_mapping(struct hid_device *hid, struct seq_file *f)
+static void hid_dump_input_mapping(struct hid_device *hid, struct seq_file *f)
 {
 	int i, j, k;
 	struct hid_report *report;
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 010368e..72c05f9 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -18,6 +18,9 @@
 #ifndef HID_IDS_H_FILE
 #define HID_IDS_H_FILE
 
+#define USB_VENDOR_ID_3M		0x0596
+#define USB_DEVICE_ID_3M1968		0x0500
+
 #define USB_VENDOR_ID_A4TECH		0x09da
 #define USB_DEVICE_ID_A4TECH_WCP32PU	0x0006
 #define USB_DEVICE_ID_A4TECH_X5_005D	0x000a
@@ -56,6 +59,7 @@
 
 #define USB_VENDOR_ID_APPLE		0x05ac
 #define USB_DEVICE_ID_APPLE_MIGHTYMOUSE	0x0304
+#define USB_DEVICE_ID_APPLE_MAGICMOUSE	0x030d
 #define USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI	0x020e
 #define USB_DEVICE_ID_APPLE_FOUNTAIN_ISO	0x020f
 #define USB_DEVICE_ID_APPLE_GEYSER_ANSI	0x0214
@@ -96,9 +100,12 @@
 #define USB_DEVICE_ID_APPLE_ATV_IRCONTROL	0x8241
 #define USB_DEVICE_ID_APPLE_IRCONTROL4	0x8242
 
-#define USB_VENDOR_ID_ASUS		0x0b05
-#define USB_DEVICE_ID_ASUS_LCM		0x1726
-#define USB_DEVICE_ID_ASUS_LCM2		0x175b
+#define USB_VENDOR_ID_ASUS		0x0486
+#define USB_DEVICE_ID_ASUS_T91MT	0x0185
+
+#define USB_VENDOR_ID_ASUSTEK		0x0b05
+#define USB_DEVICE_ID_ASUSTEK_LCM	0x1726
+#define USB_DEVICE_ID_ASUSTEK_LCM2	0x175b
 
 #define USB_VENDOR_ID_ATEN		0x0557
 #define USB_DEVICE_ID_ATEN_UC100KM	0x2004
@@ -169,6 +176,9 @@
 #define USB_VENDOR_ID_ESSENTIAL_REALITY	0x0d7f
 #define USB_DEVICE_ID_ESSENTIAL_REALITY_P5 0x0100
 
+#define USB_VENDOR_ID_ETURBOTOUCH	0x22b9
+#define USB_DEVICE_ID_ETURBOTOUCH	0x0006
+
 #define USB_VENDOR_ID_ETT		0x0664
 #define USB_DEVICE_ID_TC5UH		0x0309
 
@@ -303,6 +313,7 @@
 #define USB_DEVICE_ID_LOGITECH_RUMBLEPAD2_2	0xc219
 #define USB_DEVICE_ID_LOGITECH_WINGMAN_F3D	0xc283
 #define USB_DEVICE_ID_LOGITECH_FORCE3D_PRO	0xc286
+#define USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940	0xc287
 #define USB_DEVICE_ID_LOGITECH_WHEEL	0xc294
 #define USB_DEVICE_ID_LOGITECH_WINGMAN_FFG	0xc293
 #define USB_DEVICE_ID_LOGITECH_MOMO_WHEEL	0xc295
@@ -365,6 +376,9 @@
 #define USB_VENDOR_ID_ONTRAK		0x0a07
 #define USB_DEVICE_ID_ONTRAK_ADU100	0x0064
 
+#define USB_VENDOR_ID_ORTEK		0x05a4
+#define USB_DEVICE_ID_ORTEK_WKB2000	0x2000
+
 #define USB_VENDOR_ID_PANJIT		0x134c
 
 #define USB_VENDOR_ID_PANTHERLORD	0x0810
@@ -382,9 +396,16 @@
 #define USB_VENDOR_ID_POWERCOM		0x0d9f
 #define USB_DEVICE_ID_POWERCOM_UPS	0x0002
 
+#define USB_VENDOR_ID_PRODIGE		0x05af
+#define USB_DEVICE_ID_PRODIGE_CORDLESS	0x3062
+
 #define USB_VENDOR_ID_SAITEK		0x06a3
 #define USB_DEVICE_ID_SAITEK_RUMBLEPAD	0xff17
 
+#define USB_VENDOR_ID_QUANTA		0x0408
+#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH	0x3000
+#define USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN	0x3001
+
 #define USB_VENDOR_ID_SAMSUNG		0x0419
 #define USB_DEVICE_ID_SAMSUNG_IR_REMOTE	0x0001
 
@@ -396,18 +417,20 @@
 #define USB_DEVICE_ID_SOUNDGRAPH_IMON_FIRST	0x0034
 #define USB_DEVICE_ID_SOUNDGRAPH_IMON_LAST	0x0046
 
+#define USB_VENDOR_ID_STANTUM		0x1f87
+#define USB_DEVICE_ID_MTP		0x0002
+
 #define USB_VENDOR_ID_SUN		0x0430
 #define USB_DEVICE_ID_RARITAN_KVM_DONGLE	0xcdab
 
 #define USB_VENDOR_ID_SUNPLUS		0x04fc
 #define USB_DEVICE_ID_SUNPLUS_WDESKTOP	0x05d8
 
-#define USB_VENDOR_ID_TENX		0x1130
-#define USB_DEVICE_ID_TENX_IBUDDY1	0x0001
-#define USB_DEVICE_ID_TENX_IBUDDY2	0x0002
-
 #define USB_VENDOR_ID_THRUSTMASTER	0x044f
 
+#define USB_VENDOR_ID_TOUCHPACK		0x1bfd
+#define USB_DEVICE_ID_TOUCHPACK_RTS	0x1688
+
 #define USB_VENDOR_ID_TOPMAX		0x0663
 #define USB_DEVICE_ID_TOPMAX_COBRAPAD	0x0103
 
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 5862b0f..79d9edd 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -1,6 +1,6 @@
 /*
  *  Copyright (c) 2000-2001 Vojtech Pavlik
- *  Copyright (c) 2006-2007 Jiri Kosina
+ *  Copyright (c) 2006-2010 Jiri Kosina
  *
  *  HID to Linux Input mapping
  */
@@ -193,12 +193,17 @@
 		break;
 
 	case HID_UP_BUTTON:
-		code = ((usage->hid - 1) & 0xf);
+		code = ((usage->hid - 1) & HID_USAGE);
 
 		switch (field->application) {
 		case HID_GD_MOUSE:
 		case HID_GD_POINTER:  code += 0x110; break;
-		case HID_GD_JOYSTICK: code += 0x120; break;
+		case HID_GD_JOYSTICK:
+				      if (code <= 0xf)
+					      code += BTN_JOYSTICK;
+				      else
+					      code += BTN_TRIGGER_HAPPY;
+				      break;
 		case HID_GD_GAMEPAD:  code += 0x130; break;
 		default:
 			switch (field->physical) {
@@ -400,6 +405,7 @@
 		case 0x192: map_key_clear(KEY_CALC);		break;
 		case 0x194: map_key_clear(KEY_FILE);		break;
 		case 0x196: map_key_clear(KEY_WWW);		break;
+		case 0x199: map_key_clear(KEY_CHAT);		break;
 		case 0x19c: map_key_clear(KEY_LOGOFF);		break;
 		case 0x19e: map_key_clear(KEY_COFFEE);		break;
 		case 0x1a6: map_key_clear(KEY_HELP);		break;
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index 9fcd3d0..3677c90 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -34,6 +34,7 @@
 #define LG_FF			0x200
 #define LG_FF2			0x400
 #define LG_RDESC_REL_ABS	0x800
+#define LG_FF3			0x1000
 
 /*
  * Certain Logitech keyboards send in report #3 keys which are far
@@ -266,7 +267,7 @@
 		goto err_free;
 	}
 
-	if (quirks & (LG_FF | LG_FF2))
+	if (quirks & (LG_FF | LG_FF2 | LG_FF3))
 		connect_mask &= ~HID_CONNECT_FF;
 
 	ret = hid_hw_start(hdev, connect_mask);
@@ -279,6 +280,8 @@
 		lgff_init(hdev);
 	if (quirks & LG_FF2)
 		lg2ff_init(hdev);
+	if (quirks & LG_FF3)
+		lg3ff_init(hdev);
 
 	return 0;
 err_free:
@@ -331,6 +334,8 @@
 		.driver_data = LG_FF },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2),
 		.driver_data = LG_FF2 },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940),
+		.driver_data = LG_FF3 },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR),
 		.driver_data = LG_RDESC_REL_ABS },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACETRAVELLER),
diff --git a/drivers/hid/hid-lg.h b/drivers/hid/hid-lg.h
index bf31592..ce2ac86 100644
--- a/drivers/hid/hid-lg.h
+++ b/drivers/hid/hid-lg.h
@@ -13,4 +13,10 @@
 static inline int lg2ff_init(struct hid_device *hdev) { return -1; }
 #endif
 
+#ifdef CONFIG_LOGIG940_FF
+int lg3ff_init(struct hid_device *hdev);
+#else
+static inline int lg3ff_init(struct hid_device *hdev) { return -1; }
+#endif
+
 #endif
diff --git a/drivers/hid/hid-lg3ff.c b/drivers/hid/hid-lg3ff.c
new file mode 100644
index 0000000..4002832
--- /dev/null
+++ b/drivers/hid/hid-lg3ff.c
@@ -0,0 +1,176 @@
+/*
+ *  Force feedback support for Logitech Flight System G940
+ *
+ *  Copyright (c) 2009 Gary Stein <LordCnidarian@gmail.com>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+
+#include <linux/input.h>
+#include <linux/usb.h>
+#include <linux/hid.h>
+
+#include "usbhid/usbhid.h"
+#include "hid-lg.h"
+
+/*
+ * G940 Theory of Operation (from experimentation)
+ *
+ * There are 63 fields (only 3 of them currently used)
+ * 0 - seems to be command field
+ * 1 - 30 deal with the x axis
+ * 31 -60 deal with the y axis
+ *
+ * Field 1 is x axis constant force
+ * Field 31 is y axis constant force
+ *
+ * other interesting fields 1,2,3,4 on x axis
+ * (same for 31,32,33,34 on y axis)
+ *
+ * 0 0 127 127 makes the joystick autocenter hard
+ *
+ * 127 0 127 127 makes the joystick loose on the right,
+ * but stops all movement left
+ *
+ * -127 0 -127 -127 makes the joystick loose on the left,
+ * but stops all movement right
+ *
+ * 0 0 -127 -127 makes the joystick rattle very hard
+ *
+ * I'm sure there are more effects here that I don't know enough about yet
+ */
+
+struct lg3ff_device {
+	struct hid_report *report;
+};
+
+static int hid_lg3ff_play(struct input_dev *dev, void *data,
+			 struct ff_effect *effect)
+{
+	struct hid_device *hid = input_get_drvdata(dev);
+	struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+	struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
+	int x, y;
+
+/*
+ * Maxusage should always be 63 (maximum fields)
+ * likely a better way to ensure this data is clean
+ */
+	memset(report->field[0]->value, 0, sizeof(__s32)*report->field[0]->maxusage);
+
+	switch (effect->type) {
+	case FF_CONSTANT:
+/*
+ * Already clamped in ff_memless
+ * 0 is center (different than other Logitech devices)
+ */
+		x = effect->u.ramp.start_level;
+		y = effect->u.ramp.end_level;
+
+		/* send command byte */
+		report->field[0]->value[0] = 0x51;
+
+/*
+ * The sign is inverted compared to the Force3d Pro;
+ * the values are recast here as 8-bit two's complement
+ */
+		report->field[0]->value[1] = (unsigned char)(-x);
+		report->field[0]->value[31] = (unsigned char)(-y);
+
+		usbhid_submit_report(hid, report, USB_DIR_OUT);
+		break;
+	}
+	return 0;
+}
+static void hid_lg3ff_set_autocenter(struct input_dev *dev, u16 magnitude)
+{
+	struct hid_device *hid = input_get_drvdata(dev);
+	struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+	struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
+
+/*
+ * Auto Centering probed from device
+ * NOTE: deadman's switch on G940 must be covered
+ * for effects to work
+ */
+	report->field[0]->value[0] = 0x51;
+	report->field[0]->value[1] = 0x00;
+	report->field[0]->value[2] = 0x00;
+	report->field[0]->value[3] = 0x7F;
+	report->field[0]->value[4] = 0x7F;
+	report->field[0]->value[31] = 0x00;
+	report->field[0]->value[32] = 0x00;
+	report->field[0]->value[33] = 0x7F;
+	report->field[0]->value[34] = 0x7F;
+
+	usbhid_submit_report(hid, report, USB_DIR_OUT);
+}
+
+
+static const signed short ff3_joystick_ac[] = {
+	FF_CONSTANT,
+	FF_AUTOCENTER,
+	-1
+};
+
+int lg3ff_init(struct hid_device *hid)
+{
+	struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+	struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+	struct input_dev *dev = hidinput->input;
+	struct hid_report *report;
+	struct hid_field *field;
+	const signed short *ff_bits = ff3_joystick_ac;
+	int error;
+	int i;
+
+	/* Find the report to use */
+	if (list_empty(report_list)) {
+		err_hid("No output report found");
+		return -1;
+	}
+
+	/* Check that the report looks ok */
+	report = list_entry(report_list->next, struct hid_report, list);
+	if (!report) {
+		err_hid("NULL output report");
+		return -1;
+	}
+
+	field = report->field[0];
+	if (!field) {
+		err_hid("NULL field");
+		return -1;
+	}
+
+	/* Assume single fixed device G940 */
+	for (i = 0; ff_bits[i] >= 0; i++)
+		set_bit(ff_bits[i], dev->ffbit);
+
+	error = input_ff_create_memless(dev, NULL, hid_lg3ff_play);
+	if (error)
+		return error;
+
+	if (test_bit(FF_AUTOCENTER, dev->ffbit))
+		dev->ff->set_autocenter = hid_lg3ff_set_autocenter;
+
+	dev_info(&hid->dev, "Force feedback for Logitech Flight System G940 by "
+			"Gary Stein <LordCnidarian@gmail.com>\n");
+	return 0;
+}
+
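The constant-force packing described in the theory-of-operation comment above is easy to check in isolation. Below is a minimal userspace sketch (illustrative only, not part of the patch; the sample levels are made up) of the sign-flip-and-truncate that hid_lg3ff_play() performs before writing fields 1 (x) and 31 (y) of the output report:

/* Sketch: mirror the cast hid_lg3ff_play() uses for FF_CONSTANT levels. */
#include <stdio.h>

int main(void)
{
	int levels[] = { 127, 0, -127 };	/* made-up sample levels */
	int i;

	for (i = 0; i < 3; i++) {
		/* sign is reversed, then truncated to an 8-bit report byte */
		unsigned char byte = (unsigned char)(-levels[i]);

		printf("level %4d -> report byte 0x%02x\n", levels[i], byte);
	}
	return 0;
}

For 127, 0 and -127 this prints 0x81, 0x00 and 0x7f, the two's-complement bytes the driver sends to the stick.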
diff --git a/drivers/hid/hid-lgff.c b/drivers/hid/hid-lgff.c
index 987abeb..61142b7 100644
--- a/drivers/hid/hid-lgff.c
+++ b/drivers/hid/hid-lgff.c
@@ -67,6 +67,7 @@
 	{ 0x046d, 0xc219, ff_rumble },
 	{ 0x046d, 0xc283, ff_joystick },
 	{ 0x046d, 0xc286, ff_joystick_ac },
+	{ 0x046d, 0xc287, ff_joystick_ac },
 	{ 0x046d, 0xc293, ff_joystick },
 	{ 0x046d, 0xc294, ff_wheel },
 	{ 0x046d, 0xc295, ff_joystick },
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
new file mode 100644
index 0000000..4a3a94f
--- /dev/null
+++ b/drivers/hid/hid-magicmouse.c
@@ -0,0 +1,449 @@
+/*
+ *   Apple "Magic" Wireless Mouse driver
+ *
+ *   Copyright (c) 2010 Michael Poole <mdpoole@troilus.org>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+
+#include "hid-ids.h"
+
+static bool emulate_3button = true;
+module_param(emulate_3button, bool, 0644);
+MODULE_PARM_DESC(emulate_3button, "Emulate a middle button");
+
+static int middle_button_start = -350;
+static int middle_button_stop = +350;
+
+static bool emulate_scroll_wheel = true;
+module_param(emulate_scroll_wheel, bool, 0644);
+MODULE_PARM_DESC(emulate_scroll_wheel, "Emulate a scroll wheel");
+
+static bool report_touches = true;
+module_param(report_touches, bool, 0644);
+MODULE_PARM_DESC(report_touches, "Emit touch records (otherwise, only use them for emulation)");
+
+static bool report_undeciphered;
+module_param(report_undeciphered, bool, 0644);
+MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state field using a MSC_RAW event");
+
+#define TOUCH_REPORT_ID   0x29
+/* These definitions are not precise, but they're close enough.  (Bits
+ * 0x03 seem to indicate the aspect ratio of the touch, bits 0x70 seem
+ * to be some kind of bit mask -- 0x20 may be a near-field reading,
+ * 0x40 actual contact, and 0x10 may be a start/stop or change
+ * indication.)
+ */
+#define TOUCH_STATE_MASK  0xf0
+#define TOUCH_STATE_NONE  0x00
+#define TOUCH_STATE_START 0x30
+#define TOUCH_STATE_DRAG  0x40
+
+/**
+ * struct magicmouse_sc - Tracks Magic Mouse-specific data.
+ * @input: Input device through which we report events.
+ * @quirks: Currently unused.
+ * @last_timestamp: Timestamp from most recent (18-bit) touch report
+ *     (units of milliseconds over short windows, but seems to
+ *     increase faster when there are no touches).
+ * @delta_time: 18-bit difference between the two most recent touch
+ *     reports from the mouse.
+ * @ntouches: Number of touches in most recent touch report.
+ * @scroll_accel: Number of consecutive scroll motions.
+ * @scroll_jiffies: Time of last scroll motion.
+ * @touches: Most recent data for a touch, indexed by tracking ID.
+ * @tracking_ids: Mapping of current touch input data to @touches.
+ */
+struct magicmouse_sc {
+	struct input_dev *input;
+	unsigned long quirks;
+
+	int last_timestamp;
+	int delta_time;
+	int ntouches;
+	int scroll_accel;
+	unsigned long scroll_jiffies;
+
+	struct {
+		short x;
+		short y;
+		short scroll_y;
+		u8 size;
+	} touches[16];
+	int tracking_ids[16];
+};
+
+static int magicmouse_firm_touch(struct magicmouse_sc *msc)
+{
+	int touch = -1;
+	int ii;
+
+	/* If there is only one "firm" touch, set touch to its
+	 * tracking ID.
+	 */
+	for (ii = 0; ii < msc->ntouches; ii++) {
+		int idx = msc->tracking_ids[ii];
+		if (msc->touches[idx].size < 8) {
+			/* Ignore this touch. */
+		} else if (touch >= 0) {
+			touch = -1;
+			break;
+		} else {
+			touch = idx;
+		}
+	}
+
+	return touch;
+}
+
+static void magicmouse_emit_buttons(struct magicmouse_sc *msc, int state)
+{
+	int last_state = test_bit(BTN_LEFT, msc->input->key) << 0 |
+		test_bit(BTN_RIGHT, msc->input->key) << 1 |
+		test_bit(BTN_MIDDLE, msc->input->key) << 2;
+
+	if (emulate_3button) {
+		int id;
+
+		/* If some button was pressed before, keep it held
+		 * down.  Otherwise, if there's exactly one firm
+		 * touch, use that to override the mouse's guess.
+		 */
+		if (state == 0) {
+			/* The button was released. */
+		} else if (last_state != 0) {
+			state = last_state;
+		} else if ((id = magicmouse_firm_touch(msc)) >= 0) {
+			int x = msc->touches[id].x;
+			if (x < middle_button_start)
+				state = 1;
+			else if (x > middle_button_stop)
+				state = 2;
+			else
+				state = 4;
+		} /* else: we keep the mouse's guess */
+
+		input_report_key(msc->input, BTN_MIDDLE, state & 4);
+	}
+
+	input_report_key(msc->input, BTN_LEFT, state & 1);
+	input_report_key(msc->input, BTN_RIGHT, state & 2);
+
+	if (state != last_state)
+		msc->scroll_accel = 0;
+}
+
+static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tdata)
+{
+	struct input_dev *input = msc->input;
+	__s32 x_y = tdata[0] << 8 | tdata[1] << 16 | tdata[2] << 24;
+	int misc = tdata[5] | tdata[6] << 8;
+	int id = (misc >> 6) & 15;
+	int x = x_y << 12 >> 20;
+	int y = -(x_y >> 20);
+
+	/* Store tracking ID and other fields. */
+	msc->tracking_ids[raw_id] = id;
+	msc->touches[id].x = x;
+	msc->touches[id].y = y;
+	msc->touches[id].size = misc & 63;
+
+	/* If requested, emulate a scroll wheel by detecting small
+	 * vertical touch motions along the middle of the mouse.
+	 */
+	if (emulate_scroll_wheel &&
+	    middle_button_start < x && x < middle_button_stop) {
+		static const int accel_profile[] = {
+			256, 228, 192, 160, 128, 96, 64, 32,
+		};
+		unsigned long now = jiffies;
+		int step = msc->touches[id].scroll_y - y;
+
+		/* Reset acceleration after half a second. */
+		if (time_after(now, msc->scroll_jiffies + HZ / 2))
+			msc->scroll_accel = 0;
+
+		/* Calculate and apply the scroll motion. */
+		switch (tdata[7] & TOUCH_STATE_MASK) {
+		case TOUCH_STATE_START:
+			msc->touches[id].scroll_y = y;
+			msc->scroll_accel = min_t(int, msc->scroll_accel + 1,
+						ARRAY_SIZE(accel_profile) - 1);
+			break;
+		case TOUCH_STATE_DRAG:
+			step = step / accel_profile[msc->scroll_accel];
+			if (step != 0) {
+				msc->touches[id].scroll_y = y;
+				msc->scroll_jiffies = now;
+				input_report_rel(input, REL_WHEEL, step);
+			}
+			break;
+		}
+	}
+
+	/* Generate the input events for this touch. */
+	if (report_touches) {
+		int orientation = (misc >> 10) - 32;
+
+		input_report_abs(input, ABS_MT_TRACKING_ID, id);
+		input_report_abs(input, ABS_MT_TOUCH_MAJOR, tdata[3]);
+		input_report_abs(input, ABS_MT_TOUCH_MINOR, tdata[4]);
+		input_report_abs(input, ABS_MT_ORIENTATION, orientation);
+		input_report_abs(input, ABS_MT_POSITION_X, x);
+		input_report_abs(input, ABS_MT_POSITION_Y, y);
+
+		if (report_undeciphered)
+			input_event(input, EV_MSC, MSC_RAW, tdata[7]);
+
+		input_mt_sync(input);
+	}
+}
+
+static int magicmouse_raw_event(struct hid_device *hdev,
+		struct hid_report *report, u8 *data, int size)
+{
+	struct magicmouse_sc *msc = hid_get_drvdata(hdev);
+	struct input_dev *input = msc->input;
+	int x, y, ts, ii, clicks;
+
+	switch (data[0]) {
+	case 0x10:
+		if (size != 6)
+			return 0;
+		x = (__s16)(data[2] | data[3] << 8);
+		y = (__s16)(data[4] | data[5] << 8);
+		clicks = data[1];
+		break;
+	case TOUCH_REPORT_ID:
+		/* Expect six bytes of prefix, and N*8 bytes of touch data. */
+		if (size < 6 || ((size - 6) % 8) != 0)
+			return 0;
+		ts = data[3] >> 6 | data[4] << 2 | data[5] << 10;
+		msc->delta_time = (ts - msc->last_timestamp) & 0x3ffff;
+		msc->last_timestamp = ts;
+		msc->ntouches = (size - 6) / 8;
+		for (ii = 0; ii < msc->ntouches; ii++)
+			magicmouse_emit_touch(msc, ii, data + ii * 8 + 6);
+		/* When emulating three-button mode, it is important
+		 * to have the current touch information before
+		 * generating a click event.
+		 */
+		x = (signed char)data[1];
+		y = (signed char)data[2];
+		clicks = data[3];
+		break;
+	case 0x20: /* Theoretically battery status (0-100), but I have
+		    * never seen it -- maybe it is only sent upon request.
+		    */
+	case 0x60: /* Unknown, maybe laser on/off. */
+	case 0x61: /* Laser reflection status change.
+		    * data[1]: 0 = spotted, 1 = lost
+		    */
+	default:
+		return 0;
+	}
+
+	magicmouse_emit_buttons(msc, clicks & 3);
+	input_report_rel(input, REL_X, x);
+	input_report_rel(input, REL_Y, y);
+	input_sync(input);
+	return 1;
+}
+
+static int magicmouse_input_open(struct input_dev *dev)
+{
+	struct hid_device *hid = input_get_drvdata(dev);
+
+	return hid->ll_driver->open(hid);
+}
+
+static void magicmouse_input_close(struct input_dev *dev)
+{
+	struct hid_device *hid = input_get_drvdata(dev);
+
+	hid->ll_driver->close(hid);
+}
+
+static void magicmouse_setup_input(struct input_dev *input, struct hid_device *hdev)
+{
+	input_set_drvdata(input, hdev);
+	input->event = hdev->ll_driver->hidinput_input_event;
+	input->open = magicmouse_input_open;
+	input->close = magicmouse_input_close;
+
+	input->name = hdev->name;
+	input->phys = hdev->phys;
+	input->uniq = hdev->uniq;
+	input->id.bustype = hdev->bus;
+	input->id.vendor = hdev->vendor;
+	input->id.product = hdev->product;
+	input->id.version = hdev->version;
+	input->dev.parent = hdev->dev.parent;
+
+	__set_bit(EV_KEY, input->evbit);
+	__set_bit(BTN_LEFT, input->keybit);
+	__set_bit(BTN_RIGHT, input->keybit);
+	if (emulate_3button)
+		__set_bit(BTN_MIDDLE, input->keybit);
+	__set_bit(BTN_TOOL_FINGER, input->keybit);
+
+	__set_bit(EV_REL, input->evbit);
+	__set_bit(REL_X, input->relbit);
+	__set_bit(REL_Y, input->relbit);
+	if (emulate_scroll_wheel)
+		__set_bit(REL_WHEEL, input->relbit);
+
+	if (report_touches) {
+		__set_bit(EV_ABS, input->evbit);
+
+		input_set_abs_params(input, ABS_MT_TRACKING_ID, 0, 15, 0, 0);
+		input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 255, 4, 0);
+		input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0, 255, 4, 0);
+		input_set_abs_params(input, ABS_MT_ORIENTATION, -32, 31, 1, 0);
+		input_set_abs_params(input, ABS_MT_POSITION_X, -1100, 1358,
+				4, 0);
+		/* Note: Touch Y position from the device is inverted relative
+		 * to how pointer motion is reported (and relative to how USB
+		 * HID recommends the coordinates work).  This driver keeps
+		 * the origin at the same position, and just uses the additive
+		 * inverse of the reported Y.
+		 */
+		input_set_abs_params(input, ABS_MT_POSITION_Y, -1589, 2047,
+				4, 0);
+	}
+
+	if (report_undeciphered) {
+		__set_bit(EV_MSC, input->evbit);
+		__set_bit(MSC_RAW, input->mscbit);
+	}
+}
+
+static int magicmouse_probe(struct hid_device *hdev,
+	const struct hid_device_id *id)
+{
+	__u8 feature_1[] = { 0xd7, 0x01 };
+	__u8 feature_2[] = { 0xf8, 0x01, 0x32 };
+	struct input_dev *input;
+	struct magicmouse_sc *msc;
+	struct hid_report *report;
+	int ret;
+
+	msc = kzalloc(sizeof(*msc), GFP_KERNEL);
+	if (msc == NULL) {
+		dev_err(&hdev->dev, "can't alloc magicmouse descriptor\n");
+		return -ENOMEM;
+	}
+
+	msc->quirks = id->driver_data;
+	hid_set_drvdata(hdev, msc);
+
+	ret = hid_parse(hdev);
+	if (ret) {
+		dev_err(&hdev->dev, "magicmouse hid parse failed\n");
+		goto err_free;
+	}
+
+	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+	if (ret) {
+		dev_err(&hdev->dev, "magicmouse hw start failed\n");
+		goto err_free;
+	}
+
+	report = hid_register_report(hdev, HID_INPUT_REPORT, TOUCH_REPORT_ID);
+	if (!report) {
+		dev_err(&hdev->dev, "unable to register touch report\n");
+		ret = -ENOMEM;
+		goto err_stop_hw;
+	}
+	report->size = 6;
+
+	ret = hdev->hid_output_raw_report(hdev, feature_1, sizeof(feature_1),
+			HID_FEATURE_REPORT);
+	if (ret != sizeof(feature_1)) {
+		dev_err(&hdev->dev, "unable to request touch data (1:%d)\n",
+				ret);
+		goto err_stop_hw;
+	}
+	ret = hdev->hid_output_raw_report(hdev, feature_2,
+			sizeof(feature_2), HID_FEATURE_REPORT);
+	if (ret != sizeof(feature_2)) {
+		dev_err(&hdev->dev, "unable to request touch data (2:%d)\n",
+				ret);
+		goto err_stop_hw;
+	}
+
+	input = input_allocate_device();
+	if (!input) {
+		dev_err(&hdev->dev, "can't alloc input device\n");
+		ret = -ENOMEM;
+		goto err_stop_hw;
+	}
+	magicmouse_setup_input(input, hdev);
+
+	ret = input_register_device(input);
+	if (ret) {
+		dev_err(&hdev->dev, "input device registration failed\n");
+		goto err_input;
+	}
+	msc->input = input;
+
+	return 0;
+err_input:
+	input_free_device(input);
+err_stop_hw:
+	hid_hw_stop(hdev);
+err_free:
+	kfree(msc);
+	return ret;
+}
+
+static void magicmouse_remove(struct hid_device *hdev)
+{
+	hid_hw_stop(hdev);
+	kfree(hid_get_drvdata(hdev));
+}
+
+static const struct hid_device_id magic_mice[] = {
+	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICMOUSE),
+		.driver_data = 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(hid, magic_mice);
+
+static struct hid_driver magicmouse_driver = {
+	.name = "magicmouse",
+	.id_table = magic_mice,
+	.probe = magicmouse_probe,
+	.remove = magicmouse_remove,
+	.raw_event = magicmouse_raw_event,
+};
+
+static int __init magicmouse_init(void)
+{
+	int ret;
+
+	ret = hid_register_driver(&magicmouse_driver);
+	if (ret)
+		printk(KERN_ERR "can't register magicmouse driver\n");
+
+	return ret;
+}
+
+static void __exit magicmouse_exit(void)
+{
+	hid_unregister_driver(&magicmouse_driver);
+}
+
+module_init(magicmouse_init);
+module_exit(magicmouse_exit);
+MODULE_LICENSE("GPL");
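magicmouse_emit_touch() above unpacks two signed 12-bit coordinates from bytes 0-2 of each 8-byte touch record. Here is a standalone sketch of the same decoding (illustrative only; the sample bytes are made up, and an unsigned intermediate is used so the left shift is well defined outside a kernel build):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* made-up touch record: bytes 0-2 carry x (low 12 bits) and y (high 12 bits) */
	uint8_t tdata[3] = { 0x34, 0xe2, 0xfd };
	uint32_t x_y = (uint32_t)tdata[0] << 8 | (uint32_t)tdata[1] << 16 |
		       (uint32_t)tdata[2] << 24;
	/* arithmetic right shifts sign-extend, just as the kernel code relies on */
	int x = (int32_t)(x_y << 12) >> 20;	/* sign-extend bits 8..19 */
	int y = -((int32_t)x_y >> 20);		/* sign-extend bits 20..31, then invert */

	printf("x=%d y=%d\n", x, y);		/* prints x=564 y=34 */
	return 0;
}

The inversion of y matches the note in magicmouse_setup_input() about the device reporting touch Y opposite to pointer motion.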
diff --git a/drivers/hid/hid-mosart.c b/drivers/hid/hid-mosart.c
new file mode 100644
index 0000000..c871816
--- /dev/null
+++ b/drivers/hid/hid-mosart.c
@@ -0,0 +1,273 @@
+/*
+ *  HID driver for the multitouch panel on the ASUS EeePC T91MT
+ *
+ *  Copyright (c) 2009-2010 Stephane Chatty <chatty@enac.fr>
+ *  Copyright (c) 2010 Teemu Tuominen <teemu.tuominen@cybercom.com>
+ *
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+#include "usbhid/usbhid.h"
+
+MODULE_AUTHOR("Stephane Chatty <chatty@enac.fr>");
+MODULE_DESCRIPTION("MosArt dual-touch panel");
+MODULE_LICENSE("GPL");
+
+#include "hid-ids.h"
+
+struct mosart_data {
+	__u16 x, y;
+	__u8 id;
+	bool valid;		/* valid finger data, or just placeholder? */
+	bool first;		/* is this the first finger in this frame? */
+	bool activity_now;	/* at least one active finger in this frame? */
+	bool activity;		/* at least one active finger previously? */
+};
+
+static int mosart_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+		struct hid_field *field, struct hid_usage *usage,
+		unsigned long **bit, int *max)
+{
+	switch (usage->hid & HID_USAGE_PAGE) {
+
+	case HID_UP_GENDESK:
+		switch (usage->hid) {
+		case HID_GD_X:
+			hid_map_usage(hi, usage, bit, max,
+					EV_ABS, ABS_MT_POSITION_X);
+			/* touchscreen emulation */
+			input_set_abs_params(hi->input, ABS_X,
+						field->logical_minimum,
+						field->logical_maximum, 0, 0);
+			return 1;
+		case HID_GD_Y:
+			hid_map_usage(hi, usage, bit, max,
+					EV_ABS, ABS_MT_POSITION_Y);
+			/* touchscreen emulation */
+			input_set_abs_params(hi->input, ABS_Y,
+						field->logical_minimum,
+						field->logical_maximum, 0, 0);
+			return 1;
+		}
+		return 0;
+
+	case HID_UP_DIGITIZER:
+		switch (usage->hid) {
+		case HID_DG_CONFIDENCE:
+		case HID_DG_TIPSWITCH:
+		case HID_DG_INPUTMODE:
+		case HID_DG_DEVICEINDEX:
+		case HID_DG_CONTACTCOUNT:
+		case HID_DG_CONTACTMAX:
+		case HID_DG_TIPPRESSURE:
+		case HID_DG_WIDTH:
+		case HID_DG_HEIGHT:
+			return -1;
+		case HID_DG_INRANGE:
+			/* touchscreen emulation */
+			hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH);
+			return 1;
+
+		case HID_DG_CONTACTID:
+			hid_map_usage(hi, usage, bit, max,
+					EV_ABS, ABS_MT_TRACKING_ID);
+			return 1;
+
+		}
+		return 0;
+
+	case 0xff000000:
+		/* ignore HID features */
+		return -1;
+	}
+
+	return 0;
+}
+
+static int mosart_input_mapped(struct hid_device *hdev, struct hid_input *hi,
+		struct hid_field *field, struct hid_usage *usage,
+		unsigned long **bit, int *max)
+{
+	if (usage->type == EV_KEY || usage->type == EV_ABS)
+		clear_bit(usage->code, *bit);
+
+	return 0;
+}
+
+/*
+ * this function is called when a whole finger has been parsed,
+ * so that it can decide what to send to the input layer.
+ */
+static void mosart_filter_event(struct mosart_data *td, struct input_dev *input)
+{
+	td->first = !td->first; /* touchscreen emulation */
+
+	if (!td->valid) {
+		/*
+		 * touchscreen emulation: if no finger in this frame is valid
+		 * and there previously was finger activity, this is a release
+		 */
+		if (!td->first && !td->activity_now && td->activity) {
+			input_event(input, EV_KEY, BTN_TOUCH, 0);
+			td->activity = false;
+		}
+		return;
+	}
+
+	input_event(input, EV_ABS, ABS_MT_TRACKING_ID, td->id);
+	input_event(input, EV_ABS, ABS_MT_POSITION_X, td->x);
+	input_event(input, EV_ABS, ABS_MT_POSITION_Y, td->y);
+
+	input_mt_sync(input);
+	td->valid = false;
+
+	/* touchscreen emulation: if first active finger in this frame... */
+	if (!td->activity_now) {
+		/* if there was no previous activity, emit touch event */
+		if (!td->activity) {
+			input_event(input, EV_KEY, BTN_TOUCH, 1);
+			td->activity = true;
+		}
+		td->activity_now = true;
+		/* and in any case this is our preferred finger */
+		input_event(input, EV_ABS, ABS_X, td->x);
+		input_event(input, EV_ABS, ABS_Y, td->y);
+	}
+}
+
+
+static int mosart_event(struct hid_device *hid, struct hid_field *field,
+				struct hid_usage *usage, __s32 value)
+{
+	struct mosart_data *td = hid_get_drvdata(hid);
+
+	if (hid->claimed & HID_CLAIMED_INPUT) {
+		struct input_dev *input = field->hidinput->input;
+		switch (usage->hid) {
+		case HID_DG_INRANGE:
+			td->valid = !!value;
+			break;
+		case HID_GD_X:
+			td->x = value;
+			break;
+		case HID_GD_Y:
+			td->y = value;
+			mosart_filter_event(td, input);
+			break;
+		case HID_DG_CONTACTID:
+			td->id = value;
+			break;
+		case HID_DG_CONTACTCOUNT:
+			/* touch emulation: this is the last field in a frame */
+			td->first = false;
+			td->activity_now = false;
+			break;
+		case HID_DG_CONFIDENCE:
+		case HID_DG_TIPSWITCH:
+			/* avoid interference from generic hidinput handling */
+			break;
+
+		default:
+			/* fallback to the generic hidinput handling */
+			return 0;
+		}
+	}
+
+	/* we have handled the hidinput part, now remains hiddev */
+	if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event)
+		hid->hiddev_hid_event(hid, field, usage, value);
+
+	return 1;
+}
+
+static int mosart_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+	int ret;
+	struct mosart_data *td;
+
+
+	td = kmalloc(sizeof(struct mosart_data), GFP_KERNEL);
+	if (!td) {
+		dev_err(&hdev->dev, "cannot allocate MosArt data\n");
+		return -ENOMEM;
+	}
+	td->valid = false;
+	td->activity = false;
+	td->activity_now = false;
+	td->first = false;
+	hid_set_drvdata(hdev, td);
+
+	/* currently, it's better to have one evdev device only */
+#if 0
+	hdev->quirks |= HID_QUIRK_MULTI_INPUT;
+#endif
+
+	ret = hid_parse(hdev);
+	if (ret == 0)
+		ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+
+	if (ret == 0) {
+		struct hid_report_enum *re = hdev->report_enum
+						+ HID_FEATURE_REPORT;
+		struct hid_report *r = re->report_id_hash[7];
+
+		r->field[0]->value[0] = 0x02;
+		usbhid_submit_report(hdev, r, USB_DIR_OUT);
+	} else
+		kfree(td);
+
+	return ret;
+}
+
+static void mosart_remove(struct hid_device *hdev)
+{
+	hid_hw_stop(hdev);
+	kfree(hid_get_drvdata(hdev));
+	hid_set_drvdata(hdev, NULL);
+}
+
+static const struct hid_device_id mosart_devices[] = {
+	{ HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT) },
+	{ }
+};
+MODULE_DEVICE_TABLE(hid, mosart_devices);
+
+static const struct hid_usage_id mosart_grabbed_usages[] = {
+	{ HID_ANY_ID, HID_ANY_ID, HID_ANY_ID },
+	{ HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1}
+};
+
+static struct hid_driver mosart_driver = {
+	.name = "mosart",
+	.id_table = mosart_devices,
+	.probe = mosart_probe,
+	.remove = mosart_remove,
+	.input_mapping = mosart_input_mapping,
+	.input_mapped = mosart_input_mapped,
+	.usage_table = mosart_grabbed_usages,
+	.event = mosart_event,
+};
+
+static int __init mosart_init(void)
+{
+	return hid_register_driver(&mosart_driver);
+}
+
+static void __exit mosart_exit(void)
+{
+	hid_unregister_driver(&mosart_driver);
+}
+
+module_init(mosart_init);
+module_exit(mosart_exit);
+
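mosart_filter_event() above (and quanta_filter_event() further down, which follows the same pattern) emulates a single-touch BTN_TOUCH from dual-touch frames: press when a frame first contains a valid finger, release once a whole frame passes with none. A frame-level distillation of that state machine (illustrative only; the real drivers track it per finger via the first/activity_now flags, and the sample frames are made up):

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	int valid_fingers[] = { 2, 1, 0, 0 };	/* made-up frames */
	bool activity = false;			/* was a finger down before? */
	int i;

	for (i = 0; i < 4; i++) {
		bool activity_now = valid_fingers[i] > 0;

		if (activity_now && !activity) {
			printf("frame %d: BTN_TOUCH 1\n", i);	/* press */
			activity = true;
		} else if (!activity_now && activity) {
			printf("frame %d: BTN_TOUCH 0\n", i);	/* release */
			activity = false;
		}
	}
	return 0;
}

This emits a press on frame 0 and a release on frame 2, and stays quiet while the state is unchanged, which is exactly the behaviour the emulation aims for.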
diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c
index 49ce69d..3234c72 100644
--- a/drivers/hid/hid-ntrig.c
+++ b/drivers/hid/hid-ntrig.c
@@ -25,11 +25,16 @@
 					EV_KEY, (c))
 
 struct ntrig_data {
-	__s32 x, y, id, w, h;
-	char reading_a_point, found_contact_id;
-	char pen_active;
-	char finger_active;
-	char inverted;
+	/* Incoming raw values for a single contact */
+	__u16 x, y, w, h;
+	__u16 id;
+	__u8 confidence;
+
+	bool reading_mt;
+	__u8 first_contact_confidence;
+
+	__u8 mt_footer[4];
+	__u8 mt_foot_count;
 };
 
 /*
@@ -42,8 +47,11 @@
 		struct hid_field *field, struct hid_usage *usage,
 		unsigned long **bit, int *max)
 {
-	switch (usage->hid & HID_USAGE_PAGE) {
+	/* No special mappings needed for the pen and single touch */
+	if (field->physical)
+		return 0;
 
+	switch (usage->hid & HID_USAGE_PAGE) {
 	case HID_UP_GENDESK:
 		switch (usage->hid) {
 		case HID_GD_X:
@@ -66,18 +74,12 @@
 	case HID_UP_DIGITIZER:
 		switch (usage->hid) {
 		/* we do not want to map these for now */
-		case HID_DG_CONTACTID: /* value is useless */
+		case HID_DG_CONTACTID: /* Not trustworthy, squelch for now */
 		case HID_DG_INPUTMODE:
 		case HID_DG_DEVICEINDEX:
-		case HID_DG_CONTACTCOUNT:
 		case HID_DG_CONTACTMAX:
 			return -1;
 
-		/* original mapping by Rafi Rubin */
-		case HID_DG_CONFIDENCE:
-			nt_map_key_clear(BTN_TOOL_DOUBLETAP);
-			return 1;
-
 		/* width/height mapped on TouchMajor/TouchMinor/Orientation */
 		case HID_DG_WIDTH:
 			hid_map_usage(hi, usage, bit, max,
@@ -104,6 +106,10 @@
 		struct hid_field *field, struct hid_usage *usage,
 		unsigned long **bit, int *max)
 {
+	/* No special mappings needed for the pen and single touch */
+	if (field->physical)
+		return 0;
+
 	if (usage->type == EV_KEY || usage->type == EV_REL
 			|| usage->type == EV_ABS)
 		clear_bit(usage->code, *bit);
@@ -123,31 +129,30 @@
 	struct input_dev *input = field->hidinput->input;
 	struct ntrig_data *nd = hid_get_drvdata(hid);
 
+	/* No special handling needed for the pen */
+	if (field->application == HID_DG_PEN)
+		return 0;
+
         if (hid->claimed & HID_CLAIMED_INPUT) {
 		switch (usage->hid) {
-
-		case HID_DG_INRANGE:
-			if (field->application & 0x3)
-				nd->pen_active = (value != 0);
-			else
-				nd->finger_active = (value != 0);
-			return 0;
-
-		case HID_DG_INVERT:
-			nd->inverted = value;
-			return 0;
-
+		case 0xff000001:
+			/* Tag indicating the start of a multitouch group */
+			nd->reading_mt = 1;
+			nd->first_contact_confidence = 0;
+			break;
+		case HID_DG_CONFIDENCE:
+			nd->confidence = value;
+			break;
 		case HID_GD_X:
 			nd->x = value;
-			nd->reading_a_point = 1;
+			/* Clear the contact footer */
+			nd->mt_foot_count = 0;
 			break;
 		case HID_GD_Y:
 			nd->y = value;
 			break;
 		case HID_DG_CONTACTID:
 			nd->id = value;
-			/* we receive this only when in multitouch mode */
-			nd->found_contact_id = 1;
 			break;
 		case HID_DG_WIDTH:
 			nd->w = value;
@@ -159,35 +164,13 @@
 			 * report received in a finger event. We want
 			 * to emit a normal (X, Y) position
 			 */
-			if (!nd->found_contact_id) {
-				if (nd->pen_active && nd->finger_active) {
-					input_report_key(input, BTN_TOOL_DOUBLETAP, 0);
-					input_report_key(input, BTN_TOOL_DOUBLETAP, 1);
-				}
+			if (!nd->reading_mt) {
+				input_report_key(input, BTN_TOOL_DOUBLETAP,
+						 (nd->confidence != 0));
 				input_event(input, EV_ABS, ABS_X, nd->x);
 				input_event(input, EV_ABS, ABS_Y, nd->y);
 			}
 			break;
-		case HID_DG_TIPPRESSURE:
-			/*
-			 * when in single touch mode, this is the last
-			 * report received in a pen event. We want
-			 * to emit a normal (X, Y) position
-			 */
-			if (! nd->found_contact_id) {
-				if (nd->pen_active && nd->finger_active) {
-					input_report_key(input,
-							nd->inverted ? BTN_TOOL_RUBBER : BTN_TOOL_PEN
-							, 0);
-					input_report_key(input,
-							nd->inverted ? BTN_TOOL_RUBBER : BTN_TOOL_PEN
-							, 1);
-				}
-				input_event(input, EV_ABS, ABS_X, nd->x);
-				input_event(input, EV_ABS, ABS_Y, nd->y);
-				input_event(input, EV_ABS, ABS_PRESSURE, value);
-			}
-			break;
 		case 0xff000002:
 			/*
 			 * we receive this when the device is in multitouch
@@ -195,10 +178,34 @@
 			 * this usage tells if the contact point is real
 			 * or a placeholder
 			 */
-			if (!nd->reading_a_point || value != 1)
+
+			/* Shouldn't get more than 4 footer packets, so skip */
+			if (nd->mt_foot_count >= 4)
 				break;
+
+			nd->mt_footer[nd->mt_foot_count++] = value;
+
+			/* if the footer isn't complete, break */
+			if (nd->mt_foot_count != 4)
+				break;
+
+			/* Pen activity signal, trigger end of touch. */
+			if (nd->mt_footer[2]) {
+				nd->confidence = 0;
+				break;
+			}
+
+			/* If the contact was invalid */
+			if (!(nd->confidence && nd->mt_footer[0])
+					|| nd->w <= 250
+					|| nd->h <= 190) {
+				nd->confidence = 0;
+				break;
+			}
+
 			/* emit a normal (X, Y) for the first point only */
 			if (nd->id == 0) {
+				nd->first_contact_confidence = nd->confidence;
 				input_event(input, EV_ABS, ABS_X, nd->x);
 				input_event(input, EV_ABS, ABS_Y, nd->y);
 			}
@@ -220,8 +227,39 @@
 						ABS_MT_TOUCH_MINOR, nd->w);
 			}
 			input_mt_sync(field->hidinput->input);
-			nd->reading_a_point = 0;
-			nd->found_contact_id = 0;
+			break;
+
+		case HID_DG_CONTACTCOUNT: /* End of a multitouch group */
+			if (!nd->reading_mt)
+				break;
+
+			nd->reading_mt = 0;
+
+			if (nd->first_contact_confidence) {
+				switch (value) {
+				case 0:	/* for single touch devices */
+				case 1:
+					input_report_key(input,
+							BTN_TOOL_DOUBLETAP, 1);
+					break;
+				case 2:
+					input_report_key(input,
+							BTN_TOOL_TRIPLETAP, 1);
+					break;
+				case 3:
+				default:
+					input_report_key(input,
+							BTN_TOOL_QUADTAP, 1);
+				}
+				input_report_key(input, BTN_TOUCH, 1);
+			} else {
+				input_report_key(input,
+						BTN_TOOL_DOUBLETAP, 0);
+				input_report_key(input,
+						BTN_TOOL_TRIPLETAP, 0);
+				input_report_key(input,
+						BTN_TOOL_QUADTAP, 0);
+			}
 			break;
 
 		default:
@@ -231,8 +269,8 @@
 	}
 
 	/* we have handled the hidinput part, now remains hiddev */
-        if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event)
-                hid->hiddev_hid_event(hid, field, usage, value);
+	if ((hid->claimed & HID_CLAIMED_HIDDEV) && hid->hiddev_hid_event)
+		hid->hiddev_hid_event(hid, field, usage, value);
 
 	return 1;
 }
@@ -241,23 +279,67 @@
 {
 	int ret;
 	struct ntrig_data *nd;
+	struct hid_input *hidinput;
+	struct input_dev *input;
+
+	if (id->driver_data)
+		hdev->quirks |= HID_QUIRK_MULTI_INPUT;
 
 	nd = kmalloc(sizeof(struct ntrig_data), GFP_KERNEL);
 	if (!nd) {
 		dev_err(&hdev->dev, "cannot allocate N-Trig data\n");
 		return -ENOMEM;
 	}
-	nd->reading_a_point = 0;
-	nd->found_contact_id = 0;
+
+	nd->reading_mt = 0;
 	hid_set_drvdata(hdev, nd);
 
 	ret = hid_parse(hdev);
-	if (!ret)
-		ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+	if (ret) {
+		dev_err(&hdev->dev, "parse failed\n");
+		goto err_free;
+	}
 
-	if (ret)
-		kfree (nd);
+	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF);
+	if (ret) {
+		dev_err(&hdev->dev, "hw start failed\n");
+		goto err_free;
+	}
 
+
+	list_for_each_entry(hidinput, &hdev->inputs, list) {
+		input = hidinput->input;
+		switch (hidinput->report->field[0]->application) {
+		case HID_DG_PEN:
+			input->name = "N-Trig Pen";
+			break;
+		case HID_DG_TOUCHSCREEN:
+			__clear_bit(BTN_TOOL_PEN, input->keybit);
+			/*
+			 * A little something special to enable
+			 * two and three finger taps.
+			 */
+			__set_bit(BTN_TOOL_DOUBLETAP, input->keybit);
+			__set_bit(BTN_TOOL_TRIPLETAP, input->keybit);
+			__set_bit(BTN_TOOL_QUADTAP, input->keybit);
+			/*
+			 * The physical touchscreen (single touch)
+			 * input has a value for physical, whereas
+			 * the multitouch only has logical input
+			 * fields.
+			 */
+			input->name =
+				(hidinput->report->field[0]
+				 ->physical) ?
+				"N-Trig Touchscreen" :
+				"N-Trig MultiTouch";
+			break;
+		}
+	}
+
+	return 0;
+err_free:
+	kfree(nd);
 	return ret;
 }
 
@@ -276,7 +358,7 @@
 
 static const struct hid_usage_id ntrig_grabbed_usages[] = {
 	{ HID_ANY_ID, HID_ANY_ID, HID_ANY_ID },
-	{ HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1}
+	{ HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1 }
 };
 
 static struct hid_driver ntrig_driver = {
diff --git a/drivers/hid/hid-ortek.c b/drivers/hid/hid-ortek.c
new file mode 100644
index 0000000..aa9a960
--- /dev/null
+++ b/drivers/hid/hid-ortek.c
@@ -0,0 +1,56 @@
+/*
+ *  HID driver for Ortek WKB-2000 (wireless keyboard + mouse trackpad).
+ *  Fixes LogicalMaximum error in USB report descriptor, see
+ *  http://bugzilla.kernel.org/show_bug.cgi?id=14787
+ *
+ *  Copyright (c) 2010 Johnathon Harris <jmharris@gmail.com>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+
+#include "hid-ids.h"
+
+static void ortek_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+		unsigned int rsize)
+{
+	if (rsize >= 56 && rdesc[54] == 0x25 && rdesc[55] == 0x01) {
+		dev_info(&hdev->dev, "Fixing up Ortek WKB-2000 "
+				"report descriptor.\n");
+		rdesc[55] = 0x92;
+	}
+}
+
+static const struct hid_device_id ortek_devices[] = {
+	{ HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) },
+	{ }
+};
+MODULE_DEVICE_TABLE(hid, ortek_devices);
+
+static struct hid_driver ortek_driver = {
+	.name = "ortek",
+	.id_table = ortek_devices,
+	.report_fixup = ortek_report_fixup
+};
+
+static int __init ortek_init(void)
+{
+	return hid_register_driver(&ortek_driver);
+}
+
+static void __exit ortek_exit(void)
+{
+	hid_unregister_driver(&ortek_driver);
+}
+
+module_init(ortek_init);
+module_exit(ortek_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-quanta.c b/drivers/hid/hid-quanta.c
new file mode 100644
index 0000000..01dd51c
--- /dev/null
+++ b/drivers/hid/hid-quanta.c
@@ -0,0 +1,260 @@
+/*
+ *  HID driver for Quanta Optical Touch dual-touch panels
+ *
+ *  Copyright (c) 2009-2010 Stephane Chatty <chatty@enac.fr>
+ *
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+
+MODULE_AUTHOR("Stephane Chatty <chatty@enac.fr>");
+MODULE_DESCRIPTION("Quanta dual-touch panel");
+MODULE_LICENSE("GPL");
+
+#include "hid-ids.h"
+
+struct quanta_data {
+	__u16 x, y;
+	__u8 id;
+	bool valid;		/* valid finger data, or just placeholder? */
+	bool first;		/* is this the first finger in this frame? */
+	bool activity_now;	/* at least one active finger in this frame? */
+	bool activity;		/* at least one active finger previously? */
+};
+
+static int quanta_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+		struct hid_field *field, struct hid_usage *usage,
+		unsigned long **bit, int *max)
+{
+	switch (usage->hid & HID_USAGE_PAGE) {
+
+	case HID_UP_GENDESK:
+		switch (usage->hid) {
+		case HID_GD_X:
+			hid_map_usage(hi, usage, bit, max,
+					EV_ABS, ABS_MT_POSITION_X);
+			/* touchscreen emulation */
+			input_set_abs_params(hi->input, ABS_X,
+						field->logical_minimum,
+						field->logical_maximum, 0, 0);
+			return 1;
+		case HID_GD_Y:
+			hid_map_usage(hi, usage, bit, max,
+					EV_ABS, ABS_MT_POSITION_Y);
+			/* touchscreen emulation */
+			input_set_abs_params(hi->input, ABS_Y,
+						field->logical_minimum,
+						field->logical_maximum, 0, 0);
+			return 1;
+		}
+		return 0;
+
+	case HID_UP_DIGITIZER:
+		switch (usage->hid) {
+		case HID_DG_CONFIDENCE:
+		case HID_DG_TIPSWITCH:
+		case HID_DG_INPUTMODE:
+		case HID_DG_DEVICEINDEX:
+		case HID_DG_CONTACTCOUNT:
+		case HID_DG_CONTACTMAX:
+		case HID_DG_TIPPRESSURE:
+		case HID_DG_WIDTH:
+		case HID_DG_HEIGHT:
+			return -1;
+		case HID_DG_INRANGE:
+			/* touchscreen emulation */
+			hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH);
+			return 1;
+		case HID_DG_CONTACTID:
+			hid_map_usage(hi, usage, bit, max,
+					EV_ABS, ABS_MT_TRACKING_ID);
+			return 1;
+		}
+		return 0;
+
+	case 0xff000000:
+		/* ignore vendor-specific features */
+		return -1;
+	}
+
+	return 0;
+}
+
+static int quanta_input_mapped(struct hid_device *hdev, struct hid_input *hi,
+		struct hid_field *field, struct hid_usage *usage,
+		unsigned long **bit, int *max)
+{
+	if (usage->type == EV_KEY || usage->type == EV_ABS)
+		clear_bit(usage->code, *bit);
+
+	return 0;
+}
+
+/*
+ * this function is called when a whole finger has been parsed,
+ * so that it can decide what to send to the input layer.
+ */
+static void quanta_filter_event(struct quanta_data *td, struct input_dev *input)
+{
+
+	td->first = !td->first; /* touchscreen emulation */
+
+	if (!td->valid) {
+		/*
+		 * touchscreen emulation: if no finger in this frame is valid
+		 * and there previously was finger activity, this is a release
+		 */
+		if (!td->first && !td->activity_now && td->activity) {
+			input_event(input, EV_KEY, BTN_TOUCH, 0);
+			td->activity = false;
+		}
+		return;
+	}
+
+	input_event(input, EV_ABS, ABS_MT_TRACKING_ID, td->id);
+	input_event(input, EV_ABS, ABS_MT_POSITION_X, td->x);
+	input_event(input, EV_ABS, ABS_MT_POSITION_Y, td->y);
+
+	input_mt_sync(input);
+	td->valid = false;
+
+	/* touchscreen emulation: if first active finger in this frame... */
+	if (!td->activity_now) {
+		/* if there was no previous activity, emit touch event */
+		if (!td->activity) {
+			input_event(input, EV_KEY, BTN_TOUCH, 1);
+			td->activity = true;
+		}
+		td->activity_now = true;
+		/* and in any case this is our preferred finger */
+		input_event(input, EV_ABS, ABS_X, td->x);
+		input_event(input, EV_ABS, ABS_Y, td->y);
+	}
+}
+
+
+static int quanta_event(struct hid_device *hid, struct hid_field *field,
+				struct hid_usage *usage, __s32 value)
+{
+	struct quanta_data *td = hid_get_drvdata(hid);
+
+	if (hid->claimed & HID_CLAIMED_INPUT) {
+		struct input_dev *input = field->hidinput->input;
+
+		switch (usage->hid) {
+		case HID_DG_INRANGE:
+			td->valid = !!value;
+			break;
+		case HID_GD_X:
+			td->x = value;
+			break;
+		case HID_GD_Y:
+			td->y = value;
+			quanta_filter_event(td, input);
+			break;
+		case HID_DG_CONTACTID:
+			td->id = value;
+			break;
+		case HID_DG_CONTACTCOUNT:
+			/* touch emulation: this is the last field in a frame */
+			td->first = false;
+			td->activity_now = false;
+			break;
+		case HID_DG_CONFIDENCE:
+		case HID_DG_TIPSWITCH:
+			/* avoid interference from generic hidinput handling */
+			break;
+
+		default:
+			/* fallback to the generic hidinput handling */
+			return 0;
+		}
+	}
+
+	/* we have handled the hidinput part, now remains hiddev */
+	if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event)
+		hid->hiddev_hid_event(hid, field, usage, value);
+
+	return 1;
+}
+
+static int quanta_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+	int ret;
+	struct quanta_data *td;
+
+	td = kmalloc(sizeof(struct quanta_data), GFP_KERNEL);
+	if (!td) {
+		dev_err(&hdev->dev, "cannot allocate Quanta Touch data\n");
+		return -ENOMEM;
+	}
+	td->valid = false;
+	td->activity = false;
+	td->activity_now = false;
+	td->first = false;
+	hid_set_drvdata(hdev, td);
+
+	ret = hid_parse(hdev);
+	if (!ret)
+		ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+
+	if (ret)
+		kfree(td);
+
+	return ret;
+}
+
+static void quanta_remove(struct hid_device *hdev)
+{
+	hid_hw_stop(hdev);
+	kfree(hid_get_drvdata(hdev));
+	hid_set_drvdata(hdev, NULL);
+}
+
+static const struct hid_device_id quanta_devices[] = {
+	{ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA,
+			USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA,
+			USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN) },
+	{ }
+};
+MODULE_DEVICE_TABLE(hid, quanta_devices);
+
+static const struct hid_usage_id quanta_grabbed_usages[] = {
+	{ HID_ANY_ID, HID_ANY_ID, HID_ANY_ID },
+	{ HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1}
+};
+
+static struct hid_driver quanta_driver = {
+	.name = "quanta-touch",
+	.id_table = quanta_devices,
+	.probe = quanta_probe,
+	.remove = quanta_remove,
+	.input_mapping = quanta_input_mapping,
+	.input_mapped = quanta_input_mapped,
+	.usage_table = quanta_grabbed_usages,
+	.event = quanta_event,
+};
+
+static int __init quanta_init(void)
+{
+	return hid_register_driver(&quanta_driver);
+}
+
+static void __exit quanta_exit(void)
+{
+	hid_unregister_driver(&quanta_driver);
+}
+
+module_init(quanta_init);
+module_exit(quanta_exit);
+
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 4e84502..9bf00d7 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -48,7 +48,7 @@
  * to "operational".  Without this, the ps3 controller will not report any
  * events.
  */
-static int sony_set_operational(struct hid_device *hdev)
+static int sony_set_operational_usb(struct hid_device *hdev)
 {
 	struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
 	struct usb_device *dev = interface_to_usbdev(intf);
@@ -73,6 +73,12 @@
 	return ret;
 }
 
+static int sony_set_operational_bt(struct hid_device *hdev)
+{
+	unsigned char buf[] = { 0x53, 0xf4,  0x42, 0x03, 0x00, 0x00 };
+	return hdev->hid_output_raw_report(hdev, buf, sizeof(buf), HID_FEATURE_REPORT);
+}
+
 static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
 {
 	int ret;
@@ -81,7 +87,7 @@
 
 	sc = kzalloc(sizeof(*sc), GFP_KERNEL);
 	if (sc == NULL) {
-		dev_err(&hdev->dev, "can't alloc apple descriptor\n");
+		dev_err(&hdev->dev, "can't alloc sony descriptor\n");
 		return -ENOMEM;
 	}
 
@@ -101,7 +107,17 @@
 		goto err_free;
 	}
 
-	ret = sony_set_operational(hdev);
+	switch (hdev->bus) {
+	case BUS_USB:
+		ret = sony_set_operational_usb(hdev);
+		break;
+	case BUS_BLUETOOTH:
+		ret = sony_set_operational_bt(hdev);
+		break;
+	default:
+		ret = 0;
+	}
+
 	if (ret < 0)
 		goto err_stop;
 
@@ -121,6 +137,7 @@
 
 static const struct hid_device_id sony_devices[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
+	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE),
 		.driver_data = VAIO_RDESC_CONSTANT },
 	{ }
diff --git a/drivers/hid/hid-stantum.c b/drivers/hid/hid-stantum.c
new file mode 100644
index 0000000..2e592a0
--- /dev/null
+++ b/drivers/hid/hid-stantum.c
@@ -0,0 +1,283 @@
+/*
+ *  HID driver for Stantum multitouch panels
+ *
+ *  Copyright (c) 2009 Stephane Chatty <chatty@enac.fr>
+ *
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+
+MODULE_AUTHOR("Stephane Chatty <chatty@enac.fr>");
+MODULE_DESCRIPTION("Stantum HID multitouch panels");
+MODULE_LICENSE("GPL");
+
+#include "hid-ids.h"
+
+struct stantum_data {
+	__s32 x, y, z, w, h;	/* x, y, pressure, width, height */
+	__u16 id;		/* touch id */
+	bool valid;		/* valid finger data, or just placeholder? */
+	bool first;		/* first finger in the HID packet? */
+	bool activity;		/* at least one active finger so far? */
+};
+
+static int stantum_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+		struct hid_field *field, struct hid_usage *usage,
+		unsigned long **bit, int *max)
+{
+	switch (usage->hid & HID_USAGE_PAGE) {
+
+	case HID_UP_GENDESK:
+		switch (usage->hid) {
+		case HID_GD_X:
+			hid_map_usage(hi, usage, bit, max,
+					EV_ABS, ABS_MT_POSITION_X);
+			/* touchscreen emulation */
+			input_set_abs_params(hi->input, ABS_X,
+						field->logical_minimum,
+						field->logical_maximum, 0, 0);
+			return 1;
+		case HID_GD_Y:
+			hid_map_usage(hi, usage, bit, max,
+					EV_ABS, ABS_MT_POSITION_Y);
+			/* touchscreen emulation */
+			input_set_abs_params(hi->input, ABS_Y,
+						field->logical_minimum,
+						field->logical_maximum, 0, 0);
+			return 1;
+		}
+		return 0;
+
+	case HID_UP_DIGITIZER:
+		switch (usage->hid) {
+		case HID_DG_INRANGE:
+		case HID_DG_CONFIDENCE:
+		case HID_DG_INPUTMODE:
+		case HID_DG_DEVICEINDEX:
+		case HID_DG_CONTACTCOUNT:
+		case HID_DG_CONTACTMAX:
+			return -1;
+
+		case HID_DG_TIPSWITCH:
+			/* touchscreen emulation */
+			hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH);
+			return 1;
+
+		case HID_DG_WIDTH:
+			hid_map_usage(hi, usage, bit, max,
+					EV_ABS, ABS_MT_TOUCH_MAJOR);
+			return 1;
+		case HID_DG_HEIGHT:
+			hid_map_usage(hi, usage, bit, max,
+					EV_ABS, ABS_MT_TOUCH_MINOR);
+			input_set_abs_params(hi->input, ABS_MT_ORIENTATION,
+					1, 1, 0, 0);
+			return 1;
+		case HID_DG_TIPPRESSURE:
+			hid_map_usage(hi, usage, bit, max,
+					EV_ABS, ABS_MT_PRESSURE);
+			return 1;
+
+		case HID_DG_CONTACTID:
+			hid_map_usage(hi, usage, bit, max,
+					EV_ABS, ABS_MT_TRACKING_ID);
+			return 1;
+
+		}
+		return 0;
+
+	case 0xff000000:
+		/* no input-oriented meaning */
+		return -1;
+	}
+
+	return 0;
+}
+
+static int stantum_input_mapped(struct hid_device *hdev, struct hid_input *hi,
+		struct hid_field *field, struct hid_usage *usage,
+		unsigned long **bit, int *max)
+{
+	if (usage->type == EV_KEY || usage->type == EV_ABS)
+		clear_bit(usage->code, *bit);
+
+	return 0;
+}
+
+/*
+ * this function is called when a whole finger has been parsed,
+ * so that it can decide what to send to the input layer.
+ */
+static void stantum_filter_event(struct stantum_data *sd,
+					struct input_dev *input)
+{
+	bool wide;
+
+	if (!sd->valid) {
+		/*
+		 * touchscreen emulation: if the first finger is not valid and
+		 * there previously was finger activity, this is a release
+		 */
+		if (sd->first && sd->activity) {
+			input_event(input, EV_KEY, BTN_TOUCH, 0);
+			sd->activity = false;
+		}
+		return;
+	}
+
+	input_event(input, EV_ABS, ABS_MT_TRACKING_ID, sd->id);
+	input_event(input, EV_ABS, ABS_MT_POSITION_X, sd->x);
+	input_event(input, EV_ABS, ABS_MT_POSITION_Y, sd->y);
+
+	wide = (sd->w > sd->h);
+	input_event(input, EV_ABS, ABS_MT_ORIENTATION, wide);
+	input_event(input, EV_ABS, ABS_MT_TOUCH_MAJOR, wide ? sd->w : sd->h);
+	input_event(input, EV_ABS, ABS_MT_TOUCH_MINOR, wide ? sd->h : sd->w);
+
+	input_event(input, EV_ABS, ABS_MT_PRESSURE, sd->z);
+
+	input_mt_sync(input);
+	sd->valid = false;
+
+	/* touchscreen emulation */
+	if (sd->first) {
+		if (!sd->activity) {
+			input_event(input, EV_KEY, BTN_TOUCH, 1);
+			sd->activity = true;
+		}
+		input_event(input, EV_ABS, ABS_X, sd->x);
+		input_event(input, EV_ABS, ABS_Y, sd->y);
+	}
+	sd->first = false;
+}
+
+
+static int stantum_event(struct hid_device *hid, struct hid_field *field,
+				struct hid_usage *usage, __s32 value)
+{
+	struct stantum_data *sd = hid_get_drvdata(hid);
+
+	if (hid->claimed & HID_CLAIMED_INPUT) {
+		struct input_dev *input = field->hidinput->input;
+
+		switch (usage->hid) {
+		case HID_DG_INRANGE:
+			/* this is the last field in a finger */
+			stantum_filter_event(sd, input);
+			break;
+		case HID_DG_WIDTH:
+			sd->w = value;
+			break;
+		case HID_DG_HEIGHT:
+			sd->h = value;
+			break;
+		case HID_GD_X:
+			sd->x = value;
+			break;
+		case HID_GD_Y:
+			sd->y = value;
+			break;
+		case HID_DG_TIPPRESSURE:
+			sd->z = value;
+			break;
+		case HID_DG_CONTACTID:
+			sd->id = value;
+			break;
+		case HID_DG_CONFIDENCE:
+			sd->valid = !!value;
+			break;
+		case 0xff000002:
+			/* this comes only before the first finger */
+			sd->first = true;
+			break;
+
+		default:
+			/* ignore the others */
+			return 1;
+		}
+	}
+
+	/* we have handled the hidinput part, now remains hiddev */
+	if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event)
+		hid->hiddev_hid_event(hid, field, usage, value);
+
+	return 1;
+}
+
+static int stantum_probe(struct hid_device *hdev,
+				const struct hid_device_id *id)
+{
+	int ret;
+	struct stantum_data *sd;
+
+	sd = kmalloc(sizeof(struct stantum_data), GFP_KERNEL);
+	if (!sd) {
+		dev_err(&hdev->dev, "cannot allocate Stantum data\n");
+		return -ENOMEM;
+	}
+	sd->valid = false;
+	sd->first = false;
+	sd->activity = false;
+	hid_set_drvdata(hdev, sd);
+
+	ret = hid_parse(hdev);
+	if (!ret)
+		ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+
+	if (ret)
+		kfree(sd);
+
+	return ret;
+}
+
+static void stantum_remove(struct hid_device *hdev)
+{
+	hid_hw_stop(hdev);
+	kfree(hid_get_drvdata(hdev));
+	hid_set_drvdata(hdev, NULL);
+}
+
+static const struct hid_device_id stantum_devices[] = {
+	{ HID_USB_DEVICE(USB_VENDOR_ID_STANTUM, USB_DEVICE_ID_MTP) },
+	{ }
+};
+MODULE_DEVICE_TABLE(hid, stantum_devices);
+
+static const struct hid_usage_id stantum_grabbed_usages[] = {
+	{ HID_ANY_ID, HID_ANY_ID, HID_ANY_ID },
+	{ HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1}
+};
+
+static struct hid_driver stantum_driver = {
+	.name = "stantum",
+	.id_table = stantum_devices,
+	.probe = stantum_probe,
+	.remove = stantum_remove,
+	.input_mapping = stantum_input_mapping,
+	.input_mapped = stantum_input_mapped,
+	.usage_table = stantum_grabbed_usages,
+	.event = stantum_event,
+};
+
+static int __init stantum_init(void)
+{
+	return hid_register_driver(&stantum_driver);
+}
+
+static void __exit stantum_exit(void)
+{
+	hid_unregister_driver(&stantum_driver);
+}
+
+module_init(stantum_init);
+module_exit(stantum_exit);
+
diff --git a/drivers/hid/hid-wacom.c b/drivers/hid/hid-wacom.c
index 12dcda5..8d3b46f 100644
--- a/drivers/hid/hid-wacom.c
+++ b/drivers/hid/hid-wacom.c
@@ -156,7 +156,9 @@
 	struct hid_input *hidinput;
 	struct input_dev *input;
 	struct wacom_data *wdata;
+	char rep_data[2];
 	int ret;
+	int limit;
 
 	wdata = kzalloc(sizeof(*wdata), GFP_KERNEL);
 	if (wdata == NULL) {
@@ -166,6 +168,7 @@
 
 	hid_set_drvdata(hdev, wdata);
 
+	/* Parse the HID report now */
 	ret = hid_parse(hdev);
 	if (ret) {
 		dev_err(&hdev->dev, "parse failed\n");
@@ -178,6 +181,31 @@
 		goto err_free;
 	}
 
+	/*
+	 * Note that if the raw queries fail, it's not a hard failure and it
+	 * is safe to continue
+	 */
+
+	/* Set Wacom mode2 */
+	rep_data[0] = 0x03; rep_data[1] = 0x00;
+	limit = 3;
+	do {
+		ret = hdev->hid_output_raw_report(hdev, rep_data, 2,
+				HID_FEATURE_REPORT);
+	} while (ret < 0 && limit-- > 0);
+	if (ret < 0)
+		dev_warn(&hdev->dev, "failed to poke device #1, %d\n", ret);
+
+	/* 0x06 - high reporting speed, 0x05 - low speed */
+	rep_data[0] = 0x06; rep_data[1] = 0x00;
+	limit = 3;
+	do {
+		ret = hdev->hid_output_raw_report(hdev, rep_data, 2,
+				HID_FEATURE_REPORT);
+	} while (ret < 0 && limit-- > 0);
+	if (ret < 0)
+		dev_warn(&hdev->dev, "failed to poke device #2, %d\n", ret);
+
 	hidinput = list_entry(hdev->inputs.next, struct hid_input, list);
 	input = hidinput->input;
 
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index cdd1369..d044767 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -134,7 +134,7 @@
 		goto out;
 	}
 
-	ret = dev->hid_output_raw_report(dev, buf, count);
+	ret = dev->hid_output_raw_report(dev, buf, count, HID_OUTPUT_REPORT);
 out:
 	kfree(buf);
 	return ret;
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index e2997a8..56d06cd 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -5,7 +5,7 @@
  *  Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
  *  Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
  *  Copyright (c) 2007-2008 Oliver Neukum
- *  Copyright (c) 2006-2009 Jiri Kosina
+ *  Copyright (c) 2006-2010 Jiri Kosina
  */
 
 /*
@@ -316,6 +316,7 @@
 			err_hid("usb_submit_urb(out) failed");
 			return -1;
 		}
+		usbhid->last_out = jiffies;
 	} else {
 		/*
 		 * queue work to wake up the device.
@@ -377,6 +378,7 @@
 			err_hid("usb_submit_urb(ctrl) failed");
 			return -1;
 		}
+		usbhid->last_ctrl = jiffies;
 	} else {
 		/*
 		 * queue work to wake up the device.
@@ -512,9 +514,20 @@
 		usbhid->out[usbhid->outhead].report = report;
 		usbhid->outhead = head;
 
-		if (!test_and_set_bit(HID_OUT_RUNNING, &usbhid->iofl))
+		if (!test_and_set_bit(HID_OUT_RUNNING, &usbhid->iofl)) {
 			if (hid_submit_out(hid))
 				clear_bit(HID_OUT_RUNNING, &usbhid->iofl);
+		} else {
+			/*
+			 * The queue is known to run, but an earlier
+			 * request may be stuck and we may need to
+			 * time out.
+			 * There is no race because this is called
+			 * under the spinlock.
+			 */
+			if (time_after(jiffies, usbhid->last_out + HZ * 5))
+				usb_unlink_urb(usbhid->urbout);
+		}
 		return;
 	}
 
@@ -535,9 +548,20 @@
 	usbhid->ctrl[usbhid->ctrlhead].dir = dir;
 	usbhid->ctrlhead = head;
 
-	if (!test_and_set_bit(HID_CTRL_RUNNING, &usbhid->iofl))
+	if (!test_and_set_bit(HID_CTRL_RUNNING, &usbhid->iofl)) {
 		if (hid_submit_ctrl(hid))
 			clear_bit(HID_CTRL_RUNNING, &usbhid->iofl);
+	} else {
+		/*
+		 * The queue is known to run, but an earlier
+		 * request may be stuck and we may need to
+		 * time out.
+		 * There is no race because this is called
+		 * under the spinlock.
+		 */
+		if (time_after(jiffies, usbhid->last_ctrl + HZ * 5))
+			usb_unlink_urb(usbhid->urbctrl);
+	}
 }
 
 void usbhid_submit_report(struct hid_device *hid, struct hid_report *report, unsigned char dir)
@@ -774,7 +798,8 @@
 	return 0;
 }
 
-static int usbhid_output_raw_report(struct hid_device *hid, __u8 *buf, size_t count)
+static int usbhid_output_raw_report(struct hid_device *hid, __u8 *buf, size_t count,
+		unsigned char report_type)
 {
 	struct usbhid_device *usbhid = hid->driver_data;
 	struct usb_device *dev = hid_to_usb_dev(hid);
@@ -785,7 +810,7 @@
 	ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
 		HID_REQ_SET_REPORT,
 		USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
-		((HID_OUTPUT_REPORT + 1) << 8) | *buf,
+		((report_type + 1) << 8) | *buf,
 		interface->desc.bInterfaceNumber, buf + 1, count - 1,
 		USB_CTRL_SET_TIMEOUT);
 
@@ -981,9 +1006,6 @@
 
 	spin_lock_init(&usbhid->lock);
 
-	usbhid->intf = intf;
-	usbhid->ifnum = interface->desc.bInterfaceNumber;
-
 	usbhid->urbctrl = usb_alloc_urb(0, GFP_KERNEL);
 	if (!usbhid->urbctrl) {
 		ret = -ENOMEM;
@@ -1154,6 +1176,8 @@
 
 	hid->driver_data = usbhid;
 	usbhid->hid = hid;
+	usbhid->intf = intf;
+	usbhid->ifnum = interface->desc.bInterfaceNumber;
 
 	ret = hid_add_device(hid);
 	if (ret) {
@@ -1342,7 +1366,7 @@
 
 #endif /* CONFIG_PM */
 
-static struct usb_device_id hid_usb_ids [] = {
+static const struct usb_device_id hid_usb_ids[] = {
 	{ .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS,
 		.bInterfaceClass = USB_INTERFACE_CLASS_HID },
 	{ }						/* Terminating entry */
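The stuck-URB handling added above stamps last_out/last_ctrl at submission time and unlinks the URB once more than HZ * 5 jiffies have passed. The comparison stays correct across jiffies wraparound; a standalone sketch of why (illustrative only; the macro body below is the subtract-and-cast form the kernel's time_after() boils down to, minus its type checks, and the tick values are made up):

#include <limits.h>
#include <stdio.h>

/* subtract-and-cast comparison, as in the kernel's time_after() */
#define time_after(a, b)	((long)((b) - (a)) < 0)

int main(void)
{
	unsigned long last_out = ULONG_MAX - 20;	/* stamped shortly before wraparound */
	unsigned long jiffies  = 7;			/* 28 ticks later, counter has wrapped */
	unsigned long deadline = last_out + 5;		/* 5-tick timeout */

	printf("naive jiffies > deadline: %d\n", jiffies > deadline);		/* 0: misses the timeout */
	printf("time_after(jiffies, deadline): %d\n", time_after(jiffies, deadline));	/* 1: fires */
	return 0;
}

A plain greater-than check never fires once the counter wraps, while the signed-difference form keeps working, which is why the patch uses time_after() for the 5-second watchdog.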
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 38773dc..7844280 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -43,8 +43,10 @@
 
 	{ USB_VENDOR_ID_AFATECH, USB_DEVICE_ID_AFATECH_AF9016, HID_QUIRK_FULLSPEED_INTERVAL },
 
+	{ USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH, HID_QUIRK_MULTI_INPUT },
 	{ USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
 	{ USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII, HID_QUIRK_MULTI_INPUT },
+	{ USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS, HID_QUIRK_MULTI_INPUT },
 
 	{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET },
@@ -57,6 +59,7 @@
 	{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
+	{ USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209, HID_QUIRK_MULTI_INPUT },
diff --git a/drivers/hid/usbhid/usbhid.h b/drivers/hid/usbhid/usbhid.h
index 08f505c..ec20400 100644
--- a/drivers/hid/usbhid/usbhid.h
+++ b/drivers/hid/usbhid/usbhid.h
@@ -80,12 +80,14 @@
 	unsigned char ctrlhead, ctrltail;                               /* Control fifo head & tail */
 	char *ctrlbuf;                                                  /* Control buffer */
 	dma_addr_t ctrlbuf_dma;                                         /* Control buffer dma */
+	unsigned long last_ctrl;						/* record of last ctrl request for timeouts */
 
 	struct urb *urbout;                                             /* Output URB */
 	struct hid_output_fifo out[HID_CONTROL_FIFO_SIZE];              /* Output pipe fifo */
 	unsigned char outhead, outtail;                                 /* Output pipe fifo head & tail */
 	char *outbuf;                                                   /* Output buffer */
 	dma_addr_t outbuf_dma;                                          /* Output buffer dma */
+	unsigned long last_out;							/* record of last output for timeouts */
 
 	spinlock_t lock;						/* fifo spinlock */
 	unsigned long iofl;                                             /* I/O flags (CTRL_RUNNING, OUT_RUNNING) */
diff --git a/drivers/isdn/hisax/Kconfig b/drivers/isdn/hisax/Kconfig
index 3464ebc..452fde9 100644
--- a/drivers/isdn/hisax/Kconfig
+++ b/drivers/isdn/hisax/Kconfig
@@ -109,7 +109,7 @@
 
 config HISAX_TELESPCI
 	bool "Teles PCI"
-	depends on PCI && PCI_LEGACY && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV))
+	depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV))
 	help
 	  This enables HiSax support for the Teles PCI.
 	  See <file:Documentation/isdn/README.HiSax> on how to configure it.
@@ -237,7 +237,7 @@
 
 config HISAX_NETJET
 	bool "NETjet card"
-	depends on PCI && PCI_LEGACY && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV))
+	depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV))
 	help
 	  This enables HiSax support for the NetJet from Traverse
 	  Technologies.
@@ -248,7 +248,7 @@
 
 config HISAX_NETJET_U
 	bool "NETspider U card"
-	depends on PCI && PCI_LEGACY && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV))
+	depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV))
 	help
 	  This enables HiSax support for the Netspider U interface ISDN card
 	  from Traverse Technologies.
@@ -287,7 +287,7 @@
 
 config HISAX_BKM_A4T
 	bool "Telekom A4T card"
-	depends on PCI && PCI_LEGACY
+	depends on PCI
 	help
 	  This enables HiSax support for the Telekom A4T card.
 
@@ -297,7 +297,7 @@
 
 config HISAX_SCT_QUADRO
 	bool "Scitel Quadro card"
-	depends on PCI && PCI_LEGACY
+	depends on PCI
 	help
 	  This enables HiSax support for the Scitel Quadro card.
 
@@ -316,7 +316,7 @@
 
 config HISAX_HFC_PCI
 	bool "HFC PCI-Bus cards"
-	depends on PCI && PCI_LEGACY && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV))
+	depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV))
 	help
 	  This enables HiSax support for the HFC-S PCI 2BDS0 based cards.
 
@@ -325,7 +325,7 @@
 
 config HISAX_W6692
 	bool "Winbond W6692 based cards"
-	depends on PCI && PCI_LEGACY
+	depends on PCI
 	help
 	  This enables HiSax support for Winbond W6692 based PCI ISDN cards.
 
@@ -341,7 +341,7 @@
 
 config HISAX_ENTERNOW_PCI
 	bool "Formula-n enter:now PCI card"
-	depends on HISAX_NETJET && PCI && PCI_LEGACY && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV))
+	depends on HISAX_NETJET && PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV))
 	help
 	  This enables HiSax support for the Formula-n enter:now PCI
 	  ISDN card.
@@ -412,7 +412,7 @@
 
 config HISAX_FRITZ_PCIPNP
 	tristate "AVM Fritz!Card PCI/PCIv2/PnP support (EXPERIMENTAL)"
-	depends on PCI && PCI_LEGACY && EXPERIMENTAL
+	depends on PCI && EXPERIMENTAL
 	help
 	  This enables the driver for the AVM Fritz!Card PCI,
 	  Fritz!Card PCI v2 and Fritz!Card PnP.
diff --git a/drivers/isdn/hisax/avm_pci.c b/drivers/isdn/hisax/avm_pci.c
index 7cabc5a..14295a1 100644
--- a/drivers/isdn/hisax/avm_pci.c
+++ b/drivers/isdn/hisax/avm_pci.c
@@ -822,7 +822,7 @@
 
 #endif /* __ISAPNP__ */
 
-#ifndef CONFIG_PCI_LEGACY
+#ifndef CONFIG_PCI
 
 static int __devinit avm_pci_setup(struct IsdnCardState *cs)
 {
@@ -835,7 +835,7 @@
 
 static int __devinit avm_pci_setup(struct IsdnCardState *cs)
 {
-	if ((dev_avm = pci_find_device(PCI_VENDOR_ID_AVM,
+	if ((dev_avm = hisax_find_pci_device(PCI_VENDOR_ID_AVM,
 		PCI_DEVICE_ID_AVM_A1, dev_avm))) {
 
 		if (pci_enable_device(dev_avm))
@@ -864,7 +864,7 @@
 	return (1);
 }
 
-#endif /* CONFIG_PCI_LEGACY */
+#endif /* CONFIG_PCI */
 
 int __devinit
 setup_avm_pcipnp(struct IsdnCard *card)
diff --git a/drivers/isdn/hisax/bkm_a4t.c b/drivers/isdn/hisax/bkm_a4t.c
index 9ca2ee5..9f2009c 100644
--- a/drivers/isdn/hisax/bkm_a4t.c
+++ b/drivers/isdn/hisax/bkm_a4t.c
@@ -340,7 +340,7 @@
 	} else
 		return (0);
 
-	while ((dev_a4t = pci_find_device(PCI_VENDOR_ID_ZORAN,
+	while ((dev_a4t = hisax_find_pci_device(PCI_VENDOR_ID_ZORAN,
 		PCI_DEVICE_ID_ZORAN_36120, dev_a4t))) {
 		ret = a4t_pci_probe(dev_a4t, cs, &found, &pci_memaddr);
 		if (!ret)
diff --git a/drivers/isdn/hisax/bkm_a8.c b/drivers/isdn/hisax/bkm_a8.c
index e1ff471..e775706 100644
--- a/drivers/isdn/hisax/bkm_a8.c
+++ b/drivers/isdn/hisax/bkm_a8.c
@@ -301,7 +301,7 @@
 		(sub_vendor_id != PCI_VENDOR_ID_BERKOM)))
 		return (0);
 	if (cs->subtyp == SCT_1) {
-		while ((dev_a8 = pci_find_device(PCI_VENDOR_ID_PLX,
+		while ((dev_a8 = hisax_find_pci_device(PCI_VENDOR_ID_PLX,
 			PCI_DEVICE_ID_PLX_9050, dev_a8))) {
 			
 			sub_vendor_id = dev_a8->subsystem_vendor;
diff --git a/drivers/isdn/hisax/diva.c b/drivers/isdn/hisax/diva.c
index 0b0c2e5..780da9b 100644
--- a/drivers/isdn/hisax/diva.c
+++ b/drivers/isdn/hisax/diva.c
@@ -1148,7 +1148,7 @@
 
 #endif	/* ISAPNP */
 
-#ifdef CONFIG_PCI_LEGACY
+#ifdef CONFIG_PCI
 static struct pci_dev *dev_diva __devinitdata = NULL;
 static struct pci_dev *dev_diva_u __devinitdata = NULL;
 static struct pci_dev *dev_diva201 __devinitdata = NULL;
@@ -1159,21 +1159,21 @@
 	struct IsdnCardState *cs = card->cs;
 
 	cs->subtyp = 0;
-	if ((dev_diva = pci_find_device(PCI_VENDOR_ID_EICON,
+	if ((dev_diva = hisax_find_pci_device(PCI_VENDOR_ID_EICON,
 		PCI_DEVICE_ID_EICON_DIVA20, dev_diva))) {
 		if (pci_enable_device(dev_diva))
 			return(0);
 		cs->subtyp = DIVA_PCI;
 		cs->irq = dev_diva->irq;
 		cs->hw.diva.cfg_reg = pci_resource_start(dev_diva, 2);
-	} else if ((dev_diva_u = pci_find_device(PCI_VENDOR_ID_EICON,
+	} else if ((dev_diva_u = hisax_find_pci_device(PCI_VENDOR_ID_EICON,
 		PCI_DEVICE_ID_EICON_DIVA20_U, dev_diva_u))) {
 		if (pci_enable_device(dev_diva_u))
 			return(0);
 		cs->subtyp = DIVA_PCI;
 		cs->irq = dev_diva_u->irq;
 		cs->hw.diva.cfg_reg = pci_resource_start(dev_diva_u, 2);
-	} else if ((dev_diva201 = pci_find_device(PCI_VENDOR_ID_EICON,
+	} else if ((dev_diva201 = hisax_find_pci_device(PCI_VENDOR_ID_EICON,
 		PCI_DEVICE_ID_EICON_DIVA201, dev_diva201))) {
 		if (pci_enable_device(dev_diva201))
 			return(0);
@@ -1183,7 +1183,7 @@
 			(ulong) ioremap(pci_resource_start(dev_diva201, 0), 4096);
 		cs->hw.diva.cfg_reg =
 			(ulong) ioremap(pci_resource_start(dev_diva201, 1), 4096);
-	} else if ((dev_diva202 = pci_find_device(PCI_VENDOR_ID_EICON,
+	} else if ((dev_diva202 = hisax_find_pci_device(PCI_VENDOR_ID_EICON,
 		PCI_DEVICE_ID_EICON_DIVA202, dev_diva202))) {
 		if (pci_enable_device(dev_diva202))
 			return(0);
@@ -1229,14 +1229,14 @@
 	return (1);		/* card found */
 }
 
-#else	/* if !CONFIG_PCI_LEGACY */
+#else	/* if !CONFIG_PCI */
 
 static int __devinit setup_diva_pci(struct IsdnCard *card)
 {
 	return (-1);	/* card not found; continue search */
 }
 
-#endif	/* CONFIG_PCI_LEGACY */
+#endif	/* CONFIG_PCI */
 
 int __devinit
 setup_diva(struct IsdnCard *card)
diff --git a/drivers/isdn/hisax/elsa.c b/drivers/isdn/hisax/elsa.c
index aa29d1c..23c41fc 100644
--- a/drivers/isdn/hisax/elsa.c
+++ b/drivers/isdn/hisax/elsa.c
@@ -1025,7 +1025,7 @@
 	       cs->irq);
 }
 
-#ifdef CONFIG_PCI_LEGACY
+#ifdef CONFIG_PCI
 static 	struct pci_dev *dev_qs1000 __devinitdata = NULL;
 static 	struct pci_dev *dev_qs3000 __devinitdata = NULL;
 
@@ -1035,7 +1035,7 @@
 	struct IsdnCardState *cs = card->cs;
 
 	cs->subtyp = 0;
-	if ((dev_qs1000 = pci_find_device(PCI_VENDOR_ID_ELSA,
+	if ((dev_qs1000 = hisax_find_pci_device(PCI_VENDOR_ID_ELSA,
 		PCI_DEVICE_ID_ELSA_MICROLINK, dev_qs1000))) {
 		if (pci_enable_device(dev_qs1000))
 			return(0);
@@ -1043,7 +1043,7 @@
 		cs->irq = dev_qs1000->irq;
 		cs->hw.elsa.cfg = pci_resource_start(dev_qs1000, 1);
 		cs->hw.elsa.base = pci_resource_start(dev_qs1000, 3);
-	} else if ((dev_qs3000 = pci_find_device(PCI_VENDOR_ID_ELSA,
+	} else if ((dev_qs3000 = hisax_find_pci_device(PCI_VENDOR_ID_ELSA,
 		PCI_DEVICE_ID_ELSA_QS3000, dev_qs3000))) {
 		if (pci_enable_device(dev_qs3000))
 			return(0);
@@ -1093,7 +1093,7 @@
 {
 	return (1);
 }
-#endif /* CONFIG_PCI_LEGACY */
+#endif /* CONFIG_PCI */
 
 static int __devinit
 setup_elsa_common(struct IsdnCard *card)
diff --git a/drivers/isdn/hisax/enternow_pci.c b/drivers/isdn/hisax/enternow_pci.c
index 39f421e..26264ab 100644
--- a/drivers/isdn/hisax/enternow_pci.c
+++ b/drivers/isdn/hisax/enternow_pci.c
@@ -406,7 +406,7 @@
 
 	for ( ;; )
 	{
-		if ((dev_netjet = pci_find_device(PCI_VENDOR_ID_TIGERJET,
+		if ((dev_netjet = hisax_find_pci_device(PCI_VENDOR_ID_TIGERJET,
 			PCI_DEVICE_ID_TIGERJET_300,  dev_netjet))) {
 			ret = en_pci_probe(dev_netjet, cs);
 			if (!ret)
diff --git a/drivers/isdn/hisax/gazel.c b/drivers/isdn/hisax/gazel.c
index 0ea3b46..353982f 100644
--- a/drivers/isdn/hisax/gazel.c
+++ b/drivers/isdn/hisax/gazel.c
@@ -531,7 +531,7 @@
 	return (0);
 }
 
-#ifdef CONFIG_PCI_LEGACY
+#ifdef CONFIG_PCI
 static struct pci_dev *dev_tel __devinitdata = NULL;
 
 static int __devinit
@@ -546,7 +546,7 @@
 	found = 0;
 	seekcard = PCI_DEVICE_ID_PLX_R685;
 	for (nbseek = 0; nbseek < 4; nbseek++) {
-		if ((dev_tel = pci_find_device(PCI_VENDOR_ID_PLX,
+		if ((dev_tel = hisax_find_pci_device(PCI_VENDOR_ID_PLX,
 					seekcard, dev_tel))) {
 			if (pci_enable_device(dev_tel))
 				return 1;
@@ -620,7 +620,7 @@
 
 	return (0);
 }
-#endif /* CONFIG_PCI_LEGACY */
+#endif /* CONFIG_PCI */
 
 int __devinit
 setup_gazel(struct IsdnCard *card)
@@ -640,7 +640,7 @@
 			return (0);
 	} else {
 
-#ifdef CONFIG_PCI_LEGACY
+#ifdef CONFIG_PCI
 		if (setup_gazelpci(cs))
 			return (0);
 #else
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c
index 1091473..917cc84 100644
--- a/drivers/isdn/hisax/hfc_pci.c
+++ b/drivers/isdn/hisax/hfc_pci.c
@@ -1658,7 +1658,7 @@
 
 	i = 0;
 	while (id_list[i].vendor_id) {
-		tmp_hfcpci = pci_find_device(id_list[i].vendor_id,
+		tmp_hfcpci = hisax_find_pci_device(id_list[i].vendor_id,
 					     id_list[i].device_id,
 					     dev_hfcpci);
 		i++;
diff --git a/drivers/isdn/hisax/hisax.h b/drivers/isdn/hisax/hisax.h
index 0685c19..832a878 100644
--- a/drivers/isdn/hisax/hisax.h
+++ b/drivers/isdn/hisax/hisax.h
@@ -1323,3 +1323,26 @@
 char *HiSax_getrev(const char *revision);
 int TeiNew(void);
 void TeiFree(void);
+
+#ifdef CONFIG_PCI
+
+#include <linux/pci.h>
+
+/* adaptation wrapper for old usage
+ * WARNING! This is unfit for use in a PCI hotplug environment,
+ * as the returned PCI device can disappear at any moment in time.
+ * Callers should be converted to use pci_get_device() instead.
+ */
+static inline struct pci_dev *hisax_find_pci_device(unsigned int vendor,
+						    unsigned int device,
+						    struct pci_dev *from)
+{
+	struct pci_dev *pdev;
+
+	pci_dev_get(from);
+	pdev = pci_get_subsys(vendor, device, PCI_ANY_ID, PCI_ANY_ID, from);
+	pci_dev_put(pdev);
+	return pdev;
+}
+
+#endif
diff --git a/drivers/isdn/hisax/niccy.c b/drivers/isdn/hisax/niccy.c
index ef00633..ccaa6e1 100644
--- a/drivers/isdn/hisax/niccy.c
+++ b/drivers/isdn/hisax/niccy.c
@@ -297,12 +297,12 @@
 			return 0;
 		}
 	} else {
-#ifdef CONFIG_PCI_LEGACY
+#ifdef CONFIG_PCI
 		static struct pci_dev *niccy_dev __devinitdata;
 
 		u_int pci_ioaddr;
 		cs->subtyp = 0;
-		if ((niccy_dev = pci_find_device(PCI_VENDOR_ID_SATSAGEM,
+		if ((niccy_dev = hisax_find_pci_device(PCI_VENDOR_ID_SATSAGEM,
 						 PCI_DEVICE_ID_SATSAGEM_NICCY,
 						 niccy_dev))) {
 			if (pci_enable_device(niccy_dev))
@@ -354,7 +354,7 @@
 		printk(KERN_WARNING "Niccy: io0 0 and NO_PCI_BIOS\n");
 		printk(KERN_WARNING "Niccy: unable to config NICCY PCI\n");
 		return 0;
-#endif				/* CONFIG_PCI_LEGACY */
+#endif				/* CONFIG_PCI */
 	}
 	printk(KERN_INFO "HiSax: NICCY %s config irq:%d data:0x%X ale:0x%X\n",
 		(cs->subtyp == 1) ? "PnP" : "PCI",
diff --git a/drivers/isdn/hisax/nj_s.c b/drivers/isdn/hisax/nj_s.c
index 8d36ccc..2344e7b 100644
--- a/drivers/isdn/hisax/nj_s.c
+++ b/drivers/isdn/hisax/nj_s.c
@@ -276,7 +276,7 @@
 
 	for ( ;; )
 	{
-		if ((dev_netjet = pci_find_device(PCI_VENDOR_ID_TIGERJET,
+		if ((dev_netjet = hisax_find_pci_device(PCI_VENDOR_ID_TIGERJET,
 			PCI_DEVICE_ID_TIGERJET_300,  dev_netjet))) {
 			ret = njs_pci_probe(dev_netjet, cs);
 			if (!ret)
diff --git a/drivers/isdn/hisax/nj_u.c b/drivers/isdn/hisax/nj_u.c
index d306c94..095e974 100644
--- a/drivers/isdn/hisax/nj_u.c
+++ b/drivers/isdn/hisax/nj_u.c
@@ -240,7 +240,7 @@
 
 	for ( ;; )
 	{
-		if ((dev_netjet = pci_find_device(PCI_VENDOR_ID_TIGERJET,
+		if ((dev_netjet = hisax_find_pci_device(PCI_VENDOR_ID_TIGERJET,
 			PCI_DEVICE_ID_TIGERJET_300,  dev_netjet))) {
 			ret = nju_pci_probe(dev_netjet, cs);
 			if (!ret)
diff --git a/drivers/isdn/hisax/sedlbauer.c b/drivers/isdn/hisax/sedlbauer.c
index 5569a52..69dfc8d 100644
--- a/drivers/isdn/hisax/sedlbauer.c
+++ b/drivers/isdn/hisax/sedlbauer.c
@@ -598,7 +598,7 @@
 }
 #endif /* __ISAPNP__ */
 
-#ifdef CONFIG_PCI_LEGACY
+#ifdef CONFIG_PCI
 static struct pci_dev *dev_sedl __devinitdata = NULL;
 
 static int __devinit
@@ -607,7 +607,7 @@
 	struct IsdnCardState *cs = card->cs;
 	u16 sub_vendor_id, sub_id;
 
-	if ((dev_sedl = pci_find_device(PCI_VENDOR_ID_TIGERJET,
+	if ((dev_sedl = hisax_find_pci_device(PCI_VENDOR_ID_TIGERJET,
 			PCI_DEVICE_ID_TIGERJET_100, dev_sedl))) {
 		if (pci_enable_device(dev_sedl))
 			return(0);
@@ -673,7 +673,7 @@
 	return (1);
 }
 
-#endif /* CONFIG_PCI_LEGACY */
+#endif /* CONFIG_PCI */
 
 int __devinit
 setup_sedlbauer(struct IsdnCard *card)
diff --git a/drivers/isdn/hisax/telespci.c b/drivers/isdn/hisax/telespci.c
index 28b08de..b85ceb3 100644
--- a/drivers/isdn/hisax/telespci.c
+++ b/drivers/isdn/hisax/telespci.c
@@ -300,7 +300,7 @@
 	if (cs->typ != ISDN_CTYPE_TELESPCI)
 		return (0);
 
-	if ((dev_tel = pci_find_device (PCI_VENDOR_ID_ZORAN, PCI_DEVICE_ID_ZORAN_36120, dev_tel))) {
+	if ((dev_tel = hisax_find_pci_device (PCI_VENDOR_ID_ZORAN, PCI_DEVICE_ID_ZORAN_36120, dev_tel))) {
 		if (pci_enable_device(dev_tel))
 			return(0);
 		cs->irq = dev_tel->irq;
diff --git a/drivers/isdn/hisax/w6692.c b/drivers/isdn/hisax/w6692.c
index c4d862c..9d6e864 100644
--- a/drivers/isdn/hisax/w6692.c
+++ b/drivers/isdn/hisax/w6692.c
@@ -1007,7 +1007,7 @@
 		return (0);
 
 	while (id_list[id_idx].vendor_id) {
-		dev_w6692 = pci_find_device(id_list[id_idx].vendor_id,
+		dev_w6692 = hisax_find_pci_device(id_list[id_idx].vendor_id,
 					    id_list[id_idx].device_id,
 					    dev_w6692);
 		if (dev_w6692) {
diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c
index 23741ce..d840a10 100644
--- a/drivers/macintosh/adb.c
+++ b/drivers/macintosh/adb.c
@@ -322,8 +322,8 @@
 		adb_controller = NULL;
 	} else {
 #ifdef CONFIG_PPC
-		if (machine_is_compatible("AAPL,PowerBook1998") ||
-			machine_is_compatible("PowerBook1,1"))
+		if (of_machine_is_compatible("AAPL,PowerBook1998") ||
+			of_machine_is_compatible("PowerBook1,1"))
 			sleepy_trackpad = 1;
 #endif /* CONFIG_PPC */
 
diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c
index 454bc50..5738d8b 100644
--- a/drivers/macintosh/therm_pm72.c
+++ b/drivers/macintosh/therm_pm72.c
@@ -1899,7 +1899,7 @@
 	 */
 	if (rackmac)
 		cpu_pid_type = CPU_PID_TYPE_RACKMAC;
-	else if (machine_is_compatible("PowerMac7,3")
+	else if (of_machine_is_compatible("PowerMac7,3")
 	    && (cpu_count > 1)
 	    && fcu_fans[CPUA_PUMP_RPM_INDEX].id != FCU_FAN_ABSENT_ID
 	    && fcu_fans[CPUB_PUMP_RPM_INDEX].id != FCU_FAN_ABSENT_ID) {
@@ -2234,10 +2234,10 @@
 {
 	struct device_node *np;
 
-	rackmac = machine_is_compatible("RackMac3,1");
+	rackmac = of_machine_is_compatible("RackMac3,1");
 
-	if (!machine_is_compatible("PowerMac7,2") &&
-	    !machine_is_compatible("PowerMac7,3") &&
+	if (!of_machine_is_compatible("PowerMac7,2") &&
+	    !of_machine_is_compatible("PowerMac7,3") &&
 	    !rackmac)
 	    	return -ENODEV;
 
diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c
index ba48fd7..7fb8b4d 100644
--- a/drivers/macintosh/therm_windtunnel.c
+++ b/drivers/macintosh/therm_windtunnel.c
@@ -490,7 +490,7 @@
 	info = of_get_property(np, "thermal-info", NULL);
 	of_node_put(np);
 
-	if( !info || !machine_is_compatible("PowerMac3,6") )
+	if( !info || !of_machine_is_compatible("PowerMac3,6") )
 		return -ENODEV;
 
 	if( info->id != 3 ) {
diff --git a/drivers/macintosh/via-pmu-backlight.c b/drivers/macintosh/via-pmu-backlight.c
index a348bb0..4f3c447 100644
--- a/drivers/macintosh/via-pmu-backlight.c
+++ b/drivers/macintosh/via-pmu-backlight.c
@@ -150,13 +150,13 @@
 
 	/* Special case for the old PowerBook since I can't test on it */
 	autosave =
-		machine_is_compatible("AAPL,3400/2400") ||
-		machine_is_compatible("AAPL,3500");
+		of_machine_is_compatible("AAPL,3400/2400") ||
+		of_machine_is_compatible("AAPL,3500");
 
 	if (!autosave &&
 	    !pmac_has_backlight_type("pmu") &&
-	    !machine_is_compatible("AAPL,PowerBook1998") &&
-	    !machine_is_compatible("PowerBook1,1"))
+	    !of_machine_is_compatible("AAPL,PowerBook1998") &&
+	    !of_machine_is_compatible("PowerBook1,1"))
 		return;
 
 	snprintf(name, sizeof(name), "pmubl");
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index db379c3..4276484 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -463,8 +463,8 @@
 #endif
 
 #ifdef CONFIG_PPC32
-  	if (machine_is_compatible("AAPL,3400/2400") ||
-  		machine_is_compatible("AAPL,3500")) {
+  	if (of_machine_is_compatible("AAPL,3400/2400") ||
+  		of_machine_is_compatible("AAPL,3500")) {
 		int mb = pmac_call_feature(PMAC_FTR_GET_MB_INFO,
 			NULL, PMAC_MB_INFO_MODEL, 0);
 		pmu_battery_count = 1;
@@ -472,8 +472,8 @@
 			pmu_batteries[0].flags |= PMU_BATT_TYPE_COMET;
 		else
 			pmu_batteries[0].flags |= PMU_BATT_TYPE_HOOPER;
-	} else if (machine_is_compatible("AAPL,PowerBook1998") ||
-		machine_is_compatible("PowerBook1,1")) {
+	} else if (of_machine_is_compatible("AAPL,PowerBook1998") ||
+		of_machine_is_compatible("PowerBook1,1")) {
 		pmu_battery_count = 2;
 		pmu_batteries[0].flags |= PMU_BATT_TYPE_SMART;
 		pmu_batteries[1].flags |= PMU_BATT_TYPE_SMART;
diff --git a/drivers/macintosh/windfarm_core.c b/drivers/macintosh/windfarm_core.c
index 075b4d9..437f55c 100644
--- a/drivers/macintosh/windfarm_core.c
+++ b/drivers/macintosh/windfarm_core.c
@@ -468,9 +468,9 @@
 	DBG("wf: core loaded\n");
 
 	/* Don't register on old machines that use therm_pm72 for now */
-	if (machine_is_compatible("PowerMac7,2") ||
-	    machine_is_compatible("PowerMac7,3") ||
-	    machine_is_compatible("RackMac3,1"))
+	if (of_machine_is_compatible("PowerMac7,2") ||
+	    of_machine_is_compatible("PowerMac7,3") ||
+	    of_machine_is_compatible("RackMac3,1"))
 		return -ENODEV;
 	platform_device_register(&wf_platform_device);
 	return 0;
diff --git a/drivers/macintosh/windfarm_cpufreq_clamp.c b/drivers/macintosh/windfarm_cpufreq_clamp.c
index 900aade..1a77a7c 100644
--- a/drivers/macintosh/windfarm_cpufreq_clamp.c
+++ b/drivers/macintosh/windfarm_cpufreq_clamp.c
@@ -76,9 +76,9 @@
 	struct wf_control *clamp;
 
 	/* Don't register on old machines that use therm_pm72 for now */
-	if (machine_is_compatible("PowerMac7,2") ||
-	    machine_is_compatible("PowerMac7,3") ||
-	    machine_is_compatible("RackMac3,1"))
+	if (of_machine_is_compatible("PowerMac7,2") ||
+	    of_machine_is_compatible("PowerMac7,3") ||
+	    of_machine_is_compatible("RackMac3,1"))
 		return -ENODEV;
 
 	clamp = kmalloc(sizeof(struct wf_control), GFP_KERNEL);
diff --git a/drivers/macintosh/windfarm_lm75_sensor.c b/drivers/macintosh/windfarm_lm75_sensor.c
index ed6426a..d8257d3 100644
--- a/drivers/macintosh/windfarm_lm75_sensor.c
+++ b/drivers/macintosh/windfarm_lm75_sensor.c
@@ -239,9 +239,9 @@
 static int __init wf_lm75_sensor_init(void)
 {
 	/* Don't register on old machines that use therm_pm72 for now */
-	if (machine_is_compatible("PowerMac7,2") ||
-	    machine_is_compatible("PowerMac7,3") ||
-	    machine_is_compatible("RackMac3,1"))
+	if (of_machine_is_compatible("PowerMac7,2") ||
+	    of_machine_is_compatible("PowerMac7,3") ||
+	    of_machine_is_compatible("RackMac3,1"))
 		return -ENODEV;
 	return i2c_add_driver(&wf_lm75_driver);
 }
diff --git a/drivers/macintosh/windfarm_max6690_sensor.c b/drivers/macintosh/windfarm_max6690_sensor.c
index a67b349..b486eb9 100644
--- a/drivers/macintosh/windfarm_max6690_sensor.c
+++ b/drivers/macintosh/windfarm_max6690_sensor.c
@@ -188,9 +188,9 @@
 static int __init wf_max6690_sensor_init(void)
 {
 	/* Don't register on old machines that use therm_pm72 for now */
-	if (machine_is_compatible("PowerMac7,2") ||
-	    machine_is_compatible("PowerMac7,3") ||
-	    machine_is_compatible("RackMac3,1"))
+	if (of_machine_is_compatible("PowerMac7,2") ||
+	    of_machine_is_compatible("PowerMac7,3") ||
+	    of_machine_is_compatible("RackMac3,1"))
 		return -ENODEV;
 	return i2c_add_driver(&wf_max6690_driver);
 }
diff --git a/drivers/macintosh/windfarm_pm112.c b/drivers/macintosh/windfarm_pm112.c
index 73d695d..e0ee807 100644
--- a/drivers/macintosh/windfarm_pm112.c
+++ b/drivers/macintosh/windfarm_pm112.c
@@ -676,7 +676,7 @@
 {
 	struct device_node *cpu;
 
-	if (!machine_is_compatible("PowerMac11,2"))
+	if (!of_machine_is_compatible("PowerMac11,2"))
 		return -ENODEV;
 
 	/* Count the number of CPU cores */
diff --git a/drivers/macintosh/windfarm_pm121.c b/drivers/macintosh/windfarm_pm121.c
index 66ec4fb1..947d4af 100644
--- a/drivers/macintosh/windfarm_pm121.c
+++ b/drivers/macintosh/windfarm_pm121.c
@@ -1008,7 +1008,7 @@
 {
 	int rc = -ENODEV;
 
-	if (machine_is_compatible("PowerMac12,1"))
+	if (of_machine_is_compatible("PowerMac12,1"))
 		rc = pm121_init_pm();
 
 	if (rc == 0) {
diff --git a/drivers/macintosh/windfarm_pm81.c b/drivers/macintosh/windfarm_pm81.c
index abbe206..565d5b2 100644
--- a/drivers/macintosh/windfarm_pm81.c
+++ b/drivers/macintosh/windfarm_pm81.c
@@ -779,8 +779,8 @@
 {
 	int rc = -ENODEV;
 
-	if (machine_is_compatible("PowerMac8,1") ||
-	    machine_is_compatible("PowerMac8,2"))
+	if (of_machine_is_compatible("PowerMac8,1") ||
+	    of_machine_is_compatible("PowerMac8,2"))
 		rc = wf_init_pm();
 
 	if (rc == 0) {
diff --git a/drivers/macintosh/windfarm_pm91.c b/drivers/macintosh/windfarm_pm91.c
index 764c525..bea9916 100644
--- a/drivers/macintosh/windfarm_pm91.c
+++ b/drivers/macintosh/windfarm_pm91.c
@@ -711,7 +711,7 @@
 {
 	int rc = -ENODEV;
 
-	if (machine_is_compatible("PowerMac9,1"))
+	if (of_machine_is_compatible("PowerMac9,1"))
 		rc = wf_init_pm();
 
 	if (rc == 0) {
diff --git a/drivers/macintosh/windfarm_smu_sensors.c b/drivers/macintosh/windfarm_smu_sensors.c
index 9c567b9..3c19350 100644
--- a/drivers/macintosh/windfarm_smu_sensors.c
+++ b/drivers/macintosh/windfarm_smu_sensors.c
@@ -363,9 +363,9 @@
 	 * I yet have to figure out what's up with 8,2 and will have to
 	 * adjust for later, unless we can 100% trust the SDB partition...
 	 */
-	if ((machine_is_compatible("PowerMac8,1") ||
-	     machine_is_compatible("PowerMac8,2") ||
-	     machine_is_compatible("PowerMac9,1")) &&
+	if ((of_machine_is_compatible("PowerMac8,1") ||
+	     of_machine_is_compatible("PowerMac8,2") ||
+	     of_machine_is_compatible("PowerMac9,1")) &&
 	    cpuvcp_version >= 2) {
 		pow->quadratic = 1;
 		DBG("windfarm: CPU Power using quadratic transform\n");
diff --git a/drivers/media/dvb/dvb-core/dvb_net.c b/drivers/media/dvb/dvb-core/dvb_net.c
index 8b8558f..b11533f 100644
--- a/drivers/media/dvb/dvb-core/dvb_net.c
+++ b/drivers/media/dvb/dvb-core/dvb_net.c
@@ -504,6 +504,7 @@
 				       "bytes left in TS.  Resyncing.\n", ts_remain);
 				priv->ule_sndu_len = 0;
 				priv->need_pusi = 1;
+				ts += TS_SZ;
 				continue;
 			}
 
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 44d2037..5382b5a 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -126,8 +126,6 @@
  *  Public data...
  */
 
-static struct proc_dir_entry *mpt_proc_root_dir;
-
 #define WHOINIT_UNKNOWN		0xAA
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -146,6 +144,9 @@
 static MPT_RESETHANDLER		 MptResetHandlers[MPT_MAX_PROTOCOL_DRIVERS];
 static struct mpt_pci_driver 	*MptDeviceDriverHandlers[MPT_MAX_PROTOCOL_DRIVERS];
 
+#ifdef CONFIG_PROC_FS
+static struct proc_dir_entry 	*mpt_proc_root_dir;
+#endif
 
 /*
  *  Driver Callback Index's
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index b494867..9718c8f 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -76,8 +76,8 @@
 #define COPYRIGHT	"Copyright (c) 1999-2008 " MODULEAUTHOR
 #endif
 
-#define MPT_LINUX_VERSION_COMMON	"3.04.13"
-#define MPT_LINUX_PACKAGE_NAME		"@(#)mptlinux-3.04.13"
+#define MPT_LINUX_VERSION_COMMON	"3.04.14"
+#define MPT_LINUX_PACKAGE_NAME		"@(#)mptlinux-3.04.14"
 #define WHAT_MAGIC_STRING		"@" "(" "#" ")"
 
 #define show_mptmod_ver(s,ver)  \
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index 352acd0..caa8f56 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -360,8 +360,8 @@
 	u16		 iocstatus;
 
 	/* bus reset is only good for SCSI IO, RAID PASSTHRU */
-	if (!(function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) ||
-	    (function == MPI_FUNCTION_SCSI_IO_REQUEST)) {
+	if (!(function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
+		function == MPI_FUNCTION_SCSI_IO_REQUEST)) {
 		dtmprintk(ioc, printk(MYIOC_s_WARN_FMT
 			"TaskMgmt, not SCSI_IO!!\n", ioc->name));
 		return -EPERM;
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index ebf6ae0..612ab3c 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -195,29 +195,34 @@
 	unsigned long		flags;
 	int			ready;
 	MPT_ADAPTER 		*ioc;
+	int			loops = 40;	/* seconds */
 
 	hd = shost_priv(SCpnt->device->host);
 	ioc = hd->ioc;
 	spin_lock_irqsave(shost->host_lock, flags);
-	while ((ready = fc_remote_port_chkready(rport) >> 16) == DID_IMM_RETRY) {
+	while ((ready = fc_remote_port_chkready(rport) >> 16) == DID_IMM_RETRY
+	 || (loops > 0 && ioc->active == 0)) {
 		spin_unlock_irqrestore(shost->host_lock, flags);
 		dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT
 			"mptfc_block_error_handler.%d: %d:%d, port status is "
-			"DID_IMM_RETRY, deferring %s recovery.\n",
+			"%x, active flag %d, deferring %s recovery.\n",
 			ioc->name, ioc->sh->host_no,
-			SCpnt->device->id, SCpnt->device->lun, caller));
+			SCpnt->device->id, SCpnt->device->lun,
+			ready, ioc->active, caller));
 		msleep(1000);
 		spin_lock_irqsave(shost->host_lock, flags);
+		loops--;
 	}
 	spin_unlock_irqrestore(shost->host_lock, flags);
 
-	if (ready == DID_NO_CONNECT || !SCpnt->device->hostdata) {
+	if (ready == DID_NO_CONNECT || !SCpnt->device->hostdata
+	 || ioc->active == 0) {
 		dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT
 			"%s.%d: %d:%d, failing recovery, "
-			"port state %d, vdevice %p.\n", caller,
+			"port state %x, active %d, vdevice %p.\n", caller,
 			ioc->name, ioc->sh->host_no,
 			SCpnt->device->id, SCpnt->device->lun, ready,
-			SCpnt->device->hostdata));
+			ioc->active, SCpnt->device->hostdata));
 		return FAILED;
 	}
 	dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 83873e3..c20bbe4 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -1075,6 +1075,19 @@
 	return 0;
 }
 
+static void
+mptsas_block_io_sdev(struct scsi_device *sdev, void *data)
+{
+	scsi_device_set_state(sdev, SDEV_BLOCK);
+}
+
+static void
+mptsas_block_io_starget(struct scsi_target *starget)
+{
+	if (starget)
+		starget_for_each_device(starget, NULL, mptsas_block_io_sdev);
+}
+
 /**
  * mptsas_target_reset_queue
  *
@@ -1098,10 +1111,11 @@
 	id = sas_event_data->TargetID;
 	channel = sas_event_data->Bus;
 
-	if (!(vtarget = mptsas_find_vtarget(ioc, channel, id)))
-		return;
-
-	vtarget->deleted = 1; /* block IO */
+	vtarget = mptsas_find_vtarget(ioc, channel, id);
+	if (vtarget) {
+		mptsas_block_io_starget(vtarget->starget);
+		vtarget->deleted = 1; /* block IO */
+	}
 
 	target_reset_list = kzalloc(sizeof(struct mptsas_target_reset_event),
 	    GFP_ATOMIC);
@@ -1868,7 +1882,8 @@
 	if (ioc->sas_discovery_quiesce_io)
 		return SCSI_MLQUEUE_HOST_BUSY;
 
-//	scsi_print_command(SCpnt);
+	if (ioc->debug_level & MPT_DEBUG_SCSI)
+		scsi_print_command(SCpnt);
 
 	return mptscsih_qcmd(SCpnt,done);
 }
@@ -2686,6 +2701,187 @@
 	return error;
 }
 
+struct rep_manu_request {
+	u8 smp_frame_type;
+	u8 function;
+	u8 reserved;
+	u8 request_length;
+};
+
+struct rep_manu_reply {
+	u8 smp_frame_type; /* 0x41 */
+	u8 function; /* 0x01 */
+	u8 function_result;
+	u8 response_length;
+	u16 expander_change_count;
+	u8 reserved0[2];
+	u8 sas_format:1;
+	u8 reserved1:7;
+	u8 reserved2[3];
+	u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN];
+	u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN];
+	u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN];
+	u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN];
+	u16 component_id;
+	u8 component_revision_id;
+	u8 reserved3;
+	u8 vendor_specific[8];
+};
+
+/**
+ * mptsas_exp_repmanufacture_info - fill expander manufacturer info
+ * @ioc: per adapter object
+ * @sas_address: expander sas address
+ * @edev: the sas_expander_device object
+ *
+ * Fills in the sas_expander_device object when SMP port is created.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+mptsas_exp_repmanufacture_info(MPT_ADAPTER *ioc,
+	u64 sas_address, struct sas_expander_device *edev)
+{
+	MPT_FRAME_HDR *mf;
+	SmpPassthroughRequest_t *smpreq;
+	SmpPassthroughReply_t *smprep;
+	struct rep_manu_reply *manufacture_reply;
+	struct rep_manu_request *manufacture_request;
+	int ret;
+	int flagsLength;
+	unsigned long timeleft;
+	char *psge;
+	unsigned long flags;
+	void *data_out = NULL;
+	dma_addr_t data_out_dma = 0;
+	u32 sz;
+
+	spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+	if (ioc->ioc_reset_in_progress) {
+		spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+		printk(MYIOC_s_INFO_FMT "%s: host reset in progress!\n",
+			ioc->name, __func__);
+		return -EFAULT;
+	}
+	spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+
+	ret = mutex_lock_interruptible(&ioc->sas_mgmt.mutex);
+	if (ret)
+		goto out;
+
+	mf = mpt_get_msg_frame(mptsasMgmtCtx, ioc);
+	if (!mf) {
+		ret = -ENOMEM;
+		goto out_unlock;
+	}
+
+	smpreq = (SmpPassthroughRequest_t *)mf;
+	memset(smpreq, 0, sizeof(*smpreq));
+
+	sz = sizeof(struct rep_manu_request) + sizeof(struct rep_manu_reply);
+
+	data_out = pci_alloc_consistent(ioc->pcidev, sz, &data_out_dma);
+	if (!data_out) {
+		printk(KERN_ERR "Memory allocation failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
+		ret = -ENOMEM;
+		goto put_mf;
+	}
+
+	manufacture_request = data_out;
+	manufacture_request->smp_frame_type = 0x40;
+	manufacture_request->function = 1;
+	manufacture_request->reserved = 0;
+	manufacture_request->request_length = 0;
+
+	smpreq->Function = MPI_FUNCTION_SMP_PASSTHROUGH;
+	smpreq->PhysicalPort = 0xFF;
+	*((u64 *)&smpreq->SASAddress) = cpu_to_le64(sas_address);
+	smpreq->RequestDataLength = sizeof(struct rep_manu_request);
+
+	psge = (char *)
+		(((int *) mf) + (offsetof(SmpPassthroughRequest_t, SGL) / 4));
+
+	flagsLength = MPI_SGE_FLAGS_SIMPLE_ELEMENT |
+		MPI_SGE_FLAGS_SYSTEM_ADDRESS |
+		MPI_SGE_FLAGS_HOST_TO_IOC |
+		MPI_SGE_FLAGS_END_OF_BUFFER;
+	flagsLength = flagsLength << MPI_SGE_FLAGS_SHIFT;
+	flagsLength |= sizeof(struct rep_manu_request);
+
+	ioc->add_sge(psge, flagsLength, data_out_dma);
+	psge += ioc->SGE_size;
+
+	flagsLength = MPI_SGE_FLAGS_SIMPLE_ELEMENT |
+		MPI_SGE_FLAGS_SYSTEM_ADDRESS |
+		MPI_SGE_FLAGS_IOC_TO_HOST |
+		MPI_SGE_FLAGS_END_OF_BUFFER;
+	flagsLength = flagsLength << MPI_SGE_FLAGS_SHIFT;
+	flagsLength |= sizeof(struct rep_manu_reply);
+	ioc->add_sge(psge, flagsLength, data_out_dma +
+		     sizeof(struct rep_manu_request));
+
+	INITIALIZE_MGMT_STATUS(ioc->sas_mgmt.status)
+	mpt_put_msg_frame(mptsasMgmtCtx, ioc, mf);
+
+	timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done, 10 * HZ);
+	if (!(ioc->sas_mgmt.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
+		ret = -ETIME;
+		mpt_free_msg_frame(ioc, mf);
+		mf = NULL;
+		if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_DID_IOCRESET)
+			goto out_free;
+		if (!timeleft)
+			mpt_HardResetHandler(ioc, CAN_SLEEP);
+		goto out_free;
+	}
+
+	mf = NULL;
+
+	if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_RF_VALID) {
+		u8 *tmp;
+
+		smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply;
+		if (le16_to_cpu(smprep->ResponseDataLength) !=
+		    sizeof(struct rep_manu_reply))
+			goto out_free;
+
+		manufacture_reply = data_out + sizeof(struct rep_manu_request);
+		strncpy(edev->vendor_id, manufacture_reply->vendor_id,
+			SAS_EXPANDER_VENDOR_ID_LEN);
+		strncpy(edev->product_id, manufacture_reply->product_id,
+			SAS_EXPANDER_PRODUCT_ID_LEN);
+		strncpy(edev->product_rev, manufacture_reply->product_rev,
+			SAS_EXPANDER_PRODUCT_REV_LEN);
+		edev->level = manufacture_reply->sas_format;
+		if (manufacture_reply->sas_format) {
+			strncpy(edev->component_vendor_id,
+				manufacture_reply->component_vendor_id,
+				SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN);
+			tmp = (u8 *)&manufacture_reply->component_id;
+			edev->component_id = tmp[0] << 8 | tmp[1];
+			edev->component_revision_id =
+				manufacture_reply->component_revision_id;
+		}
+	} else {
+		printk(MYIOC_s_ERR_FMT
+			"%s: smp passthru reply failed to be returned\n",
+			ioc->name, __func__);
+		ret = -ENXIO;
+	}
+out_free:
+	if (data_out_dma)
+		pci_free_consistent(ioc->pcidev, sz, data_out, data_out_dma);
+put_mf:
+	if (mf)
+		mpt_free_msg_frame(ioc, mf);
+out_unlock:
+	CLEAR_MGMT_STATUS(ioc->sas_mgmt.status)
+	mutex_unlock(&ioc->sas_mgmt.mutex);
+out:
+	return ret;
+}
+
 static void
 mptsas_parse_device_info(struct sas_identify *identify,
 		struct mptsas_devinfo *device_info)
@@ -2967,6 +3163,11 @@
 			goto out;
 		}
 		mptsas_set_rphy(ioc, phy_info, rphy);
+		if (identify.device_type == SAS_EDGE_EXPANDER_DEVICE ||
+			identify.device_type == SAS_FANOUT_EXPANDER_DEVICE)
+				mptsas_exp_repmanufacture_info(ioc,
+					identify.sas_address,
+					rphy_to_expander_device(rphy));
 	}
 
  out:
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 81279b3..4a7d1af 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -1438,9 +1438,14 @@
 	    && (vdevice->vtarget->tflags & MPT_TARGET_FLAGS_Q_YES)
 	    && (SCpnt->device->tagged_supported)) {
 		scsictl = scsidir | MPI_SCSIIO_CONTROL_SIMPLEQ;
-	} else {
+		if (SCpnt->request && SCpnt->request->ioprio) {
+			if (((SCpnt->request->ioprio & 0x7) == 1) ||
+				!(SCpnt->request->ioprio & 0x7))
+				scsictl |= MPI_SCSIIO_CONTROL_HEADOFQ;
+		}
+	} else
 		scsictl = scsidir | MPI_SCSIIO_CONTROL_UNTAGGED;
-	}
+
 
 	/* Use the above information to set up the message frame
 	 */
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 677cd53..bb64656 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -457,10 +457,10 @@
 
 config MTD_NAND_SH_FLCTL
 	tristate "Support for NAND on Renesas SuperH FLCTL"
-	depends on MTD_NAND && SUPERH && CPU_SUBTYPE_SH7723
+	depends on MTD_NAND && SUPERH
 	help
 	  Several Renesas SuperH CPU has FLCTL. This option enables support
-	  for NAND Flash using FLCTL. This driver support SH7723.
+	  for NAND Flash using FLCTL.
 
 config MTD_NAND_DAVINCI
         tristate "Support NAND on DaVinci SoC"
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index 02bef21..1842df8 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -1,10 +1,10 @@
 /*
  * SuperH FLCTL nand controller
  *
- * Copyright © 2008 Renesas Solutions Corp.
- * Copyright © 2008 Atom Create Engineering Co., Ltd.
+ * Copyright (c) 2008 Renesas Solutions Corp.
+ * Copyright (c) 2008 Atom Create Engineering Co., Ltd.
  *
- * Based on fsl_elbc_nand.c, Copyright © 2006-2007 Freescale Semiconductor
+ * Based on fsl_elbc_nand.c, Copyright (c) 2006-2007 Freescale Semiconductor
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -75,6 +75,11 @@
 	writeb(TRSTRT, FLTRCR(flctl));
 }
 
+static void timeout_error(struct sh_flctl *flctl, const char *str)
+{
+	dev_err(&flctl->pdev->dev, "Timeout occurred in %s\n", str);
+}
+
 static void wait_completion(struct sh_flctl *flctl)
 {
 	uint32_t timeout = LOOP_TIMEOUT_MAX;
@@ -87,7 +92,7 @@
 		udelay(1);
 	}
 
-	printk(KERN_ERR "wait_completion(): Timeout occured \n");
+	timeout_error(flctl, __func__);
 	writeb(0x0, FLTRCR(flctl));
 }
 
@@ -100,6 +105,8 @@
 		addr = page_addr;	/* ERASE1 */
 	} else if (page_addr != -1) {
 		/* SEQIN, READ0, etc.. */
+		if (flctl->chip.options & NAND_BUSWIDTH_16)
+			column >>= 1;
 		if (flctl->page_size) {
 			addr = column & 0x0FFF;
 			addr |= (page_addr & 0xff) << 16;
@@ -132,7 +139,7 @@
 			return;
 		udelay(1);
 	}
-	printk(KERN_ERR "wait_rfifo_ready(): Timeout occured \n");
+	timeout_error(flctl, __func__);
 }
 
 static void wait_wfifo_ready(struct sh_flctl *flctl)
@@ -146,7 +153,7 @@
 			return;
 		udelay(1);
 	}
-	printk(KERN_ERR "wait_wfifo_ready(): Timeout occured \n");
+	timeout_error(flctl, __func__);
 }
 
 static int wait_recfifo_ready(struct sh_flctl *flctl, int sector_number)
@@ -198,7 +205,7 @@
 		writel(0, FL4ECCCR(flctl));
 	}
 
-	printk(KERN_ERR "wait_recfifo_ready(): Timeout occured \n");
+	timeout_error(flctl, __func__);
 	return 1;	/* timeout */
 }
 
@@ -214,7 +221,7 @@
 			return;
 		udelay(1);
 	}
-	printk(KERN_ERR "wait_wecfifo_ready(): Timeout occured \n");
+	timeout_error(flctl, __func__);
 }
 
 static void read_datareg(struct sh_flctl *flctl, int offset)
@@ -275,7 +282,7 @@
 static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_val)
 {
 	struct sh_flctl *flctl = mtd_to_flctl(mtd);
-	uint32_t flcmncr_val = readl(FLCMNCR(flctl));
+	uint32_t flcmncr_val = readl(FLCMNCR(flctl)) & ~SEL_16BIT;
 	uint32_t flcmdcr_val, addr_len_bytes = 0;
 
 	/* Set SNAND bit if page size is 2048byte */
@@ -297,6 +304,8 @@
 	case NAND_CMD_READOOB:
 		addr_len_bytes = flctl->rw_ADRCNT;
 		flcmdcr_val |= CDSRC_E;
+		if (flctl->chip.options & NAND_BUSWIDTH_16)
+			flcmncr_val |= SEL_16BIT;
 		break;
 	case NAND_CMD_SEQIN:
 		/* This case is that cmd is READ0 or READ1 or READ00 */
@@ -305,6 +314,8 @@
 	case NAND_CMD_PAGEPROG:
 		addr_len_bytes = flctl->rw_ADRCNT;
 		flcmdcr_val |= DOCMD2_E | CDSRC_E | SELRW;
+		if (flctl->chip.options & NAND_BUSWIDTH_16)
+			flcmncr_val |= SEL_16BIT;
 		break;
 	case NAND_CMD_READID:
 		flcmncr_val &= ~SNAND_E;
@@ -523,6 +534,8 @@
 		set_addr(mtd, 0, page_addr);
 
 		flctl->read_bytes = mtd->writesize + mtd->oobsize;
+		if (flctl->chip.options & NAND_BUSWIDTH_16)
+			column >>= 1;
 		flctl->index += column;
 		goto read_normal_exit;
 
@@ -686,6 +699,18 @@
 	return data;
 }
 
+static uint16_t flctl_read_word(struct mtd_info *mtd)
+{
+	struct sh_flctl *flctl = mtd_to_flctl(mtd);
+	int index = flctl->index;
+	uint16_t data;
+	uint16_t *buf = (uint16_t *)&flctl->done_buff[index];
+
+	data = *buf;
+	flctl->index += 2;
+	return data;
+}
+
 static void flctl_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
 {
 	int i;
@@ -769,38 +794,36 @@
 	return 0;
 }
 
-static int __init flctl_probe(struct platform_device *pdev)
+static int __devinit flctl_probe(struct platform_device *pdev)
 {
 	struct resource *res;
 	struct sh_flctl *flctl;
 	struct mtd_info *flctl_mtd;
 	struct nand_chip *nand;
 	struct sh_flctl_platform_data *pdata;
-	int ret;
+	int ret = -ENXIO;
 
 	pdata = pdev->dev.platform_data;
 	if (pdata == NULL) {
-		printk(KERN_ERR "sh_flctl platform_data not found.\n");
-		return -ENODEV;
+		dev_err(&pdev->dev, "no platform data defined\n");
+		return -EINVAL;
 	}
 
 	flctl = kzalloc(sizeof(struct sh_flctl), GFP_KERNEL);
 	if (!flctl) {
-		printk(KERN_ERR "Unable to allocate NAND MTD dev structure.\n");
+		dev_err(&pdev->dev, "failed to allocate driver data\n");
 		return -ENOMEM;
 	}
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!res) {
-		printk(KERN_ERR "%s: resource not found.\n", __func__);
-		ret = -ENODEV;
+		dev_err(&pdev->dev, "failed to get I/O memory\n");
 		goto err;
 	}
 
-	flctl->reg = ioremap(res->start, res->end - res->start + 1);
+	flctl->reg = ioremap(res->start, resource_size(res));
 	if (flctl->reg == NULL) {
-		printk(KERN_ERR "%s: ioremap error.\n", __func__);
-		ret = -ENOMEM;
+		dev_err(&pdev->dev, "failed to remap I/O memory\n");
 		goto err;
 	}
 
@@ -808,6 +831,7 @@
 	flctl_mtd = &flctl->mtd;
 	nand = &flctl->chip;
 	flctl_mtd->priv = nand;
+	flctl->pdev = pdev;
 	flctl->hwecc = pdata->has_hwecc;
 
 	flctl_register_init(flctl, pdata->flcmncr_val);
@@ -825,6 +849,11 @@
 	nand->select_chip = flctl_select_chip;
 	nand->cmdfunc = flctl_cmdfunc;
 
+	if (pdata->flcmncr_val & SEL_16BIT) {
+		nand->options |= NAND_BUSWIDTH_16;
+		nand->read_word = flctl_read_word;
+	}
+
 	ret = nand_scan_ident(flctl_mtd, 1);
 	if (ret)
 		goto err;
@@ -846,7 +875,7 @@
 	return ret;
 }
 
-static int __exit flctl_remove(struct platform_device *pdev)
+static int __devexit flctl_remove(struct platform_device *pdev)
 {
 	struct sh_flctl *flctl = platform_get_drvdata(pdev);
 
diff --git a/drivers/net/mace.c b/drivers/net/mace.c
index d9fbad3..43aea91 100644
--- a/drivers/net/mace.c
+++ b/drivers/net/mace.c
@@ -206,7 +206,7 @@
 		mp->port_aaui = port_aaui;
 	else {
 		/* Apple Network Server uses the AAUI port */
-		if (machine_is_compatible("AAPL,ShinerESB"))
+		if (of_machine_is_compatible("AAPL,ShinerESB"))
 			mp->port_aaui = 1;
 		else {
 #ifdef CONFIG_MACE_AAUI_PORT
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index d2fa27c..7cecc8f 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -1,3 +1,11 @@
+config OF_FLATTREE
+	bool
+	depends on OF
+
+config OF_DYNAMIC
+	def_bool y
+	depends on OF && PPC_OF
+
 config OF_DEVICE
 	def_bool y
 	depends on OF && (SPARC || PPC_OF || MICROBLAZE)
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index bdfb5f5..f232cc9 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -1,4 +1,5 @@
 obj-y = base.o
+obj-$(CONFIG_OF_FLATTREE) += fdt.o
 obj-$(CONFIG_OF_DEVICE) += device.o platform.o
 obj-$(CONFIG_OF_GPIO)   += gpio.o
 obj-$(CONFIG_OF_I2C)	+= of_i2c.o
diff --git a/drivers/of/base.c b/drivers/of/base.c
index e6627b2..cb96888 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -20,8 +20,10 @@
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/spinlock.h>
+#include <linux/proc_fs.h>
 
 struct device_node *allnodes;
+struct device_node *of_chosen;
 
 /* use when traversing tree through the allnext, child, sibling,
  * or parent members of struct device_node.
@@ -37,7 +39,7 @@
 			np = np->parent;
 		ip = of_get_property(np, "#address-cells", NULL);
 		if (ip)
-			return *ip;
+			return be32_to_cpup(ip);
 	} while (np->parent);
 	/* No #address-cells property for the root node */
 	return OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
@@ -53,13 +55,88 @@
 			np = np->parent;
 		ip = of_get_property(np, "#size-cells", NULL);
 		if (ip)
-			return *ip;
+			return be32_to_cpup(ip);
 	} while (np->parent);
 	/* No #size-cells property for the root node */
 	return OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
 }
 EXPORT_SYMBOL(of_n_size_cells);
 
+#if !defined(CONFIG_SPARC)   /* SPARC doesn't do ref counting (yet) */
+/**
+ *	of_node_get - Increment refcount of a node
+ *	@node:	Node to inc refcount, NULL is supported to
+ *		simplify writing of callers
+ *
+ *	Returns node.
+ */
+struct device_node *of_node_get(struct device_node *node)
+{
+	if (node)
+		kref_get(&node->kref);
+	return node;
+}
+EXPORT_SYMBOL(of_node_get);
+
+static inline struct device_node *kref_to_device_node(struct kref *kref)
+{
+	return container_of(kref, struct device_node, kref);
+}
+
+/**
+ *	of_node_release - release a dynamically allocated node
+ *	@kref:  kref element of the node to be released
+ *
+ *	In of_node_put() this function is passed to kref_put()
+ *	as the destructor.
+ */
+static void of_node_release(struct kref *kref)
+{
+	struct device_node *node = kref_to_device_node(kref);
+	struct property *prop = node->properties;
+
+	/* We should never be releasing nodes that haven't been detached. */
+	if (!of_node_check_flag(node, OF_DETACHED)) {
+		pr_err("ERROR: Bad of_node_put() on %s\n", node->full_name);
+		dump_stack();
+		kref_init(&node->kref);
+		return;
+	}
+
+	if (!of_node_check_flag(node, OF_DYNAMIC))
+		return;
+
+	while (prop) {
+		struct property *next = prop->next;
+		kfree(prop->name);
+		kfree(prop->value);
+		kfree(prop);
+		prop = next;
+
+		if (!prop) {
+			prop = node->deadprops;
+			node->deadprops = NULL;
+		}
+	}
+	kfree(node->full_name);
+	kfree(node->data);
+	kfree(node);
+}
+
+/**
+ *	of_node_put - Decrement refcount of a node
+ *	@node:	Node to dec refcount, NULL is supported to
+ *		simplify writing of callers
+ *
+ */
+void of_node_put(struct device_node *node)
+{
+	if (node)
+		kref_put(&node->kref, of_node_release);
+}
+EXPORT_SYMBOL(of_node_put);
+#endif /* !CONFIG_SPARC */
+
 struct property *of_find_property(const struct device_node *np,
 				  const char *name,
 				  int *lenp)
@@ -144,6 +221,27 @@
 EXPORT_SYMBOL(of_device_is_compatible);
 
 /**
+ * of_machine_is_compatible - Test root of device tree for a given compatible value
+ * @compat: compatible string to look for in root node's compatible property.
+ *
+ * Returns true if the root node has the given value in its
+ * compatible property.
+ */
+int of_machine_is_compatible(const char *compat)
+{
+	struct device_node *root;
+	int rc = 0;
+
+	root = of_find_node_by_path("/");
+	if (root) {
+		rc = of_device_is_compatible(root, compat);
+		of_node_put(root);
+	}
+	return rc;
+}
+EXPORT_SYMBOL(of_machine_is_compatible);
+
+/**
  *  of_device_is_available - check if a device is available for use
  *
  *  @device: Node to check for availability
@@ -519,6 +617,27 @@
 EXPORT_SYMBOL_GPL(of_modalias_node);
 
 /**
+ * of_find_node_by_phandle - Find a node given a phandle
+ * @handle:	phandle of the node to find
+ *
+ * Returns a node pointer with refcount incremented, use
+ * of_node_put() on it when done.
+ */
+struct device_node *of_find_node_by_phandle(phandle handle)
+{
+	struct device_node *np;
+
+	read_lock(&devtree_lock);
+	for (np = allnodes; np; np = np->allnext)
+		if (np->phandle == handle)
+			break;
+	of_node_get(np);
+	read_unlock(&devtree_lock);
+	return np;
+}
+EXPORT_SYMBOL(of_find_node_by_phandle);
+
+/**
  * of_parse_phandle - Resolve a phandle property to a device_node pointer
  * @np: Pointer to device node holding phandle property
  * @phandle_name: Name of property holding a phandle value
@@ -578,8 +697,8 @@
 				const void **out_args)
 {
 	int ret = -EINVAL;
-	const u32 *list;
-	const u32 *list_end;
+	const __be32 *list;
+	const __be32 *list_end;
 	int size;
 	int cur_index = 0;
 	struct device_node *node = NULL;
@@ -593,7 +712,7 @@
 	list_end = list + size / sizeof(*list);
 
 	while (list < list_end) {
-		const u32 *cells;
+		const __be32 *cells;
 		const phandle *phandle;
 
 		phandle = list++;
@@ -617,7 +736,7 @@
 			goto err1;
 		}
 
-		list += *cells;
+		list += be32_to_cpup(cells);
 		if (list > list_end) {
 			pr_debug("%s: insufficient arguments length\n",
 				 np->full_name);
@@ -658,3 +777,190 @@
 	return ret;
 }
 EXPORT_SYMBOL(of_parse_phandles_with_args);
+
+/**
+ * prom_add_property - Add a property to a node
+ */
+int prom_add_property(struct device_node *np, struct property *prop)
+{
+	struct property **next;
+	unsigned long flags;
+
+	prop->next = NULL;
+	write_lock_irqsave(&devtree_lock, flags);
+	next = &np->properties;
+	while (*next) {
+		if (strcmp(prop->name, (*next)->name) == 0) {
+			/* duplicate ! don't insert it */
+			write_unlock_irqrestore(&devtree_lock, flags);
+			return -1;
+		}
+		next = &(*next)->next;
+	}
+	*next = prop;
+	write_unlock_irqrestore(&devtree_lock, flags);
+
+#ifdef CONFIG_PROC_DEVICETREE
+	/* try to add to proc as well if it was initialized */
+	if (np->pde)
+		proc_device_tree_add_prop(np->pde, prop);
+#endif /* CONFIG_PROC_DEVICETREE */
+
+	return 0;
+}
+
+/**
+ * prom_remove_property - Remove a property from a node.
+ *
+ * Note that we don't actually remove it, since we have given out
+ * who-knows-how-many pointers to the data using get-property.
+ * Instead we just move the property to the "dead properties"
+ * list, so it won't be found any more.
+ */
+int prom_remove_property(struct device_node *np, struct property *prop)
+{
+	struct property **next;
+	unsigned long flags;
+	int found = 0;
+
+	write_lock_irqsave(&devtree_lock, flags);
+	next = &np->properties;
+	while (*next) {
+		if (*next == prop) {
+			/* found the node */
+			*next = prop->next;
+			prop->next = np->deadprops;
+			np->deadprops = prop;
+			found = 1;
+			break;
+		}
+		next = &(*next)->next;
+	}
+	write_unlock_irqrestore(&devtree_lock, flags);
+
+	if (!found)
+		return -ENODEV;
+
+#ifdef CONFIG_PROC_DEVICETREE
+	/* try to remove the proc node as well */
+	if (np->pde)
+		proc_device_tree_remove_prop(np->pde, prop);
+#endif /* CONFIG_PROC_DEVICETREE */
+
+	return 0;
+}
+
+/*
+ * prom_update_property - Update a property in a node.
+ *
+ * Note that we don't actually remove it, since we have given out
+ * who-knows-how-many pointers to the data using get-property.
+ * Instead we just move the property to the "dead properties" list,
+ * and add the new property to the property list
+ */
+int prom_update_property(struct device_node *np,
+			 struct property *newprop,
+			 struct property *oldprop)
+{
+	struct property **next;
+	unsigned long flags;
+	int found = 0;
+
+	write_lock_irqsave(&devtree_lock, flags);
+	next = &np->properties;
+	while (*next) {
+		if (*next == oldprop) {
+			/* found the node */
+			newprop->next = oldprop->next;
+			*next = newprop;
+			oldprop->next = np->deadprops;
+			np->deadprops = oldprop;
+			found = 1;
+			break;
+		}
+		next = &(*next)->next;
+	}
+	write_unlock_irqrestore(&devtree_lock, flags);
+
+	if (!found)
+		return -ENODEV;
+
+#ifdef CONFIG_PROC_DEVICETREE
+	/* try to add to proc as well if it was initialized */
+	if (np->pde)
+		proc_device_tree_update_prop(np->pde, newprop, oldprop);
+#endif /* CONFIG_PROC_DEVICETREE */
+
+	return 0;
+}
+
+#if defined(CONFIG_OF_DYNAMIC)
+/*
+ * Support for dynamic device trees.
+ *
+ * On some platforms, the device tree can be manipulated at runtime.
+ * The routines in this section support adding, removing and changing
+ * device tree nodes.
+ */
+
+/**
+ * of_attach_node - Plug a device node into the tree and global list.
+ */
+void of_attach_node(struct device_node *np)
+{
+	unsigned long flags;
+
+	write_lock_irqsave(&devtree_lock, flags);
+	np->sibling = np->parent->child;
+	np->allnext = allnodes;
+	np->parent->child = np;
+	allnodes = np;
+	write_unlock_irqrestore(&devtree_lock, flags);
+}
+
+/**
+ * of_detach_node - "Unplug" a node from the device tree.
+ *
+ * The caller must hold a reference to the node.  The memory associated with
+ * the node is not freed until its refcount goes to zero.
+ */
+void of_detach_node(struct device_node *np)
+{
+	struct device_node *parent;
+	unsigned long flags;
+
+	write_lock_irqsave(&devtree_lock, flags);
+
+	parent = np->parent;
+	if (!parent)
+		goto out_unlock;
+
+	if (allnodes == np)
+		allnodes = np->allnext;
+	else {
+		struct device_node *prev;
+		for (prev = allnodes;
+		     prev->allnext != np;
+		     prev = prev->allnext)
+			;
+		prev->allnext = np->allnext;
+	}
+
+	if (parent->child == np)
+		parent->child = np->sibling;
+	else {
+		struct device_node *prevsib;
+		for (prevsib = np->parent->child;
+		     prevsib->sibling != np;
+		     prevsib = prevsib->sibling)
+			;
+		prevsib->sibling = np->sibling;
+	}
+
+	of_node_set_flag(np, OF_DETACHED);
+
+out_unlock:
+	write_unlock_irqrestore(&devtree_lock, flags);
+}
+#endif /* defined(CONFIG_OF_DYNAMIC) */
+
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
new file mode 100644
index 0000000..406757a
--- /dev/null
+++ b/drivers/of/fdt.c
@@ -0,0 +1,590 @@
+/*
+ * Functions for working with the Flattened Device Tree data format
+ *
+ * Copyright 2009 Benjamin Herrenschmidt, IBM Corp
+ * benh@kernel.crashing.org
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/initrd.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+
+#ifdef CONFIG_PPC
+#include <asm/machdep.h>
+#endif /* CONFIG_PPC */
+
+#include <asm/page.h>
+
+int __initdata dt_root_addr_cells;
+int __initdata dt_root_size_cells;
+
+struct boot_param_header *initial_boot_params;
+
+char *find_flat_dt_string(u32 offset)
+{
+	return ((char *)initial_boot_params) +
+		be32_to_cpu(initial_boot_params->off_dt_strings) + offset;
+}
+
+/**
+ * of_scan_flat_dt - scan flattened tree blob and call callback on each.
+ * @it: callback function
+ * @data: context data pointer
+ *
+ * This function is used to scan the flattened device-tree; it is
+ * used to extract the memory information at boot before we can
+ * unflatten the tree.
+ */
+int __init of_scan_flat_dt(int (*it)(unsigned long node,
+				     const char *uname, int depth,
+				     void *data),
+			   void *data)
+{
+	unsigned long p = ((unsigned long)initial_boot_params) +
+		be32_to_cpu(initial_boot_params->off_dt_struct);
+	int rc = 0;
+	int depth = -1;
+
+	do {
+		u32 tag = be32_to_cpup((__be32 *)p);
+		char *pathp;
+
+		p += 4;
+		if (tag == OF_DT_END_NODE) {
+			depth--;
+			continue;
+		}
+		if (tag == OF_DT_NOP)
+			continue;
+		if (tag == OF_DT_END)
+			break;
+		if (tag == OF_DT_PROP) {
+			u32 sz = be32_to_cpup((__be32 *)p);
+			p += 8;
+			if (be32_to_cpu(initial_boot_params->version) < 0x10)
+				p = _ALIGN(p, sz >= 8 ? 8 : 4);
+			p += sz;
+			p = _ALIGN(p, 4);
+			continue;
+		}
+		if (tag != OF_DT_BEGIN_NODE) {
+			pr_err("Invalid tag %x in flat device tree!\n", tag);
+			return -EINVAL;
+		}
+		depth++;
+		pathp = (char *)p;
+		p = _ALIGN(p + strlen(pathp) + 1, 4);
+		if ((*pathp) == '/') {
+			char *lp, *np;
+			for (lp = NULL, np = pathp; *np; np++)
+				if ((*np) == '/')
+					lp = np+1;
+			if (lp != NULL)
+				pathp = lp;
+		}
+		rc = it(p, pathp, depth, data);
+		if (rc != 0)
+			break;
+	} while (1);
+
+	return rc;
+}
+
+/**
+ * of_get_flat_dt_root - find the root node in the flat blob
+ */
+unsigned long __init of_get_flat_dt_root(void)
+{
+	unsigned long p = ((unsigned long)initial_boot_params) +
+		be32_to_cpu(initial_boot_params->off_dt_struct);
+
+	while (be32_to_cpup((__be32 *)p) == OF_DT_NOP)
+		p += 4;
+	BUG_ON(be32_to_cpup((__be32 *)p) != OF_DT_BEGIN_NODE);
+	p += 4;
+	return _ALIGN(p + strlen((char *)p) + 1, 4);
+}
+
+/**
+ * of_get_flat_dt_prop - Given a node in the flat blob, return the property ptr
+ *
+ * This function can be used within the of_scan_flat_dt() callback to get
+ * access to properties.
+ */
+void *__init of_get_flat_dt_prop(unsigned long node, const char *name,
+				 unsigned long *size)
+{
+	unsigned long p = node;
+
+	do {
+		u32 tag = be32_to_cpup((__be32 *)p);
+		u32 sz, noff;
+		const char *nstr;
+
+		p += 4;
+		if (tag == OF_DT_NOP)
+			continue;
+		if (tag != OF_DT_PROP)
+			return NULL;
+
+		sz = be32_to_cpup((__be32 *)p);
+		noff = be32_to_cpup((__be32 *)(p + 4));
+		p += 8;
+		if (be32_to_cpu(initial_boot_params->version) < 0x10)
+			p = _ALIGN(p, sz >= 8 ? 8 : 4);
+
+		nstr = find_flat_dt_string(noff);
+		if (nstr == NULL) {
+			pr_warning("Can't find property index name!\n");
+			return NULL;
+		}
+		if (strcmp(name, nstr) == 0) {
+			if (size)
+				*size = sz;
+			return (void *)p;
+		}
+		p += sz;
+		p = _ALIGN(p, 4);
+	} while (1);
+}
+
+/**
+ * of_flat_dt_is_compatible - Return true if given node has compat in compatible list
+ * @node: node to test
+ * @compat: compatible string to compare with compatible list.
+ */
+int __init of_flat_dt_is_compatible(unsigned long node, const char *compat)
+{
+	const char *cp;
+	unsigned long cplen, l;
+
+	cp = of_get_flat_dt_prop(node, "compatible", &cplen);
+	if (cp == NULL)
+		return 0;
+	while (cplen > 0) {
+		if (strncasecmp(cp, compat, strlen(compat)) == 0)
+			return 1;
+		l = strlen(cp) + 1;
+		cp += l;
+		cplen -= l;
+	}
+
+	return 0;
+}
+
+static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size,
+				       unsigned long align)
+{
+	void *res;
+
+	*mem = _ALIGN(*mem, align);
+	res = (void *)*mem;
+	*mem += size;
+
+	return res;
+}
+
+/**
+ * unflatten_dt_node - Alloc and populate a device_node from the flat tree
+ * @mem: current memory allocation cursor; the updated cursor is returned
+ * @p: pointer to node in flat tree
+ * @dad: Parent struct device_node
+ * @allnextpp: pointer to ->allnext from last allocated device_node
+ * @fpsize: Size of the node path up to the current depth.
+ */
+unsigned long __init unflatten_dt_node(unsigned long mem,
+					unsigned long *p,
+					struct device_node *dad,
+					struct device_node ***allnextpp,
+					unsigned long fpsize)
+{
+	struct device_node *np;
+	struct property *pp, **prev_pp = NULL;
+	char *pathp;
+	u32 tag;
+	unsigned int l, allocl;
+	int has_name = 0;
+	int new_format = 0;
+
+	tag = be32_to_cpup((__be32 *)(*p));
+	if (tag != OF_DT_BEGIN_NODE) {
+		pr_err("Weird tag at start of node: %x\n", tag);
+		return mem;
+	}
+	*p += 4;
+	pathp = (char *)*p;
+	l = allocl = strlen(pathp) + 1;
+	*p = _ALIGN(*p + l, 4);
+
+	/* Version 0x10 has a more compact unit name here instead of the full
+	 * path, so we accumulate the full path size using "fpsize" and rebuild
+	 * it later. We detect this because the first character of the name is
+	 * not '/'.
+	 */
+	if ((*pathp) != '/') {
+		new_format = 1;
+		if (fpsize == 0) {
+			/* Root node: special case. fpsize accounts for the path
+			 * plus the terminating zero. The root node only has '/',
+			 * so fpsize should be 2, but to avoid giving first-level
+			 * nodes a double '/' we use fpsize 1 here.
+			 */
+			fpsize = 1;
+			allocl = 2;
+		} else {
+			/* account for '/' and path size minus terminal 0
+			 * already in 'l'
+			 */
+			fpsize += l;
+			allocl = fpsize;
+		}
+	}
+
+	np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + allocl,
+				__alignof__(struct device_node));
+	if (allnextpp) {
+		memset(np, 0, sizeof(*np));
+		np->full_name = ((char *)np) + sizeof(struct device_node);
+		if (new_format) {
+			char *fn = np->full_name;
+			/* rebuild full path for new format */
+			if (dad && dad->parent) {
+				strcpy(fn, dad->full_name);
+#ifdef DEBUG
+				if ((strlen(fn) + l + 1) != allocl) {
+					pr_debug("%s: p: %d, l: %d, a: %d\n",
+						pathp, (int)strlen(fn),
+						l, allocl);
+				}
+#endif
+				fn += strlen(fn);
+			}
+			*(fn++) = '/';
+			memcpy(fn, pathp, l);
+		} else
+			memcpy(np->full_name, pathp, l);
+		prev_pp = &np->properties;
+		**allnextpp = np;
+		*allnextpp = &np->allnext;
+		if (dad != NULL) {
+			np->parent = dad;
+			/* we temporarily use the next field as `last_child'*/
+			if (dad->next == NULL)
+				dad->child = np;
+			else
+				dad->next->sibling = np;
+			dad->next = np;
+		}
+		kref_init(&np->kref);
+	}
+	while (1) {
+		u32 sz, noff;
+		char *pname;
+
+		tag = be32_to_cpup((__be32 *)(*p));
+		if (tag == OF_DT_NOP) {
+			*p += 4;
+			continue;
+		}
+		if (tag != OF_DT_PROP)
+			break;
+		*p += 4;
+		sz = be32_to_cpup((__be32 *)(*p));
+		noff = be32_to_cpup((__be32 *)((*p) + 4));
+		*p += 8;
+		if (be32_to_cpu(initial_boot_params->version) < 0x10)
+			*p = _ALIGN(*p, sz >= 8 ? 8 : 4);
+
+		pname = find_flat_dt_string(noff);
+		if (pname == NULL) {
+			pr_info("Can't find property name in list !\n");
+			break;
+		}
+		if (strcmp(pname, "name") == 0)
+			has_name = 1;
+		l = strlen(pname) + 1;
+		pp = unflatten_dt_alloc(&mem, sizeof(struct property),
+					__alignof__(struct property));
+		if (allnextpp) {
+			/* We accept flattened tree phandles either in
+			 * ePAPR-style "phandle" properties, or the
+			 * legacy "linux,phandle" properties.  If both
+			 * appear and have different values, things
+			 * will get weird.  Don't do that. */
+			if ((strcmp(pname, "phandle") == 0) ||
+			    (strcmp(pname, "linux,phandle") == 0)) {
+				if (np->phandle == 0)
+					np->phandle = *((u32 *)*p);
+			}
+			/* And we process the "ibm,phandle" property
+			 * used in pSeries dynamic device tree
+			 * stuff */
+			if (strcmp(pname, "ibm,phandle") == 0)
+				np->phandle = *((u32 *)*p);
+			pp->name = pname;
+			pp->length = sz;
+			pp->value = (void *)*p;
+			*prev_pp = pp;
+			prev_pp = &pp->next;
+		}
+		*p = _ALIGN((*p) + sz, 4);
+	}
+	/* With version 0x10 we may not have the name property; recreate
+	 * it here from the unit name if it is absent.
+	 */
+	if (!has_name) {
+		char *p1 = pathp, *ps = pathp, *pa = NULL;
+		int sz;
+
+		while (*p1) {
+			if ((*p1) == '@')
+				pa = p1;
+			if ((*p1) == '/')
+				ps = p1 + 1;
+			p1++;
+		}
+		if (pa < ps)
+			pa = p1;
+		sz = (pa - ps) + 1;
+		pp = unflatten_dt_alloc(&mem, sizeof(struct property) + sz,
+					__alignof__(struct property));
+		if (allnextpp) {
+			pp->name = "name";
+			pp->length = sz;
+			pp->value = pp + 1;
+			*prev_pp = pp;
+			prev_pp = &pp->next;
+			memcpy(pp->value, ps, sz - 1);
+			((char *)pp->value)[sz - 1] = 0;
+			pr_debug("fixed up name for %s -> %s\n", pathp,
+				(char *)pp->value);
+		}
+	}
+	if (allnextpp) {
+		*prev_pp = NULL;
+		np->name = of_get_property(np, "name", NULL);
+		np->type = of_get_property(np, "device_type", NULL);
+
+		if (!np->name)
+			np->name = "<NULL>";
+		if (!np->type)
+			np->type = "<NULL>";
+	}
+	while (tag == OF_DT_BEGIN_NODE) {
+		mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize);
+		tag = be32_to_cpup((__be32 *)(*p));
+	}
+	if (tag != OF_DT_END_NODE) {
+		pr_err("Weird tag at end of node: %x\n", tag);
+		return mem;
+	}
+	*p += 4;
+	return mem;
+}
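+
+/*
+ * Usage note: unflatten_dt_node() is called twice by unflatten_device_tree()
+ * below -- first with allnextpp == NULL and mem == 0, where it only walks
+ * the blob and returns the memory footprint needed, and then again with a
+ * real allocation, where it actually builds the device_node and property
+ * structures.
+ */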
+
+#ifdef CONFIG_BLK_DEV_INITRD
+/**
+ * early_init_dt_check_for_initrd - Decode initrd location from flat tree
+ * @node: reference to node containing initrd location ('chosen')
+ */
+void __init early_init_dt_check_for_initrd(unsigned long node)
+{
+	unsigned long start, end, len;
+	__be32 *prop;
+
+	pr_debug("Looking for initrd properties... ");
+
+	prop = of_get_flat_dt_prop(node, "linux,initrd-start", &len);
+	if (!prop)
+		return;
+	start = of_read_ulong(prop, len/4);
+
+	prop = of_get_flat_dt_prop(node, "linux,initrd-end", &len);
+	if (!prop)
+		return;
+	end = of_read_ulong(prop, len/4);
+
+	early_init_dt_setup_initrd_arch(start, end);
+	pr_debug("initrd_start=0x%lx  initrd_end=0x%lx\n", start, end);
+}
+#else
+inline void early_init_dt_check_for_initrd(unsigned long node)
+{
+}
+#endif /* CONFIG_BLK_DEV_INITRD */
+
+/**
+ * early_init_dt_scan_root - fetch the top level address and size cells
+ */
+int __init early_init_dt_scan_root(unsigned long node, const char *uname,
+				   int depth, void *data)
+{
+	__be32 *prop;
+
+	if (depth != 0)
+		return 0;
+
+	dt_root_size_cells = OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
+	dt_root_addr_cells = OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
+
+	prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
+	if (prop)
+		dt_root_size_cells = be32_to_cpup(prop);
+	pr_debug("dt_root_size_cells = %x\n", dt_root_size_cells);
+
+	prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
+	if (prop)
+		dt_root_addr_cells = be32_to_cpup(prop);
+	pr_debug("dt_root_addr_cells = %x\n", dt_root_addr_cells);
+
+	/* break now */
+	return 1;
+}
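+
+/*
+ * Example (illustration only): a root node such as
+ *
+ *	/ {
+ *		#address-cells = <2>;
+ *		#size-cells = <1>;
+ *		...
+ *	};
+ *
+ * results in dt_root_addr_cells = 2 and dt_root_size_cells = 1, which
+ * controls how the "reg" entries of memory nodes are parsed below.
+ */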
+
+u64 __init dt_mem_next_cell(int s, __be32 **cellp)
+{
+	__be32 *p = *cellp;
+
+	*cellp = p + s;
+	return of_read_number(p, s);
+}
+
+/**
+ * early_init_dt_scan_memory - Look for and parse memory nodes
+ */
+int __init early_init_dt_scan_memory(unsigned long node, const char *uname,
+				     int depth, void *data)
+{
+	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
+	__be32 *reg, *endp;
+	unsigned long l;
+
+	/* We are scanning "memory" nodes only */
+	if (type == NULL) {
+		/*
+		 * The longtrail doesn't have a device_type on the
+		 * /memory node, so look for the node called /memory@0.
+		 */
+		if (depth != 1 || strcmp(uname, "memory@0") != 0)
+			return 0;
+	} else if (strcmp(type, "memory") != 0)
+		return 0;
+
+	reg = of_get_flat_dt_prop(node, "linux,usable-memory", &l);
+	if (reg == NULL)
+		reg = of_get_flat_dt_prop(node, "reg", &l);
+	if (reg == NULL)
+		return 0;
+
+	endp = reg + (l / sizeof(__be32));
+
+	pr_debug("memory scan node %s, reg size %ld, data: %x %x %x %x,\n",
+	    uname, l, reg[0], reg[1], reg[2], reg[3]);
+
+	while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
+		u64 base, size;
+
+		base = dt_mem_next_cell(dt_root_addr_cells, &reg);
+		size = dt_mem_next_cell(dt_root_size_cells, &reg);
+
+		if (size == 0)
+			continue;
+		pr_debug(" - %llx ,  %llx\n", (unsigned long long)base,
+		    (unsigned long long)size);
+
+		early_init_dt_add_memory_arch(base, size);
+	}
+
+	return 0;
+}
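+
+/*
+ * Example (illustration only): with dt_root_addr_cells = 1 and
+ * dt_root_size_cells = 1, a node such as
+ *
+ *	memory {
+ *		device_type = "memory";
+ *		reg = <0x00000000 0x10000000>;
+ *	};
+ *
+ * results in a single early_init_dt_add_memory_arch(0x0, 0x10000000) call.
+ * A "linux,usable-memory" property, when present, is used instead of "reg".
+ */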
+
+int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
+				     int depth, void *data)
+{
+	unsigned long l;
+	char *p;
+
+	pr_debug("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
+
+	if (depth != 1 ||
+	    (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
+		return 0;
+
+	early_init_dt_check_for_initrd(node);
+
+	/* Retrieve command line */
+	p = of_get_flat_dt_prop(node, "bootargs", &l);
+	if (p != NULL && l > 0)
+		strlcpy(cmd_line, p, min((int)l, COMMAND_LINE_SIZE));
+
+#ifdef CONFIG_CMDLINE
+#ifndef CONFIG_CMDLINE_FORCE
+	if (p == NULL || l == 0 || (l == 1 && (*p) == 0))
+#endif
+		strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
+#endif /* CONFIG_CMDLINE */
+
+	early_init_dt_scan_chosen_arch(node);
+
+	pr_debug("Command line is: %s\n", cmd_line);
+
+	/* break now */
+	return 1;
+}
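+
+/*
+ * Example (illustration only): a /chosen node such as
+ *
+ *	chosen {
+ *		bootargs = "console=ttyS0,115200 root=/dev/sda1";
+ *		linux,initrd-start = <0x02000000>;
+ *		linux,initrd-end = <0x02800000>;
+ *	};
+ *
+ * fills cmd_line from "bootargs" (unless CONFIG_CMDLINE_FORCE overrides it)
+ * and hands the initrd range to early_init_dt_setup_initrd_arch().
+ */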
+
+/**
+ * unflatten_device_tree - create tree of device_nodes from flat blob
+ *
+ * unflattens the device-tree passed by the firmware, creating the
+ * tree of struct device_node. It also fills the "name" and "type"
+ * pointers of the nodes so the normal device-tree walking functions
+ * can be used.
+ */
+void __init unflatten_device_tree(void)
+{
+	unsigned long start, mem, size;
+	struct device_node **allnextp = &allnodes;
+
+	pr_debug(" -> unflatten_device_tree()\n");
+
+	/* First pass, scan for size */
+	start = ((unsigned long)initial_boot_params) +
+		be32_to_cpu(initial_boot_params->off_dt_struct);
+	size = unflatten_dt_node(0, &start, NULL, NULL, 0);
+	size = (size | 3) + 1;
+
+	pr_debug("  size is %lx, allocating...\n", size);
+
+	/* Allocate memory for the expanded device tree */
+	mem = early_init_dt_alloc_memory_arch(size + 4,
+			__alignof__(struct device_node));
+	mem = (unsigned long) __va(mem);
+
+	((__be32 *)mem)[size / 4] = cpu_to_be32(0xdeadbeef);
+
+	pr_debug("  unflattening %lx...\n", mem);
+
+	/* Second pass, do actual unflattening */
+	start = ((unsigned long)initial_boot_params) +
+		be32_to_cpu(initial_boot_params->off_dt_struct);
+	unflatten_dt_node(mem, &start, NULL, &allnextp, 0);
+	if (be32_to_cpup((__be32 *)start) != OF_DT_END)
+		pr_warning("Weird tag at end of tree: %08x\n", *((u32 *)start));
+	if (be32_to_cpu(((__be32 *)mem)[size / 4]) != 0xdeadbeef)
+		pr_warning("End of tree marker overwritten: %08x\n",
+			   be32_to_cpu(((__be32 *)mem)[size / 4]));
+	*allnextp = NULL;
+
+	/* Get pointer to OF "/chosen" node for use everywhere */
+	of_chosen = of_find_node_by_path("/chosen");
+	if (of_chosen == NULL)
+		of_chosen = of_find_node_by_path("/chosen@0");
+
+	pr_debug(" <- unflatten_device_tree()\n");
+}
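+
+/*
+ * Note: the early_init_dt_*_arch() calls used in this file (memory
+ * registration, initrd setup, chosen-node handling and allocation) are
+ * hooks that each architecture using this common code is expected to
+ * provide; this file only contains the architecture-independent
+ * flat-tree handling.
+ */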
diff --git a/drivers/of/gpio.c b/drivers/of/gpio.c
index 6eea601..24c3606 100644
--- a/drivers/of/gpio.c
+++ b/drivers/of/gpio.c
@@ -36,7 +36,7 @@
 	struct of_gpio_chip *of_gc = NULL;
 	int size;
 	const void *gpio_spec;
-	const u32 *gpio_cells;
+	const __be32 *gpio_cells;
 
 	ret = of_parse_phandles_with_args(np, "gpios", "#gpio-cells", index,
 					  &gc, &gpio_spec);
@@ -55,7 +55,7 @@
 
 	gpio_cells = of_get_property(gc, "#gpio-cells", &size);
 	if (!gpio_cells || size != sizeof(*gpio_cells) ||
-			*gpio_cells != of_gc->gpio_cells) {
+			be32_to_cpup(gpio_cells) != of_gc->gpio_cells) {
 		pr_debug("%s: wrong #gpio-cells for %s\n",
 			 np->full_name, gc->full_name);
 		ret = -EINVAL;
@@ -127,7 +127,8 @@
 int of_gpio_simple_xlate(struct of_gpio_chip *of_gc, struct device_node *np,
 			 const void *gpio_spec, enum of_gpio_flags *flags)
 {
-	const u32 *gpio = gpio_spec;
+	const __be32 *gpio = gpio_spec;
+	const u32 n = be32_to_cpup(gpio);
 
 	/*
 	 * We're discouraging gpio_cells < 2, since that way you'll have to
@@ -140,13 +141,13 @@
 		return -EINVAL;
 	}
 
-	if (*gpio > of_gc->gc.ngpio)
+	if (n > of_gc->gc.ngpio)
 		return -EINVAL;
 
 	if (flags)
-		*flags = gpio[1];
+		*flags = be32_to_cpu(gpio[1]);
 
-	return *gpio;
+	return n;
 }
 EXPORT_SYMBOL(of_gpio_simple_xlate);
 
diff --git a/drivers/of/of_i2c.c b/drivers/of/of_i2c.c
index fa65a2b..a3a708e 100644
--- a/drivers/of/of_i2c.c
+++ b/drivers/of/of_i2c.c
@@ -25,7 +25,7 @@
 	for_each_child_of_node(adap_node, node) {
 		struct i2c_board_info info = {};
 		struct dev_archdata dev_ad = {};
-		const u32 *addr;
+		const __be32 *addr;
 		int len;
 
 		if (of_modalias_node(node, info.type, sizeof(info.type)) < 0)
@@ -40,7 +40,7 @@
 
 		info.irq = irq_of_parse_and_map(node, 0);
 
-		info.addr = *addr;
+		info.addr = be32_to_cpup(addr);
 
 		dev_archdata_set_node(&dev_ad, node);
 		info.archdata = &dev_ad;
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 4b22ba5..18ecae4 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -51,7 +51,7 @@
 
 	/* Loop over the child nodes and register a phy_device for each one */
 	for_each_child_of_node(np, child) {
-		const u32 *addr;
+		const __be32 *addr;
 		int len;
 
 		/* A PHY must have a reg property in the range [0-31] */
@@ -68,7 +68,7 @@
 				mdio->irq[*addr] = PHY_POLL;
 		}
 
-		phy = get_phy_device(mdio, *addr);
+		phy = get_phy_device(mdio, be32_to_cpup(addr));
 		if (!phy) {
 			dev_err(&mdio->dev, "error probing PHY at address %i\n",
 				*addr);
@@ -160,7 +160,7 @@
 	struct device_node *net_np;
 	char bus_id[MII_BUS_ID_SIZE + 3];
 	struct phy_device *phy;
-	const u32 *phy_id;
+	const __be32 *phy_id;
 	int sz;
 
 	if (!dev->dev.parent)
@@ -174,7 +174,7 @@
 	if (!phy_id || sz < sizeof(*phy_id))
 		return NULL;
 
-	sprintf(bus_id, PHY_ID_FMT, "0", phy_id[0]);
+	sprintf(bus_id, PHY_ID_FMT, "0", be32_to_cpu(phy_id[0]));
 
 	phy = phy_connect(dev, bus_id, hndlr, 0, iface);
 	return IS_ERR(phy) ? NULL : phy;
diff --git a/drivers/of/of_spi.c b/drivers/of/of_spi.c
index bed0ed6..f65f48b 100644
--- a/drivers/of/of_spi.c
+++ b/drivers/of/of_spi.c
@@ -23,7 +23,7 @@
 {
 	struct spi_device *spi;
 	struct device_node *nc;
-	const u32 *prop;
+	const __be32 *prop;
 	int rc;
 	int len;
 
@@ -54,7 +54,7 @@
 			spi_dev_put(spi);
 			continue;
 		}
-		spi->chip_select = *prop;
+		spi->chip_select = be32_to_cpup(prop);
 
 		/* Mode (clock phase/polarity/etc.) */
 		if (of_find_property(nc, "spi-cpha", NULL))
@@ -72,7 +72,7 @@
 			spi_dev_put(spi);
 			continue;
 		}
-		spi->max_speed_hz = *prop;
+		spi->max_speed_hz = be32_to_cpup(prop);
 
 		/* IRQ */
 		spi->irq = irq_of_parse_and_map(nc, 0);
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index b1ecefa..7858a11 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -21,17 +21,6 @@
 
 	   If you don't know what to do here, say N.
 
-config PCI_LEGACY
-	bool "Enable deprecated pci_find_* API"
-	depends on PCI
-	default y
-	help
-	  Say Y here if you want to include support for the deprecated
-	  pci_find_device() API.  Most drivers have been converted over
-	  to using the proper hotplug APIs, so this option serves to
-	  include/exclude only a few drivers that are still using this
-	  API.
-
 config PCI_DEBUG
 	bool "PCI Debugging"
 	depends on PCI && DEBUG_KERNEL
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 4df48d5..8674c1e 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -2,14 +2,13 @@
 # Makefile for the PCI bus specific drivers.
 #
 
-obj-y		+= access.o bus.o probe.o remove.o pci.o quirks.o \
+obj-y		+= access.o bus.o probe.o remove.o pci.o \
 			pci-driver.o search.o pci-sysfs.o rom.o setup-res.o \
 			irq.o
 obj-$(CONFIG_PROC_FS) += proc.o
 obj-$(CONFIG_SYSFS) += slot.o
 
-obj-$(CONFIG_PCI_LEGACY) += legacy.o
-CFLAGS_legacy.o += -Wno-deprecated-declarations
+obj-$(CONFIG_PCI_QUIRKS) += quirks.o
 
 # Build PCI Express stuff if needed
 obj-$(CONFIG_PCIEPORTBUS) += pcie/
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index cef28a7..712250f 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -17,6 +17,52 @@
 
 #include "pci.h"
 
+void pci_bus_add_resource(struct pci_bus *bus, struct resource *res,
+			  unsigned int flags)
+{
+	struct pci_bus_resource *bus_res;
+
+	bus_res = kzalloc(sizeof(struct pci_bus_resource), GFP_KERNEL);
+	if (!bus_res) {
+		dev_err(&bus->dev, "can't add %pR resource\n", res);
+		return;
+	}
+
+	bus_res->res = res;
+	bus_res->flags = flags;
+	list_add_tail(&bus_res->list, &bus->resources);
+}
+
+struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n)
+{
+	struct pci_bus_resource *bus_res;
+
+	if (n < PCI_BRIDGE_RESOURCE_NUM)
+		return bus->resource[n];
+
+	n -= PCI_BRIDGE_RESOURCE_NUM;
+	list_for_each_entry(bus_res, &bus->resources, list) {
+		if (n-- == 0)
+			return bus_res->res;
+	}
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(pci_bus_resource_n);
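+
+/*
+ * Usage note: pci_bus_resource_n() is used by the pci_bus_for_each_resource()
+ * iterator that callers elsewhere in this series are converted to, e.g.
+ *
+ *	struct resource *r;
+ *	int i;
+ *
+ *	pci_bus_for_each_resource(bus, r, i) {
+ *		if (!r)
+ *			continue;
+ *		...
+ *	}
+ */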
+
+void pci_bus_remove_resources(struct pci_bus *bus)
+{
+	struct pci_bus_resource *bus_res, *tmp;
+	int i;
+
+	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
+		bus->resource[i] = NULL;
+
+	list_for_each_entry_safe(bus_res, tmp, &bus->resources, list) {
+		list_del(&bus_res->list);
+		kfree(bus_res);
+	}
+}
+
 /**
  * pci_bus_alloc_resource - allocate a resource from a parent bus
  * @bus: PCI bus
@@ -36,11 +82,14 @@
 pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
 		resource_size_t size, resource_size_t align,
 		resource_size_t min, unsigned int type_mask,
-		void (*alignf)(void *, struct resource *, resource_size_t,
-				resource_size_t),
+		resource_size_t (*alignf)(void *,
+					  const struct resource *,
+					  resource_size_t,
+					  resource_size_t),
 		void *alignf_data)
 {
 	int i, ret = -ENOMEM;
+	struct resource *r;
 	resource_size_t max = -1;
 
 	type_mask |= IORESOURCE_IO | IORESOURCE_MEM;
@@ -49,8 +98,7 @@
 	if (!(res->flags & IORESOURCE_MEM_64))
 		max = PCIBIOS_MAX_MEM_32;
 
-	for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
-		struct resource *r = bus->resource[i];
+	pci_bus_for_each_resource(bus, r, i) {
 		if (!r)
 			continue;
 
diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c
index 4dd7114..efa9f2d 100644
--- a/drivers/pci/hotplug/acpiphp_core.c
+++ b/drivers/pci/hotplug/acpiphp_core.c
@@ -332,8 +332,6 @@
 	slot->hotplug_slot->info->attention_status = 0;
 	slot->hotplug_slot->info->latch_status = acpiphp_get_latch_status(slot->acpi_slot);
 	slot->hotplug_slot->info->adapter_status = acpiphp_get_adapter_status(slot->acpi_slot);
-	slot->hotplug_slot->info->max_bus_speed = PCI_SPEED_UNKNOWN;
-	slot->hotplug_slot->info->cur_bus_speed = PCI_SPEED_UNKNOWN;
 
 	acpiphp_slot->slot = slot;
 	snprintf(name, SLOT_NAME_SIZE, "%llu", slot->acpi_slot->sun);
diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
index 148fb46..fb3f846 100644
--- a/drivers/pci/hotplug/cpcihp_generic.c
+++ b/drivers/pci/hotplug/cpcihp_generic.c
@@ -162,6 +162,7 @@
 	dev = pci_get_slot(bus, PCI_DEVFN(bridge_slot, 0));
 	if(!dev || dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) {
 		err("Invalid bridge device %s", bridge);
+		pci_dev_put(dev);
 		return -EINVAL;
 	}
 	bus = dev->subordinate;
diff --git a/drivers/pci/hotplug/cpqphp.h b/drivers/pci/hotplug/cpqphp.h
index 9c6a9fd..d8ffc73 100644
--- a/drivers/pci/hotplug/cpqphp.h
+++ b/drivers/pci/hotplug/cpqphp.h
@@ -310,8 +310,6 @@
 	u8 first_slot;
 	u8 add_support;
 	u8 push_flag;
-	enum pci_bus_speed speed;
-	enum pci_bus_speed speed_capability;
 	u8 push_button;			/* 0 = no pushbutton, 1 = pushbutton present */
 	u8 slot_switch_type;		/* 0 = no switch, 1 = switch present */
 	u8 defeature_PHP;		/* 0 = PHP not supported, 1 = PHP supported */
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index 075b4f4..f184d1d 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -583,30 +583,6 @@
 	return 0;
 }
 
-static int get_max_bus_speed (struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value)
-{
-	struct slot *slot = hotplug_slot->private;
-	struct controller *ctrl = slot->ctrl;
-
-	dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
-
-	*value = ctrl->speed_capability;
-
-	return 0;
-}
-
-static int get_cur_bus_speed (struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value)
-{
-	struct slot *slot = hotplug_slot->private;
-	struct controller *ctrl = slot->ctrl;
-
-	dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
-
-	*value = ctrl->speed;
-
-	return 0;
-}
-
 static struct hotplug_slot_ops cpqphp_hotplug_slot_ops = {
 	.set_attention_status =	set_attention_status,
 	.enable_slot =		process_SI,
@@ -616,8 +592,6 @@
 	.get_attention_status =	get_attention_status,
 	.get_latch_status =	get_latch_status,
 	.get_adapter_status =	get_adapter_status,
-	.get_max_bus_speed =	get_max_bus_speed,
-	.get_cur_bus_speed =	get_cur_bus_speed,
 };
 
 #define SLOT_NAME_SIZE 10
@@ -629,6 +603,7 @@
 	struct slot *slot;
 	struct hotplug_slot *hotplug_slot;
 	struct hotplug_slot_info *hotplug_slot_info;
+	struct pci_bus *bus = ctrl->pci_bus;
 	u8 number_of_slots;
 	u8 slot_device;
 	u8 slot_number;
@@ -694,7 +669,7 @@
 			slot->capabilities |= PCISLOT_64_BIT_SUPPORTED;
 		if (is_slot66mhz(slot))
 			slot->capabilities |= PCISLOT_66_MHZ_SUPPORTED;
-		if (ctrl->speed == PCI_SPEED_66MHz)
+		if (bus->cur_bus_speed == PCI_SPEED_66MHz)
 			slot->capabilities |= PCISLOT_66_MHZ_OPERATION;
 
 		ctrl_slot =
@@ -844,6 +819,7 @@
 	u32 rc;
 	struct controller *ctrl;
 	struct pci_func *func;
+	struct pci_bus *bus;
 	int err;
 
 	err = pci_enable_device(pdev);
@@ -852,6 +828,7 @@
 			pci_name(pdev), err);
 		return err;
 	}
+	bus = pdev->subordinate;
 
 	/* Need to read VID early b/c it's used to differentiate CPQ and INTC
 	 * discovery
@@ -929,22 +906,22 @@
 			pci_read_config_byte(pdev, 0x41, &bus_cap);
 			if (bus_cap & 0x80) {
 				dbg("bus max supports 133MHz PCI-X\n");
-				ctrl->speed_capability = PCI_SPEED_133MHz_PCIX;
+				bus->max_bus_speed = PCI_SPEED_133MHz_PCIX;
 				break;
 			}
 			if (bus_cap & 0x40) {
 				dbg("bus max supports 100MHz PCI-X\n");
-				ctrl->speed_capability = PCI_SPEED_100MHz_PCIX;
+				bus->max_bus_speed = PCI_SPEED_100MHz_PCIX;
 				break;
 			}
 			if (bus_cap & 20) {
 				dbg("bus max supports 66MHz PCI-X\n");
-				ctrl->speed_capability = PCI_SPEED_66MHz_PCIX;
+				bus->max_bus_speed = PCI_SPEED_66MHz_PCIX;
 				break;
 			}
 			if (bus_cap & 10) {
 				dbg("bus max supports 66MHz PCI\n");
-				ctrl->speed_capability = PCI_SPEED_66MHz;
+				bus->max_bus_speed = PCI_SPEED_66MHz;
 				break;
 			}
 
@@ -955,7 +932,7 @@
 		case PCI_SUB_HPC_ID:
 			/* Original 6500/7000 implementation */
 			ctrl->slot_switch_type = 1;
-			ctrl->speed_capability = PCI_SPEED_33MHz;
+			bus->max_bus_speed = PCI_SPEED_33MHz;
 			ctrl->push_button = 0;
 			ctrl->pci_config_space = 1;
 			ctrl->defeature_PHP = 1;
@@ -966,7 +943,7 @@
 			/* First Pushbutton implementation */
 			ctrl->push_flag = 1;
 			ctrl->slot_switch_type = 1;
-			ctrl->speed_capability = PCI_SPEED_33MHz;
+			bus->max_bus_speed = PCI_SPEED_33MHz;
 			ctrl->push_button = 1;
 			ctrl->pci_config_space = 1;
 			ctrl->defeature_PHP = 1;
@@ -976,7 +953,7 @@
 		case PCI_SUB_HPC_ID_INTC:
 			/* Third party (6500/7000) */
 			ctrl->slot_switch_type = 1;
-			ctrl->speed_capability = PCI_SPEED_33MHz;
+			bus->max_bus_speed = PCI_SPEED_33MHz;
 			ctrl->push_button = 0;
 			ctrl->pci_config_space = 1;
 			ctrl->defeature_PHP = 1;
@@ -987,7 +964,7 @@
 			/* First 66 Mhz implementation */
 			ctrl->push_flag = 1;
 			ctrl->slot_switch_type = 1;
-			ctrl->speed_capability = PCI_SPEED_66MHz;
+			bus->max_bus_speed = PCI_SPEED_66MHz;
 			ctrl->push_button = 1;
 			ctrl->pci_config_space = 1;
 			ctrl->defeature_PHP = 1;
@@ -998,7 +975,7 @@
 			/* First PCI-X implementation, 100MHz */
 			ctrl->push_flag = 1;
 			ctrl->slot_switch_type = 1;
-			ctrl->speed_capability = PCI_SPEED_100MHz_PCIX;
+			bus->max_bus_speed = PCI_SPEED_100MHz_PCIX;
 			ctrl->push_button = 1;
 			ctrl->pci_config_space = 1;
 			ctrl->defeature_PHP = 1;
@@ -1015,9 +992,9 @@
 	case PCI_VENDOR_ID_INTEL:
 		/* Check for speed capability (0=33, 1=66) */
 		if (subsystem_deviceid & 0x0001)
-			ctrl->speed_capability = PCI_SPEED_66MHz;
+			bus->max_bus_speed = PCI_SPEED_66MHz;
 		else
-			ctrl->speed_capability = PCI_SPEED_33MHz;
+			bus->max_bus_speed = PCI_SPEED_33MHz;
 
 		/* Check for push button */
 		if (subsystem_deviceid & 0x0002)
@@ -1079,7 +1056,7 @@
 					pdev->bus->number);
 
 	dbg("Hotplug controller capabilities:\n");
-	dbg("    speed_capability       %d\n", ctrl->speed_capability);
+	dbg("    speed_capability       %d\n", bus->max_bus_speed);
 	dbg("    slot_switch_type       %s\n", ctrl->slot_switch_type ?
 					"switch present" : "no switch");
 	dbg("    defeature_PHP          %s\n", ctrl->defeature_PHP ?
@@ -1142,7 +1119,7 @@
 	}
 
 	/* Check for 66Mhz operation */
-	ctrl->speed = get_controller_speed(ctrl);
+	bus->cur_bus_speed = get_controller_speed(ctrl);
 
 
 	/********************************************************
diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c
index 0ff689a..e43908d 100644
--- a/drivers/pci/hotplug/cpqphp_ctrl.c
+++ b/drivers/pci/hotplug/cpqphp_ctrl.c
@@ -1130,12 +1130,13 @@
 static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_slot)
 {
 	struct slot *slot;
+	struct pci_bus *bus = ctrl->pci_bus;
 	u8 reg;
 	u8 slot_power = readb(ctrl->hpc_reg + SLOT_POWER);
 	u16 reg16;
 	u32 leds = readl(ctrl->hpc_reg + LED_CONTROL);
 
-	if (ctrl->speed == adapter_speed)
+	if (bus->cur_bus_speed == adapter_speed)
 		return 0;
 
 	/* We don't allow freq/mode changes if we find another adapter running
@@ -1152,7 +1153,7 @@
 		 * lower speed/mode, we allow the new adapter to function at
 		 * this rate if supported
 		 */
-		if (ctrl->speed < adapter_speed)
+		if (bus->cur_bus_speed < adapter_speed)
 			return 0;
 
 		return 1;
@@ -1161,20 +1162,20 @@
 	/* If the controller doesn't support freq/mode changes and the
 	 * controller is running at a higher mode, we bail
 	 */
-	if ((ctrl->speed > adapter_speed) && (!ctrl->pcix_speed_capability))
+	if ((bus->cur_bus_speed > adapter_speed) && (!ctrl->pcix_speed_capability))
 		return 1;
 
 	/* But we allow the adapter to run at a lower rate if possible */
-	if ((ctrl->speed < adapter_speed) && (!ctrl->pcix_speed_capability))
+	if ((bus->cur_bus_speed < adapter_speed) && (!ctrl->pcix_speed_capability))
 		return 0;
 
 	/* We try to set the max speed supported by both the adapter and
 	 * controller
 	 */
-	if (ctrl->speed_capability < adapter_speed) {
-		if (ctrl->speed == ctrl->speed_capability)
+	if (bus->max_bus_speed < adapter_speed) {
+		if (bus->cur_bus_speed == bus->max_bus_speed)
 			return 0;
-		adapter_speed = ctrl->speed_capability;
+		adapter_speed = bus->max_bus_speed;
 	}
 
 	writel(0x0L, ctrl->hpc_reg + LED_CONTROL);
@@ -1229,8 +1230,8 @@
 	pci_write_config_byte(ctrl->pci_dev, 0x43, reg);
 
 	/* Only if mode change...*/
-	if (((ctrl->speed == PCI_SPEED_66MHz) && (adapter_speed == PCI_SPEED_66MHz_PCIX)) ||
-		((ctrl->speed == PCI_SPEED_66MHz_PCIX) && (adapter_speed == PCI_SPEED_66MHz))) 
+	if (((bus->cur_bus_speed == PCI_SPEED_66MHz) && (adapter_speed == PCI_SPEED_66MHz_PCIX)) ||
+		((bus->cur_bus_speed == PCI_SPEED_66MHz_PCIX) && (adapter_speed == PCI_SPEED_66MHz))) 
 			set_SOGO(ctrl);
 
 	wait_for_ctrl_irq(ctrl);
@@ -1243,7 +1244,7 @@
 	set_SOGO(ctrl);
 	wait_for_ctrl_irq(ctrl);
 
-	ctrl->speed = adapter_speed;
+	bus->cur_bus_speed = adapter_speed;
 	slot = cpqhp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
 
 	info("Successfully changed frequency/mode for adapter in slot %d\n",
@@ -1269,6 +1270,7 @@
  */
 static u32 board_replaced(struct pci_func *func, struct controller *ctrl)
 {
+	struct pci_bus *bus = ctrl->pci_bus;
 	u8 hp_slot;
 	u8 temp_byte;
 	u8 adapter_speed;
@@ -1309,7 +1311,7 @@
 		wait_for_ctrl_irq (ctrl);
 
 		adapter_speed = get_adapter_speed(ctrl, hp_slot);
-		if (ctrl->speed != adapter_speed)
+		if (bus->cur_bus_speed != adapter_speed)
 			if (set_controller_speed(ctrl, adapter_speed, hp_slot))
 				rc = WRONG_BUS_FREQUENCY;
 
@@ -1426,6 +1428,7 @@
 	u32 temp_register = 0xFFFFFFFF;
 	u32 rc = 0;
 	struct pci_func *new_slot = NULL;
+	struct pci_bus *bus = ctrl->pci_bus;
 	struct slot *p_slot;
 	struct resource_lists res_lists;
 
@@ -1456,7 +1459,7 @@
 	wait_for_ctrl_irq (ctrl);
 
 	adapter_speed = get_adapter_speed(ctrl, hp_slot);
-	if (ctrl->speed != adapter_speed)
+	if (bus->cur_bus_speed != adapter_speed)
 		if (set_controller_speed(ctrl, adapter_speed, hp_slot))
 			rc = WRONG_BUS_FREQUENCY;
 
diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c
index 7485ffd..d934dd4 100644
--- a/drivers/pci/hotplug/ibmphp_core.c
+++ b/drivers/pci/hotplug/ibmphp_core.c
@@ -395,89 +395,40 @@
 	return rc;
 }
 
-static int get_max_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value)
+static int get_max_bus_speed(struct slot *slot)
 {
-	int rc = -ENODEV;
-	struct slot *pslot;
+	int rc = 0;
 	u8 mode = 0;
+	enum pci_bus_speed speed;
+	struct pci_bus *bus = slot->hotplug_slot->pci_slot->bus;
 
-	debug("%s - Entry hotplug_slot[%p] pvalue[%p]\n", __func__,
-		hotplug_slot, value);
+	debug("%s - Entry slot[%p]\n", __func__, slot);
 
 	ibmphp_lock_operations();
+	mode = slot->supported_bus_mode;
+	speed = slot->supported_speed; 
+	ibmphp_unlock_operations();
 
-	if (hotplug_slot) {
-		pslot = hotplug_slot->private;
-		if (pslot) {
-			rc = 0;
-			mode = pslot->supported_bus_mode;
-			*value = pslot->supported_speed; 
-			switch (*value) {
-			case BUS_SPEED_33:
-				break;
-			case BUS_SPEED_66:
-				if (mode == BUS_MODE_PCIX) 
-					*value += 0x01;
-				break;
-			case BUS_SPEED_100:
-			case BUS_SPEED_133:
-				*value = pslot->supported_speed + 0x01;
-				break;
-			default:
-				/* Note (will need to change): there would be soon 256, 512 also */
-				rc = -ENODEV;
-			}
-		}
+	switch (speed) {
+	case BUS_SPEED_33:
+		break;
+	case BUS_SPEED_66:
+		if (mode == BUS_MODE_PCIX) 
+			speed += 0x01;
+		break;
+	case BUS_SPEED_100:
+	case BUS_SPEED_133:
+		speed += 0x01;
+		break;
+	default:
+		/* Note (will need to change): there would be soon 256, 512 also */
+		rc = -ENODEV;
 	}
 
-	ibmphp_unlock_operations();
-	debug("%s - Exit rc[%d] value[%x]\n", __func__, rc, *value);
-	return rc;
-}
+	if (!rc)
+		bus->max_bus_speed = speed;
 
-static int get_cur_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value)
-{
-	int rc = -ENODEV;
-	struct slot *pslot;
-	u8 mode = 0;
-
-	debug("%s - Entry hotplug_slot[%p] pvalue[%p]\n", __func__,
-		hotplug_slot, value);
-
-	ibmphp_lock_operations();
-
-	if (hotplug_slot) {
-		pslot = hotplug_slot->private;
-		if (pslot) {
-			rc = get_cur_bus_info(&pslot);
-			if (!rc) {
-				mode = pslot->bus_on->current_bus_mode;
-				*value = pslot->bus_on->current_speed;
-				switch (*value) {
-				case BUS_SPEED_33:
-					break;
-				case BUS_SPEED_66:
-					if (mode == BUS_MODE_PCIX) 
-						*value += 0x01;
-					else if (mode == BUS_MODE_PCI)
-						;
-					else
-						*value = PCI_SPEED_UNKNOWN;
-					break;
-				case BUS_SPEED_100:
-				case BUS_SPEED_133:
-					*value += 0x01;
-					break;
-				default:
-					/* Note of change: there would also be 256, 512 soon */
-					rc = -ENODEV;
-				}
-			}
-		}
-	}
-
-	ibmphp_unlock_operations();
-	debug("%s - Exit rc[%d] value[%x]\n", __func__, rc, *value);
+	debug("%s - Exit rc[%d] speed[%x]\n", __func__, rc, speed);
 	return rc;
 }
 
@@ -572,6 +523,7 @@
 		if (slot_cur->bus_on->current_speed == 0xFF) 
 			if (get_cur_bus_info(&slot_cur)) 
 				return -1;
+		get_max_bus_speed(slot_cur);
 
 		if (slot_cur->ctrl->options == 0xFF)
 			if (get_hpc_options(slot_cur, &slot_cur->ctrl->options))
@@ -655,6 +607,7 @@
 int ibmphp_update_slot_info(struct slot *slot_cur)
 {
 	struct hotplug_slot_info *info;
+	struct pci_bus *bus = slot_cur->hotplug_slot->pci_slot->bus;
 	int rc;
 	u8 bus_speed;
 	u8 mode;
@@ -700,8 +653,7 @@
 			bus_speed = PCI_SPEED_UNKNOWN;
 	}
 
-	info->cur_bus_speed = bus_speed;
-	info->max_bus_speed = slot_cur->hotplug_slot->info->max_bus_speed;
+	bus->cur_bus_speed = bus_speed;
 	// To do: bus_names 
 	
 	rc = pci_hp_change_slot_info(slot_cur->hotplug_slot, info);
@@ -1326,8 +1278,6 @@
 	.get_attention_status =		get_attention_status,
 	.get_latch_status =		get_latch_status,
 	.get_adapter_status =		get_adapter_present,
-	.get_max_bus_speed =		get_max_bus_speed,
-	.get_cur_bus_speed =		get_cur_bus_speed,
 /*	.get_max_adapter_speed =	get_max_adapter_speed,
 	.get_bus_name_status =		get_bus_name,
 */
diff --git a/drivers/pci/hotplug/ibmphp_ebda.c b/drivers/pci/hotplug/ibmphp_ebda.c
index c1abac8..5becbde 100644
--- a/drivers/pci/hotplug/ibmphp_ebda.c
+++ b/drivers/pci/hotplug/ibmphp_ebda.c
@@ -245,7 +245,7 @@
 
 int __init ibmphp_access_ebda (void)
 {
-	u8 format, num_ctlrs, rio_complete, hs_complete;
+	u8 format, num_ctlrs, rio_complete, hs_complete, ebda_sz;
 	u16 ebda_seg, num_entries, next_offset, offset, blk_id, sub_addr, re, rc_id, re_id, base;
 	int rc = 0;
 
@@ -260,7 +260,16 @@
 	iounmap (io_mem);
 	debug ("returned ebda segment: %x\n", ebda_seg);
 	
-	io_mem = ioremap(ebda_seg<<4, 1024);
+	io_mem = ioremap(ebda_seg<<4, 1);
+	if (!io_mem)
+		return -ENOMEM;
+	ebda_sz = readb(io_mem);
+	iounmap(io_mem);
+	debug("ebda size: %d(KiB)\n", ebda_sz);
+	if (ebda_sz == 0)
+		return -ENOMEM;
+
+	io_mem = ioremap(ebda_seg<<4, (ebda_sz * 1024));
 	if (!io_mem )
 		return -ENOMEM;
 	next_offset = 0x180;
diff --git a/drivers/pci/hotplug/ibmphp_hpc.c b/drivers/pci/hotplug/ibmphp_hpc.c
index c7084f0..1aaf3f3 100644
--- a/drivers/pci/hotplug/ibmphp_hpc.c
+++ b/drivers/pci/hotplug/ibmphp_hpc.c
@@ -35,6 +35,7 @@
 #include <linux/init.h>
 #include <linux/mutex.h>
 #include <linux/sched.h>
+#include <linux/semaphore.h>
 #include <linux/kthread.h>
 #include "ibmphp.h"
 
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index 38183a5..728b119 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -64,32 +64,6 @@
 static LIST_HEAD(pci_hotplug_slot_list);
 static DEFINE_MUTEX(pci_hp_mutex);
 
-/* these strings match up with the values in pci_bus_speed */
-static char *pci_bus_speed_strings[] = {
-	"33 MHz PCI",		/* 0x00 */
-	"66 MHz PCI",		/* 0x01 */
-	"66 MHz PCI-X",		/* 0x02 */
-	"100 MHz PCI-X",	/* 0x03 */
-	"133 MHz PCI-X",	/* 0x04 */
-	NULL,			/* 0x05 */
-	NULL,			/* 0x06 */
-	NULL,			/* 0x07 */
-	NULL,			/* 0x08 */
-	"66 MHz PCI-X 266",	/* 0x09 */
-	"100 MHz PCI-X 266",	/* 0x0a */
-	"133 MHz PCI-X 266",	/* 0x0b */
-	NULL,			/* 0x0c */
-	NULL,			/* 0x0d */
-	NULL,			/* 0x0e */
-	NULL,			/* 0x0f */
-	NULL,			/* 0x10 */
-	"66 MHz PCI-X 533",	/* 0x11 */
-	"100 MHz PCI-X 533",	/* 0x12 */
-	"133 MHz PCI-X 533",	/* 0x13 */
-	"2.5 GT/s PCIe",	/* 0x14 */
-	"5.0 GT/s PCIe",	/* 0x15 */
-};
-
 #ifdef CONFIG_HOTPLUG_PCI_CPCI
 extern int cpci_hotplug_init(int debug);
 extern void cpci_hotplug_exit(void);
@@ -118,8 +92,6 @@
 GET_STATUS(attention_status, u8)
 GET_STATUS(latch_status, u8)
 GET_STATUS(adapter_status, u8)
-GET_STATUS(max_bus_speed, enum pci_bus_speed)
-GET_STATUS(cur_bus_speed, enum pci_bus_speed)
 
 static ssize_t power_read_file(struct pci_slot *slot, char *buf)
 {
@@ -263,60 +235,6 @@
 	.show = presence_read_file,
 };
 
-static char *unknown_speed = "Unknown bus speed";
-
-static ssize_t max_bus_speed_read_file(struct pci_slot *slot, char *buf)
-{
-	char *speed_string;
-	int retval;
-	enum pci_bus_speed value;
-	
-	retval = get_max_bus_speed(slot->hotplug, &value);
-	if (retval)
-		goto exit;
-
-	if (value == PCI_SPEED_UNKNOWN)
-		speed_string = unknown_speed;
-	else
-		speed_string = pci_bus_speed_strings[value];
-	
-	retval = sprintf (buf, "%s\n", speed_string);
-
-exit:
-	return retval;
-}
-
-static struct pci_slot_attribute hotplug_slot_attr_max_bus_speed = {
-	.attr = {.name = "max_bus_speed", .mode = S_IFREG | S_IRUGO},
-	.show = max_bus_speed_read_file,
-};
-
-static ssize_t cur_bus_speed_read_file(struct pci_slot *slot, char *buf)
-{
-	char *speed_string;
-	int retval;
-	enum pci_bus_speed value;
-
-	retval = get_cur_bus_speed(slot->hotplug, &value);
-	if (retval)
-		goto exit;
-
-	if (value == PCI_SPEED_UNKNOWN)
-		speed_string = unknown_speed;
-	else
-		speed_string = pci_bus_speed_strings[value];
-	
-	retval = sprintf (buf, "%s\n", speed_string);
-
-exit:
-	return retval;
-}
-
-static struct pci_slot_attribute hotplug_slot_attr_cur_bus_speed = {
-	.attr = {.name = "cur_bus_speed", .mode = S_IFREG | S_IRUGO},
-	.show = cur_bus_speed_read_file,
-};
-
 static ssize_t test_write_file(struct pci_slot *pci_slot, const char *buf,
 		size_t count)
 {
@@ -391,26 +309,6 @@
 	return false;
 }
 
-static bool has_max_bus_speed_file(struct pci_slot *pci_slot)
-{
-	struct hotplug_slot *slot = pci_slot->hotplug;
-	if ((!slot) || (!slot->ops))
-		return false;
-	if (slot->ops->get_max_bus_speed)
-		return true;
-	return false;
-}
-
-static bool has_cur_bus_speed_file(struct pci_slot *pci_slot)
-{
-	struct hotplug_slot *slot = pci_slot->hotplug;
-	if ((!slot) || (!slot->ops))
-		return false;
-	if (slot->ops->get_cur_bus_speed)
-		return true;
-	return false;
-}
-
 static bool has_test_file(struct pci_slot *pci_slot)
 {
 	struct hotplug_slot *slot = pci_slot->hotplug;
@@ -456,20 +354,6 @@
 			goto exit_adapter;
 	}
 
-	if (has_max_bus_speed_file(slot)) {
-		retval = sysfs_create_file(&slot->kobj,
-					&hotplug_slot_attr_max_bus_speed.attr);
-		if (retval)
-			goto exit_max_speed;
-	}
-
-	if (has_cur_bus_speed_file(slot)) {
-		retval = sysfs_create_file(&slot->kobj,
-					&hotplug_slot_attr_cur_bus_speed.attr);
-		if (retval)
-			goto exit_cur_speed;
-	}
-
 	if (has_test_file(slot)) {
 		retval = sysfs_create_file(&slot->kobj,
 					   &hotplug_slot_attr_test.attr);
@@ -480,14 +364,6 @@
 	goto exit;
 
 exit_test:
-	if (has_cur_bus_speed_file(slot))
-		sysfs_remove_file(&slot->kobj,
-				  &hotplug_slot_attr_cur_bus_speed.attr);
-exit_cur_speed:
-	if (has_max_bus_speed_file(slot))
-		sysfs_remove_file(&slot->kobj,
-				  &hotplug_slot_attr_max_bus_speed.attr);
-exit_max_speed:
 	if (has_adapter_file(slot))
 		sysfs_remove_file(&slot->kobj,
 				  &hotplug_slot_attr_presence.attr);
@@ -523,14 +399,6 @@
 		sysfs_remove_file(&slot->kobj,
 				  &hotplug_slot_attr_presence.attr);
 
-	if (has_max_bus_speed_file(slot))
-		sysfs_remove_file(&slot->kobj,
-				  &hotplug_slot_attr_max_bus_speed.attr);
-
-	if (has_cur_bus_speed_file(slot))
-		sysfs_remove_file(&slot->kobj,
-				  &hotplug_slot_attr_cur_bus_speed.attr);
-
 	if (has_test_file(slot))
 		sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_test.attr);
 
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 5674b20..920f820 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -69,8 +69,6 @@
 static int get_attention_status	(struct hotplug_slot *slot, u8 *value);
 static int get_latch_status	(struct hotplug_slot *slot, u8 *value);
 static int get_adapter_status	(struct hotplug_slot *slot, u8 *value);
-static int get_max_bus_speed	(struct hotplug_slot *slot, enum pci_bus_speed *value);
-static int get_cur_bus_speed	(struct hotplug_slot *slot, enum pci_bus_speed *value);
 
 /**
  * release_slot - free up the memory used by a slot
@@ -113,8 +111,6 @@
 	ops->disable_slot = disable_slot;
 	ops->get_power_status = get_power_status;
 	ops->get_adapter_status = get_adapter_status;
-	ops->get_max_bus_speed = get_max_bus_speed;
-	ops->get_cur_bus_speed = get_cur_bus_speed;
 	if (MRL_SENS(ctrl))
 		ops->get_latch_status = get_latch_status;
 	if (ATTN_LED(ctrl)) {
@@ -227,27 +223,6 @@
 	return pciehp_get_adapter_status(slot, value);
 }
 
-static int get_max_bus_speed(struct hotplug_slot *hotplug_slot,
-				enum pci_bus_speed *value)
-{
-	struct slot *slot = hotplug_slot->private;
-
-	ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
-		 __func__, slot_name(slot));
-
-	return pciehp_get_max_link_speed(slot, value);
-}
-
-static int get_cur_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value)
-{
-	struct slot *slot = hotplug_slot->private;
-
-	ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
-		 __func__, slot_name(slot));
-
-	return pciehp_get_cur_link_speed(slot, value);
-}
-
 static int pciehp_probe(struct pcie_device *dev)
 {
 	int rc;
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index d6ac1b2..9a7f247 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -341,6 +341,7 @@
 		p_slot->state = POWERON_STATE;
 		break;
 	default:
+		kfree(info);
 		goto out;
 	}
 	queue_work(pciehp_wq, &info->work);
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 10040d5..40b48f5 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -492,6 +492,7 @@
 	u16 slot_cmd;
 	u16 cmd_mask;
 	u16 slot_status;
+	u16 lnk_status;
 	int retval = 0;
 
 	/* Clear sticky power-fault bit from previous power failures */
@@ -523,6 +524,14 @@
 	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
 		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
 
+	retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status);
+	if (retval) {
+		ctrl_err(ctrl, "%s: Cannot read LNKSTA register\n",
+				__func__);
+		return retval;
+	}
+	pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status);
+
 	return retval;
 }
 
@@ -610,37 +619,6 @@
 	return IRQ_HANDLED;
 }
 
-int pciehp_get_max_link_speed(struct slot *slot, enum pci_bus_speed *value)
-{
-	struct controller *ctrl = slot->ctrl;
-	enum pcie_link_speed lnk_speed;
-	u32	lnk_cap;
-	int retval = 0;
-
-	retval = pciehp_readl(ctrl, PCI_EXP_LNKCAP, &lnk_cap);
-	if (retval) {
-		ctrl_err(ctrl, "%s: Cannot read LNKCAP register\n", __func__);
-		return retval;
-	}
-
-	switch (lnk_cap & 0x000F) {
-	case 1:
-		lnk_speed = PCIE_2_5GB;
-		break;
-	case 2:
-		lnk_speed = PCIE_5_0GB;
-		break;
-	default:
-		lnk_speed = PCIE_LNK_SPEED_UNKNOWN;
-		break;
-	}
-
-	*value = lnk_speed;
-	ctrl_dbg(ctrl, "Max link speed = %d\n", lnk_speed);
-
-	return retval;
-}
-
 int pciehp_get_max_lnk_width(struct slot *slot,
 				 enum pcie_link_width *value)
 {
@@ -691,38 +669,6 @@
 	return retval;
 }
 
-int pciehp_get_cur_link_speed(struct slot *slot, enum pci_bus_speed *value)
-{
-	struct controller *ctrl = slot->ctrl;
-	enum pcie_link_speed lnk_speed = PCI_SPEED_UNKNOWN;
-	int retval = 0;
-	u16 lnk_status;
-
-	retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status);
-	if (retval) {
-		ctrl_err(ctrl, "%s: Cannot read LNKSTATUS register\n",
-			 __func__);
-		return retval;
-	}
-
-	switch (lnk_status & PCI_EXP_LNKSTA_CLS) {
-	case 1:
-		lnk_speed = PCIE_2_5GB;
-		break;
-	case 2:
-		lnk_speed = PCIE_5_0GB;
-		break;
-	default:
-		lnk_speed = PCIE_LNK_SPEED_UNKNOWN;
-		break;
-	}
-
-	*value = lnk_speed;
-	ctrl_dbg(ctrl, "Current link speed = %d\n", lnk_speed);
-
-	return retval;
-}
-
 int pciehp_get_cur_lnk_width(struct slot *slot,
 				 enum pcie_link_width *value)
 {
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
index 2173310..0a16444 100644
--- a/drivers/pci/hotplug/pciehp_pci.c
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -53,17 +53,15 @@
 		busnr = pci_scan_bridge(parent, dev, busnr, pass);
 	if (!dev->subordinate)
 		return -1;
-	pci_bus_size_bridges(dev->subordinate);
-	pci_bus_assign_resources(parent);
-	pci_enable_bridges(parent);
-	pci_bus_add_devices(parent);
+
 	return 0;
 }
 
 int pciehp_configure_device(struct slot *p_slot)
 {
 	struct pci_dev *dev;
-	struct pci_bus *parent = p_slot->ctrl->pcie->port->subordinate;
+	struct pci_dev *bridge = p_slot->ctrl->pcie->port;
+	struct pci_bus *parent = bridge->subordinate;
 	int num, fn;
 	struct controller *ctrl = p_slot->ctrl;
 
@@ -96,12 +94,25 @@
 				(dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)) {
 			pciehp_add_bridge(dev);
 		}
+		pci_dev_put(dev);
+	}
+
+	pci_assign_unassigned_bridge_resources(bridge);
+
+	for (fn = 0; fn < 8; fn++) {
+		dev = pci_get_slot(parent, PCI_DEVFN(0, fn));
+		if (!dev)
+			continue;
+		if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
+			pci_dev_put(dev);
+			continue;
+		}
 		pci_configure_slot(dev);
 		pci_dev_put(dev);
 	}
 
-	pci_bus_assign_resources(parent);
 	pci_bus_add_devices(parent);
+
 	return 0;
 }
 
diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c
index c159223..dcaae72 100644
--- a/drivers/pci/hotplug/rpaphp_core.c
+++ b/drivers/pci/hotplug/rpaphp_core.c
@@ -130,10 +130,9 @@
 	return 0;
 }
 
-static int get_max_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value)
+static enum pci_bus_speed get_max_bus_speed(struct slot *slot)
 {
-	struct slot *slot = (struct slot *)hotplug_slot->private;
-
+	enum pci_bus_speed speed;
 	switch (slot->type) {
 	case 1:
 	case 2:
@@ -141,30 +140,30 @@
 	case 4:
 	case 5:
 	case 6:
-		*value = PCI_SPEED_33MHz;	/* speed for case 1-6 */
+		speed = PCI_SPEED_33MHz;	/* speed for case 1-6 */
 		break;
 	case 7:
 	case 8:
-		*value = PCI_SPEED_66MHz;
+		speed = PCI_SPEED_66MHz;
 		break;
 	case 11:
 	case 14:
-		*value = PCI_SPEED_66MHz_PCIX;
+		speed = PCI_SPEED_66MHz_PCIX;
 		break;
 	case 12:
 	case 15:
-		*value = PCI_SPEED_100MHz_PCIX;
+		speed = PCI_SPEED_100MHz_PCIX;
 		break;
 	case 13:
 	case 16:
-		*value = PCI_SPEED_133MHz_PCIX;
+		speed = PCI_SPEED_133MHz_PCIX;
 		break;
 	default:
-		*value = PCI_SPEED_UNKNOWN;
+		speed = PCI_SPEED_UNKNOWN;
 		break;
-
 	}
-	return 0;
+
+	return speed;
 }
 
 static int get_children_props(struct device_node *dn, const int **drc_indexes,
@@ -408,6 +407,8 @@
 		slot->state = NOT_VALID;
 		return -EINVAL;
 	}
+
+	slot->bus->max_bus_speed = get_max_bus_speed(slot);
 	return 0;
 }
 
@@ -429,7 +430,6 @@
 	.get_power_status = get_power_status,
 	.get_attention_status = get_attention_status,
 	.get_adapter_status = get_adapter_status,
-	.get_max_bus_speed = get_max_bus_speed,
 };
 
 module_init(rpaphp_init);
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index 8e210cd7..d2627e1 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -333,8 +333,6 @@
 	int (*set_attention_status)(struct slot *slot, u8 status);
 	int (*get_latch_status)(struct slot *slot, u8 *status);
 	int (*get_adapter_status)(struct slot *slot, u8 *status);
-	int (*get_max_bus_speed)(struct slot *slot, enum pci_bus_speed *speed);
-	int (*get_cur_bus_speed)(struct slot *slot, enum pci_bus_speed *speed);
 	int (*get_adapter_speed)(struct slot *slot, enum pci_bus_speed *speed);
 	int (*get_mode1_ECC_cap)(struct slot *slot, u8 *mode);
 	int (*get_prog_int)(struct slot *slot, u8 *prog_int);
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index 8a520a3..a506229 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -65,8 +65,6 @@
 static int get_attention_status	(struct hotplug_slot *slot, u8 *value);
 static int get_latch_status	(struct hotplug_slot *slot, u8 *value);
 static int get_adapter_status	(struct hotplug_slot *slot, u8 *value);
-static int get_max_bus_speed	(struct hotplug_slot *slot, enum pci_bus_speed *value);
-static int get_cur_bus_speed	(struct hotplug_slot *slot, enum pci_bus_speed *value);
 
 static struct hotplug_slot_ops shpchp_hotplug_slot_ops = {
 	.set_attention_status =	set_attention_status,
@@ -76,8 +74,6 @@
 	.get_attention_status =	get_attention_status,
 	.get_latch_status =	get_latch_status,
 	.get_adapter_status =	get_adapter_status,
-	.get_max_bus_speed =	get_max_bus_speed,
-	.get_cur_bus_speed =	get_cur_bus_speed,
 };
 
 /**
@@ -279,37 +275,6 @@
 	return 0;
 }
 
-static int get_max_bus_speed(struct hotplug_slot *hotplug_slot,
-				enum pci_bus_speed *value)
-{
-	struct slot *slot = get_slot(hotplug_slot);
-	int retval;
-
-	ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
-		 __func__, slot_name(slot));
-
-	retval = slot->hpc_ops->get_max_bus_speed(slot, value);
-	if (retval < 0)
-		*value = PCI_SPEED_UNKNOWN;
-
-	return 0;
-}
-
-static int get_cur_bus_speed (struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value)
-{
-	struct slot *slot = get_slot(hotplug_slot);
-	int retval;
-
-	ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
-		 __func__, slot_name(slot));
-
-	retval = slot->hpc_ops->get_cur_bus_speed(slot, value);
-	if (retval < 0)
-		*value = PCI_SPEED_UNKNOWN;
-
-	return 0;
-}
-
 static int is_shpc_capable(struct pci_dev *dev)
 {
 	if ((dev->vendor == PCI_VENDOR_ID_AMD) || (dev->device ==
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
index b8ab279..3bba0c0 100644
--- a/drivers/pci/hotplug/shpchp_ctrl.c
+++ b/drivers/pci/hotplug/shpchp_ctrl.c
@@ -285,17 +285,8 @@
 		return WRONG_BUS_FREQUENCY;
 	}
 
-	rc = p_slot->hpc_ops->get_cur_bus_speed(p_slot, &bsp);
-	if (rc) {
-		ctrl_err(ctrl, "Can't get bus operation speed\n");
-		return WRONG_BUS_FREQUENCY;
-	}
-
-	rc = p_slot->hpc_ops->get_max_bus_speed(p_slot, &msp);
-	if (rc) {
-		ctrl_err(ctrl, "Can't get max bus operation speed\n");
-		msp = bsp;
-	}
+	bsp = ctrl->pci_dev->bus->cur_bus_speed;
+	msp = ctrl->pci_dev->bus->max_bus_speed;
 
 	/* Check if there are other slots or devices on the same bus */
 	if (!list_empty(&ctrl->pci_dev->subordinate->devices))
@@ -462,6 +453,7 @@
 		p_slot->state = POWERON_STATE;
 		break;
 	default:
+		kfree(info);
 		goto out;
 	}
 	queue_work(shpchp_wq, &info->work);
diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c
index 86dc398..5f5e8d2 100644
--- a/drivers/pci/hotplug/shpchp_hpc.c
+++ b/drivers/pci/hotplug/shpchp_hpc.c
@@ -660,6 +660,75 @@
 	return retval;
 }
 
+static int shpc_get_cur_bus_speed(struct controller *ctrl)
+{
+	int retval = 0;
+	struct pci_bus *bus = ctrl->pci_dev->subordinate;
+	enum pci_bus_speed bus_speed = PCI_SPEED_UNKNOWN;
+	u16 sec_bus_reg = shpc_readw(ctrl, SEC_BUS_CONFIG);
+	u8 pi = shpc_readb(ctrl, PROG_INTERFACE);
+	u8 speed_mode = (pi == 2) ? (sec_bus_reg & 0xF) : (sec_bus_reg & 0x7);
+
+	if ((pi == 1) && (speed_mode > 4)) {
+		retval = -ENODEV;
+		goto out;
+	}
+
+	switch (speed_mode) {
+	case 0x0:
+		bus_speed = PCI_SPEED_33MHz;
+		break;
+	case 0x1:
+		bus_speed = PCI_SPEED_66MHz;
+		break;
+	case 0x2:
+		bus_speed = PCI_SPEED_66MHz_PCIX;
+		break;
+	case 0x3:
+		bus_speed = PCI_SPEED_100MHz_PCIX;
+		break;
+	case 0x4:
+		bus_speed = PCI_SPEED_133MHz_PCIX;
+		break;
+	case 0x5:
+		bus_speed = PCI_SPEED_66MHz_PCIX_ECC;
+		break;
+	case 0x6:
+		bus_speed = PCI_SPEED_100MHz_PCIX_ECC;
+		break;
+	case 0x7:
+		bus_speed = PCI_SPEED_133MHz_PCIX_ECC;
+		break;
+	case 0x8:
+		bus_speed = PCI_SPEED_66MHz_PCIX_266;
+		break;
+	case 0x9:
+		bus_speed = PCI_SPEED_100MHz_PCIX_266;
+		break;
+	case 0xa:
+		bus_speed = PCI_SPEED_133MHz_PCIX_266;
+		break;
+	case 0xb:
+		bus_speed = PCI_SPEED_66MHz_PCIX_533;
+		break;
+	case 0xc:
+		bus_speed = PCI_SPEED_100MHz_PCIX_533;
+		break;
+	case 0xd:
+		bus_speed = PCI_SPEED_133MHz_PCIX_533;
+		break;
+	default:
+		retval = -ENODEV;
+		break;
+	}
+
+ out:
+	bus->cur_bus_speed = bus_speed;
+	dbg("Current bus speed = %d\n", bus_speed);
+	return retval;
+}
+
+
 static int hpc_set_bus_speed_mode(struct slot * slot, enum pci_bus_speed value)
 {
 	int retval;
@@ -720,6 +789,8 @@
 	retval = shpc_write_cmd(slot, 0, cmd);
 	if (retval)
 		ctrl_err(ctrl, "%s: Write command failed!\n", __func__);
+	else
+		shpc_get_cur_bus_speed(ctrl);
 
 	return retval;
 }
@@ -803,10 +874,10 @@
 	return IRQ_HANDLED;
 }
 
-static int hpc_get_max_bus_speed (struct slot *slot, enum pci_bus_speed *value)
+static int shpc_get_max_bus_speed(struct controller *ctrl)
 {
 	int retval = 0;
-	struct controller *ctrl = slot->ctrl;
+	struct pci_bus *bus = ctrl->pci_dev->subordinate;
 	enum pci_bus_speed bus_speed = PCI_SPEED_UNKNOWN;
 	u8 pi = shpc_readb(ctrl, PROG_INTERFACE);
 	u32 slot_avail1 = shpc_readl(ctrl, SLOT_AVAIL1);
@@ -842,79 +913,12 @@
 			retval = -ENODEV;
 	}
 
-	*value = bus_speed;
+	bus->max_bus_speed = bus_speed;
 	ctrl_dbg(ctrl, "Max bus speed = %d\n", bus_speed);
 
 	return retval;
 }
 
-static int hpc_get_cur_bus_speed (struct slot *slot, enum pci_bus_speed *value)
-{
-	int retval = 0;
-	struct controller *ctrl = slot->ctrl;
-	enum pci_bus_speed bus_speed = PCI_SPEED_UNKNOWN;
-	u16 sec_bus_reg = shpc_readw(ctrl, SEC_BUS_CONFIG);
-	u8 pi = shpc_readb(ctrl, PROG_INTERFACE);
-	u8 speed_mode = (pi == 2) ? (sec_bus_reg & 0xF) : (sec_bus_reg & 0x7);
-
-	if ((pi == 1) && (speed_mode > 4)) {
-		*value = PCI_SPEED_UNKNOWN;
-		return -ENODEV;
-	}
-
-	switch (speed_mode) {
-	case 0x0:
-		*value = PCI_SPEED_33MHz;
-		break;
-	case 0x1:
-		*value = PCI_SPEED_66MHz;
-		break;
-	case 0x2:
-		*value = PCI_SPEED_66MHz_PCIX;
-		break;
-	case 0x3:
-		*value = PCI_SPEED_100MHz_PCIX;
-		break;
-	case 0x4:
-		*value = PCI_SPEED_133MHz_PCIX;
-		break;
-	case 0x5:
-		*value = PCI_SPEED_66MHz_PCIX_ECC;
-		break;
-	case 0x6:
-		*value = PCI_SPEED_100MHz_PCIX_ECC;
-		break;
-	case 0x7:
-		*value = PCI_SPEED_133MHz_PCIX_ECC;
-		break;
-	case 0x8:
-		*value = PCI_SPEED_66MHz_PCIX_266;
-		break;
-	case 0x9:
-		*value = PCI_SPEED_100MHz_PCIX_266;
-		break;
-	case 0xa:
-		*value = PCI_SPEED_133MHz_PCIX_266;
-		break;
-	case 0xb:
-		*value = PCI_SPEED_66MHz_PCIX_533;
-		break;
-	case 0xc:
-		*value = PCI_SPEED_100MHz_PCIX_533;
-		break;
-	case 0xd:
-		*value = PCI_SPEED_133MHz_PCIX_533;
-		break;
-	default:
-		*value = PCI_SPEED_UNKNOWN;
-		retval = -ENODEV;
-		break;
-	}
-
-	ctrl_dbg(ctrl, "Current bus speed = %d\n", bus_speed);
-	return retval;
-}
-
 static struct hpc_ops shpchp_hpc_ops = {
 	.power_on_slot			= hpc_power_on_slot,
 	.slot_enable			= hpc_slot_enable,
@@ -926,8 +930,6 @@
 	.get_latch_status		= hpc_get_latch_status,
 	.get_adapter_status		= hpc_get_adapter_status,
 
-	.get_max_bus_speed		= hpc_get_max_bus_speed,
-	.get_cur_bus_speed		= hpc_get_cur_bus_speed,
 	.get_adapter_speed		= hpc_get_adapter_speed,
 	.get_mode1_ECC_cap		= hpc_get_mode1_ECC_cap,
 	.get_prog_int			= hpc_get_prog_int,
@@ -1086,6 +1088,9 @@
 	}
 	ctrl_dbg(ctrl, "HPC at %s irq=%x\n", pci_name(pdev), pdev->irq);
 
+	shpc_get_max_bus_speed(ctrl);
+	shpc_get_cur_bus_speed(ctrl);
+
 	/*
 	 * If this is the first controller to be initialized,
 	 * initialize the shpchpd work queue
diff --git a/drivers/pci/hotplug/shpchp_sysfs.c b/drivers/pci/hotplug/shpchp_sysfs.c
index 29fa9d2..071b7dc 100644
--- a/drivers/pci/hotplug/shpchp_sysfs.c
+++ b/drivers/pci/hotplug/shpchp_sysfs.c
@@ -47,8 +47,7 @@
 	bus = pdev->subordinate;
 
 	out += sprintf(buf, "Free resources: memory\n");
-	for (index = 0; index < PCI_BUS_NUM_RESOURCES; index++) {
-		res = bus->resource[index];
+	pci_bus_for_each_resource(bus, res, index) {
 		if (res && (res->flags & IORESOURCE_MEM) &&
 				!(res->flags & IORESOURCE_PREFETCH)) {
 			out += sprintf(out, "start = %8.8llx, "
@@ -58,8 +57,7 @@
 		}
 	}
 	out += sprintf(out, "Free resources: prefetchable memory\n");
-	for (index = 0; index < PCI_BUS_NUM_RESOURCES; index++) {
-		res = bus->resource[index];
+	pci_bus_for_each_resource(bus, res, index) {
 		if (res && (res->flags & IORESOURCE_MEM) &&
 			       (res->flags & IORESOURCE_PREFETCH)) {
 			out += sprintf(out, "start = %8.8llx, "
@@ -69,8 +67,7 @@
 		}
 	}
 	out += sprintf(out, "Free resources: IO\n");
-	for (index = 0; index < PCI_BUS_NUM_RESOURCES; index++) {
-		res = bus->resource[index];
+	pci_bus_for_each_resource(bus, res, index) {
 		if (res && (res->flags & IORESOURCE_IO)) {
 			out += sprintf(out, "start = %8.8llx, "
 					"length = %8.8llx\n",
diff --git a/drivers/pci/legacy.c b/drivers/pci/legacy.c
deleted file mode 100644
index 871f65c..0000000
--- a/drivers/pci/legacy.c
+++ /dev/null
@@ -1,34 +0,0 @@
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include "pci.h"
-
-/**
- * pci_find_device - begin or continue searching for a PCI device by vendor/device id
- * @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids
- * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids
- * @from: Previous PCI device found in search, or %NULL for new search.
- *
- * Iterates through the list of known PCI devices.  If a PCI device is found
- * with a matching @vendor and @device, a pointer to its device structure is
- * returned.  Otherwise, %NULL is returned.
- * A new search is initiated by passing %NULL as the @from argument.
- * Otherwise if @from is not %NULL, searches continue from next device
- * on the global list.
- *
- * NOTE: Do not use this function any more; use pci_get_device() instead, as
- * the PCI device returned by this function can disappear at any moment in
- * time.
- */
-struct pci_dev *pci_find_device(unsigned int vendor, unsigned int device,
-				struct pci_dev *from)
-{
-	struct pci_dev *pdev;
-
-	pci_dev_get(from);
-	pdev = pci_get_subsys(vendor, device, PCI_ANY_ID, PCI_ANY_ID, from);
-	pci_dev_put(pdev);
-	return pdev;
-}
-EXPORT_SYMBOL(pci_find_device);
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 7e28295..c0c7391 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -16,8 +16,144 @@
 #include <acpi/acpi_bus.h>
 
 #include <linux/pci-acpi.h>
+#include <linux/pm_runtime.h>
 #include "pci.h"
 
+static DEFINE_MUTEX(pci_acpi_pm_notify_mtx);
+
+/**
+ * pci_acpi_wake_bus - Wake-up notification handler for root buses.
+ * @handle: ACPI handle of a device the notification is for.
+ * @event: Type of the signaled event.
+ * @context: PCI root bus to wake up devices on.
+ */
+static void pci_acpi_wake_bus(acpi_handle handle, u32 event, void *context)
+{
+	struct pci_bus *pci_bus = context;
+
+	if (event == ACPI_NOTIFY_DEVICE_WAKE && pci_bus)
+		pci_pme_wakeup_bus(pci_bus);
+}
+
+/**
+ * pci_acpi_wake_dev - Wake-up notification handler for PCI devices.
+ * @handle: ACPI handle of a device the notification is for.
+ * @event: Type of the signaled event.
+ * @context: PCI device object to wake up.
+ */
+static void pci_acpi_wake_dev(acpi_handle handle, u32 event, void *context)
+{
+	struct pci_dev *pci_dev = context;
+
+	if (event == ACPI_NOTIFY_DEVICE_WAKE && pci_dev) {
+		pci_check_pme_status(pci_dev);
+		pm_runtime_resume(&pci_dev->dev);
+		if (pci_dev->subordinate)
+			pci_pme_wakeup_bus(pci_dev->subordinate);
+	}
+}
+
+/**
+ * add_pm_notifier - Register PM notifier for given ACPI device.
+ * @dev: ACPI device to add the notifier for.
+ * @context: PCI device or bus to check for PME status if an event is signaled.
+ *
+ * NOTE: @dev need not be a run-wake or wake-up device to be a valid source of
+ * PM wake-up events.  For example, wake-up events may be generated for bridges
+ * if one of the devices below the bridge is signaling PME, even if the bridge
+ * itself doesn't have a wake-up GPE associated with it.
+ */
+static acpi_status add_pm_notifier(struct acpi_device *dev,
+				   acpi_notify_handler handler,
+				   void *context)
+{
+	acpi_status status = AE_ALREADY_EXISTS;
+
+	mutex_lock(&pci_acpi_pm_notify_mtx);
+
+	if (dev->wakeup.flags.notifier_present)
+		goto out;
+
+	status = acpi_install_notify_handler(dev->handle,
+					     ACPI_SYSTEM_NOTIFY,
+					     handler, context);
+	if (ACPI_FAILURE(status))
+		goto out;
+
+	dev->wakeup.flags.notifier_present = true;
+
+ out:
+	mutex_unlock(&pci_acpi_pm_notify_mtx);
+	return status;
+}
+
+/**
+ * remove_pm_notifier - Unregister PM notifier from given ACPI device.
+ * @dev: ACPI device to remove the notifier from.
+ */
+static acpi_status remove_pm_notifier(struct acpi_device *dev,
+				      acpi_notify_handler handler)
+{
+	acpi_status status = AE_BAD_PARAMETER;
+
+	mutex_lock(&pci_acpi_pm_notify_mtx);
+
+	if (!dev->wakeup.flags.notifier_present)
+		goto out;
+
+	status = acpi_remove_notify_handler(dev->handle,
+					    ACPI_SYSTEM_NOTIFY,
+					    handler);
+	if (ACPI_FAILURE(status))
+		goto out;
+
+	dev->wakeup.flags.notifier_present = false;
+
+ out:
+	mutex_unlock(&pci_acpi_pm_notify_mtx);
+	return status;
+}
+
+/**
+ * pci_acpi_add_bus_pm_notifier - Register PM notifier for given PCI bus.
+ * @dev: ACPI device to add the notifier for.
+ * @pci_bus: PCI bus to walk checking for PME status if an event is signaled.
+ */
+acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev,
+					 struct pci_bus *pci_bus)
+{
+	return add_pm_notifier(dev, pci_acpi_wake_bus, pci_bus);
+}
+
+/**
+ * pci_acpi_remove_bus_pm_notifier - Unregister PCI bus PM notifier.
+ * @dev: ACPI device to remove the notifier from.
+ */
+acpi_status pci_acpi_remove_bus_pm_notifier(struct acpi_device *dev)
+{
+	return remove_pm_notifier(dev, pci_acpi_wake_bus);
+}
+
+/**
+ * pci_acpi_add_pm_notifier - Register PM notifier for given PCI device.
+ * @dev: ACPI device to add the notifier for.
+ * @pci_dev: PCI device to check for the PME status if an event is signaled.
+ */
+acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev,
+				     struct pci_dev *pci_dev)
+{
+	return add_pm_notifier(dev, pci_acpi_wake_dev, pci_dev);
+}
+
+/**
+ * pci_acpi_remove_pm_notifier - Unregister PCI device PM notifier.
+ * @dev: ACPI device to remove the notifier from.
+ */
+acpi_status pci_acpi_remove_pm_notifier(struct acpi_device *dev)
+{
+	return remove_pm_notifier(dev, pci_acpi_wake_dev);
+}
+
 /*
  * _SxD returns the D-state with the highest power
  * (lowest D-state number) supported in the S-state "x".
@@ -131,12 +267,87 @@
 	return 0;
 }
 
+/**
+ * acpi_dev_run_wake - Enable/disable wake-up for given device.
+ * @phys_dev: Device to enable or disable platform wake-up events for.
+ * @enable: Whether to enable or disable the wake-up functionality.
+ *
+ * Find the ACPI device object corresponding to @phys_dev and try to
+ * enable/disable the GPE associated with it.
+ */
+static int acpi_dev_run_wake(struct device *phys_dev, bool enable)
+{
+	struct acpi_device *dev;
+	acpi_handle handle;
+	int error = -ENODEV;
+
+	if (!device_run_wake(phys_dev))
+		return -EINVAL;
+
+	handle = DEVICE_ACPI_HANDLE(phys_dev);
+	if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &dev))) {
+		dev_dbg(phys_dev, "ACPI handle has no context in %s!\n",
+			__func__);
+		return -ENODEV;
+	}
+
+	if (enable) {
+		if (!dev->wakeup.run_wake_count++) {
+			acpi_enable_wakeup_device_power(dev, ACPI_STATE_S0);
+			acpi_enable_gpe(dev->wakeup.gpe_device,
+					dev->wakeup.gpe_number,
+					ACPI_GPE_TYPE_RUNTIME);
+		}
+	} else if (dev->wakeup.run_wake_count > 0) {
+		if (!--dev->wakeup.run_wake_count) {
+			acpi_disable_gpe(dev->wakeup.gpe_device,
+					 dev->wakeup.gpe_number,
+					 ACPI_GPE_TYPE_RUNTIME);
+			acpi_disable_wakeup_device_power(dev);
+		}
+	} else {
+		error = -EALREADY;
+	}
+
+	return error;
+}
+
+static void acpi_pci_propagate_run_wake(struct pci_bus *bus, bool enable)
+{
+	while (bus->parent) {
+		struct pci_dev *bridge = bus->self;
+
+		if (bridge->pme_interrupt)
+			return;
+		if (!acpi_dev_run_wake(&bridge->dev, enable))
+			return;
+		bus = bus->parent;
+	}
+
+	/* We have reached the root bus. */
+	if (bus->bridge)
+		acpi_dev_run_wake(bus->bridge, enable);
+}
+
+static int acpi_pci_run_wake(struct pci_dev *dev, bool enable)
+{
+	if (dev->pme_interrupt)
+		return 0;
+
+	if (!acpi_dev_run_wake(&dev->dev, enable))
+		return 0;
+
+	acpi_pci_propagate_run_wake(dev->bus, enable);
+	return 0;
+}
+
 static struct pci_platform_pm_ops acpi_pci_platform_pm = {
 	.is_manageable = acpi_pci_power_manageable,
 	.set_state = acpi_pci_set_power_state,
 	.choose_state = acpi_pci_choose_state,
 	.can_wakeup = acpi_pci_can_wakeup,
 	.sleep_wake = acpi_pci_sleep_wake,
+	.run_wake = acpi_pci_run_wake,
 };
 
 /* ACPI bus type */
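
The wake-up notifier helpers added above are meant to be called from the
ACPI/PCI binding code when a PCI device or root bridge gains an ACPI
companion; that glue lives outside this hunk.  A hedged sketch, assuming the
matching declarations land in <linux/pci-acpi.h>:

  #include <linux/acpi.h>
  #include <linux/pci.h>
  #include <linux/pci-acpi.h>

  /* Hypothetical binding-time hook, not part of this patch. */
  static void example_setup_pci_wakeup(struct acpi_device *adev,
  				     struct pci_dev *pci_dev)
  {
  	acpi_status status;

  	status = pci_acpi_add_pm_notifier(adev, pci_dev);
  	if (ACPI_FAILURE(status))
  		dev_dbg(&pci_dev->dev, "no ACPI wake-up notifier\n");
  }
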
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index e5d47be..f9a0aec 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -17,6 +17,7 @@
 #include <linux/slab.h>
 #include <linux/sched.h>
 #include <linux/cpu.h>
+#include <linux/pm_runtime.h>
 #include "pci.h"
 
 struct pci_dynid {
@@ -404,6 +405,35 @@
 	pci_msix_shutdown(pci_dev);
 }
 
+#ifdef CONFIG_PM_OPS
+
+/* Auxiliary functions used for system resume and run-time resume. */
+
+/**
+ * pci_restore_standard_config - restore standard config registers of PCI device
+ * @pci_dev: PCI device to handle
+ */
+static int pci_restore_standard_config(struct pci_dev *pci_dev)
+{
+	pci_update_current_state(pci_dev, PCI_UNKNOWN);
+
+	if (pci_dev->current_state != PCI_D0) {
+		int error = pci_set_power_state(pci_dev, PCI_D0);
+		if (error)
+			return error;
+	}
+
+	return pci_restore_state(pci_dev);
+}
+
+static void pci_pm_default_resume_early(struct pci_dev *pci_dev)
+{
+	pci_restore_standard_config(pci_dev);
+	pci_fixup_device(pci_fixup_resume_early, pci_dev);
+}
+
+#endif
+
 #ifdef CONFIG_PM_SLEEP
 
 /*
@@ -520,29 +550,6 @@
 
 /* Auxiliary functions used by the new power management framework */
 
-/**
- * pci_restore_standard_config - restore standard config registers of PCI device
- * @pci_dev: PCI device to handle
- */
-static int pci_restore_standard_config(struct pci_dev *pci_dev)
-{
-	pci_update_current_state(pci_dev, PCI_UNKNOWN);
-
-	if (pci_dev->current_state != PCI_D0) {
-		int error = pci_set_power_state(pci_dev, PCI_D0);
-		if (error)
-			return error;
-	}
-
-	return pci_restore_state(pci_dev);
-}
-
-static void pci_pm_default_resume_noirq(struct pci_dev *pci_dev)
-{
-	pci_restore_standard_config(pci_dev);
-	pci_fixup_device(pci_fixup_resume_early, pci_dev);
-}
-
 static void pci_pm_default_resume(struct pci_dev *pci_dev)
 {
 	pci_fixup_device(pci_fixup_resume, pci_dev);
@@ -581,6 +588,17 @@
 	struct device_driver *drv = dev->driver;
 	int error = 0;
 
+	/*
+	 * PCI devices suspended at run time need to be resumed at this
+	 * point, because in general it is necessary to reconfigure them for
+	 * system suspend.  Namely, if the device is supposed to wake up the
+	 * system from the sleep state, we may need to reconfigure it for this
+	 * purpose.  In turn, if the device is not supposed to wake up the
+	 * system from the sleep state, we'll have to prevent it from signaling
+	 * wake-up.
+	 */
+	pm_runtime_resume(dev);
+
 	if (drv && drv->pm && drv->pm->prepare)
 		error = drv->pm->prepare(dev);
 
@@ -595,6 +613,13 @@
 		drv->pm->complete(dev);
 }
 
+#else /* !CONFIG_PM_SLEEP */
+
+#define pci_pm_prepare	NULL
+#define pci_pm_complete	NULL
+
+#endif /* !CONFIG_PM_SLEEP */
+
 #ifdef CONFIG_SUSPEND
 
 static int pci_pm_suspend(struct device *dev)
@@ -681,7 +706,7 @@
 	struct device_driver *drv = dev->driver;
 	int error = 0;
 
-	pci_pm_default_resume_noirq(pci_dev);
+	pci_pm_default_resume_early(pci_dev);
 
 	if (pci_has_legacy_pm_support(pci_dev))
 		return pci_legacy_resume_early(dev);
@@ -879,7 +904,7 @@
 	struct device_driver *drv = dev->driver;
 	int error = 0;
 
-	pci_pm_default_resume_noirq(pci_dev);
+	pci_pm_default_resume_early(pci_dev);
 
 	if (pci_has_legacy_pm_support(pci_dev))
 		return pci_legacy_resume_early(dev);
@@ -931,6 +956,84 @@
 
 #endif /* !CONFIG_HIBERNATION */
 
+#ifdef CONFIG_PM_RUNTIME
+
+static int pci_pm_runtime_suspend(struct device *dev)
+{
+	struct pci_dev *pci_dev = to_pci_dev(dev);
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+	pci_power_t prev = pci_dev->current_state;
+	int error;
+
+	if (!pm || !pm->runtime_suspend)
+		return -ENOSYS;
+
+	error = pm->runtime_suspend(dev);
+	suspend_report_result(pm->runtime_suspend, error);
+	if (error)
+		return error;
+
+	pci_fixup_device(pci_fixup_suspend, pci_dev);
+
+	if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
+	    && pci_dev->current_state != PCI_UNKNOWN) {
+		WARN_ONCE(pci_dev->current_state != prev,
+			"PCI PM: State of device not saved by %pF\n",
+			pm->runtime_suspend);
+		return 0;
+	}
+
+	if (!pci_dev->state_saved)
+		pci_save_state(pci_dev);
+
+	pci_finish_runtime_suspend(pci_dev);
+
+	return 0;
+}
+
+static int pci_pm_runtime_resume(struct device *dev)
+{
+	struct pci_dev *pci_dev = to_pci_dev(dev);
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	if (!pm || !pm->runtime_resume)
+		return -ENOSYS;
+
+	pci_pm_default_resume_early(pci_dev);
+	__pci_enable_wake(pci_dev, PCI_D0, true, false);
+	pci_fixup_device(pci_fixup_resume, pci_dev);
+
+	return pm->runtime_resume(dev);
+}
+
+static int pci_pm_runtime_idle(struct device *dev)
+{
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	if (!pm)
+		return -ENOSYS;
+
+	if (pm->runtime_idle) {
+		int ret = pm->runtime_idle(dev);
+		if (ret)
+			return ret;
+	}
+
+	pm_runtime_suspend(dev);
+
+	return 0;
+}
+
+#else /* !CONFIG_PM_RUNTIME */
+
+#define pci_pm_runtime_suspend	NULL
+#define pci_pm_runtime_resume	NULL
+#define pci_pm_runtime_idle	NULL
+
+#endif /* !CONFIG_PM_RUNTIME */
+
+#ifdef CONFIG_PM_OPS
+
 const struct dev_pm_ops pci_dev_pm_ops = {
 	.prepare = pci_pm_prepare,
 	.complete = pci_pm_complete,
@@ -946,15 +1049,18 @@
 	.thaw_noirq = pci_pm_thaw_noirq,
 	.poweroff_noirq = pci_pm_poweroff_noirq,
 	.restore_noirq = pci_pm_restore_noirq,
+	.runtime_suspend = pci_pm_runtime_suspend,
+	.runtime_resume = pci_pm_runtime_resume,
+	.runtime_idle = pci_pm_runtime_idle,
 };
 
 #define PCI_PM_OPS_PTR	(&pci_dev_pm_ops)
 
-#else /* !CONFIG_PM_SLEEP */
+#else /* !CONFIG_PM_OPS */
 
 #define PCI_PM_OPS_PTR	NULL
 
-#endif /* !CONFIG_PM_SLEEP */
+#endif /* !CONFIG_PM_OPS */
 
 /**
  * __pci_register_driver - register a new pci driver
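
The new pci_pm_runtime_* callbacks simply dispatch to a driver's dev_pm_ops,
so a PCI driver opts into run-time PM by providing runtime handlers and
letting the PCI core do the config-space and wake-up bookkeeping.  A minimal,
hypothetical sketch (ID table, probe and remove omitted):

  #include <linux/pci.h>
  #include <linux/pm.h>

  static int foo_runtime_suspend(struct device *dev)
  {
  	/* Quiesce the hardware here; the PCI core then saves config space,
  	 * arms wake-up and picks a low-power state via
  	 * pci_finish_runtime_suspend(). */
  	return 0;
  }

  static int foo_runtime_resume(struct device *dev)
  {
  	/* The PCI core has already restored config space and put the device
  	 * back into D0 before this is called. */
  	return 0;
  }

  static const struct dev_pm_ops foo_pm_ops = {
  	.runtime_suspend	= foo_runtime_suspend,
  	.runtime_resume		= foo_runtime_resume,
  };

  static struct pci_driver foo_pci_driver = {
  	.name		= "foo",
  	/* .id_table, .probe and .remove omitted from this sketch */
  	.driver.pm	= &foo_pm_ops,
  };
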
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 315fea4..f4a2738 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -19,8 +19,8 @@
 #include <linux/pci-aspm.h>
 #include <linux/pm_wakeup.h>
 #include <linux/interrupt.h>
-#include <asm/dma.h>	/* isa_dma_bridge_buggy */
 #include <linux/device.h>
+#include <linux/pm_runtime.h>
 #include <asm/setup.h>
 #include "pci.h"
 
@@ -29,6 +29,12 @@
 };
 EXPORT_SYMBOL_GPL(pci_power_names);
 
+int isa_dma_bridge_buggy;
+EXPORT_SYMBOL(isa_dma_bridge_buggy);
+
+int pci_pci_problems;
+EXPORT_SYMBOL(pci_pci_problems);
+
 unsigned int pci_pm_d3_delay;
 
 static void pci_dev_d3_sleep(struct pci_dev *dev)
@@ -380,10 +386,9 @@
 {
 	const struct pci_bus *bus = dev->bus;
 	int i;
-	struct resource *best = NULL;
+	struct resource *best = NULL, *r;
 
-	for(i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
-		struct resource *r = bus->resource[i];
+	pci_bus_for_each_resource(bus, r, i) {
 		if (!r)
 			continue;
 		if (res->start && !(res->start >= r->start && res->end <= r->end))
@@ -457,6 +462,12 @@
 			pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
 }
 
+static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
+{
+	return pci_platform_pm ?
+			pci_platform_pm->run_wake(dev, enable) : -ENODEV;
+}
+
 /**
  * pci_raw_set_power_state - Use PCI PM registers to set the power state of
  *                           given PCI device
@@ -1190,6 +1201,66 @@
 }
 
 /**
+ * pci_check_pme_status - Check if given device has generated PME.
+ * @dev: Device to check.
+ *
+ * Check the PME status of the device and if set, clear it and clear PME enable
+ * (if set).  Return 'true' if PME status and PME enable were both set or
+ * 'false' otherwise.
+ */
+bool pci_check_pme_status(struct pci_dev *dev)
+{
+	int pmcsr_pos;
+	u16 pmcsr;
+	bool ret = false;
+
+	if (!dev->pm_cap)
+		return false;
+
+	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
+	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
+	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
+		return false;
+
+	/* Clear PME status. */
+	pmcsr |= PCI_PM_CTRL_PME_STATUS;
+	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
+		/* Disable PME to avoid interrupt flood. */
+		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
+		ret = true;
+	}
+
+	pci_write_config_word(dev, pmcsr_pos, pmcsr);
+
+	return ret;
+}
+
+/**
+ * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
+ * @dev: Device to handle.
+ * @ign: Ignored.
+ *
+ * Check if @dev has generated PME and queue a resume request for it in that
+ * case.
+ */
+static int pci_pme_wakeup(struct pci_dev *dev, void *ign)
+{
+	if (pci_check_pme_status(dev))
+		pm_request_resume(&dev->dev);
+	return 0;
+}
+
+/**
+ * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
+ * @bus: Top bus of the subtree to walk.
+ */
+void pci_pme_wakeup_bus(struct pci_bus *bus)
+{
+	if (bus)
+		pci_walk_bus(bus, pci_pme_wakeup, NULL);
+}
+
+/**
  * pci_pme_capable - check the capability of PCI device to generate PME#
  * @dev: PCI device to handle.
  * @state: PCI state from which device will issue PME#.
@@ -1230,9 +1301,10 @@
 }
 
 /**
- * pci_enable_wake - enable PCI device as wakeup event source
+ * __pci_enable_wake - enable PCI device as wakeup event source
  * @dev: PCI device affected
  * @state: PCI state from which device will issue wakeup events
+ * @runtime: True if the events are to be generated at run time
  * @enable: True to enable event generation; false to disable
  *
  * This enables the device as a wakeup event source, or disables it.
@@ -1248,11 +1320,12 @@
  * Error code depending on the platform is returned if both the platform and
  * the native mechanism fail to enable the generation of wake-up events
  */
-int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
+int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
+		      bool runtime, bool enable)
 {
 	int ret = 0;
 
-	if (enable && !device_may_wakeup(&dev->dev))
+	if (enable && !runtime && !device_may_wakeup(&dev->dev))
 		return -EINVAL;
 
 	/* Don't do the same thing twice in a row for one device. */
@@ -1272,19 +1345,24 @@
 			pci_pme_active(dev, true);
 		else
 			ret = 1;
-		error = platform_pci_sleep_wake(dev, true);
+		error = runtime ? platform_pci_run_wake(dev, true) :
+					platform_pci_sleep_wake(dev, true);
 		if (ret)
 			ret = error;
 		if (!ret)
 			dev->wakeup_prepared = true;
 	} else {
-		platform_pci_sleep_wake(dev, false);
+		if (runtime)
+			platform_pci_run_wake(dev, false);
+		else
+			platform_pci_sleep_wake(dev, false);
 		pci_pme_active(dev, false);
 		dev->wakeup_prepared = false;
 	}
 
 	return ret;
 }
+EXPORT_SYMBOL(__pci_enable_wake);
 
 /**
  * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
@@ -1394,6 +1472,66 @@
 }
 
 /**
+ * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
+ * @dev: PCI device being suspended.
+ *
+ * Prepare @dev to generate wake-up events at run time and put it into a low
+ * power state.
+ */
+int pci_finish_runtime_suspend(struct pci_dev *dev)
+{
+	pci_power_t target_state = pci_target_state(dev);
+	int error;
+
+	if (target_state == PCI_POWER_ERROR)
+		return -EIO;
+
+	__pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));
+
+	error = pci_set_power_state(dev, target_state);
+
+	if (error)
+		__pci_enable_wake(dev, target_state, true, false);
+
+	return error;
+}
+
+/**
+ * pci_dev_run_wake - Check if device can generate run-time wake-up events.
+ * @dev: Device to check.
+ *
+ * Return true if the device itself is capable of generating wake-up events
+ * (through the platform or using the native PCIe PME) or if the device supports
+ * PME and one of its upstream bridges can generate wake-up events.
+ */
+bool pci_dev_run_wake(struct pci_dev *dev)
+{
+	struct pci_bus *bus = dev->bus;
+
+	if (device_run_wake(&dev->dev))
+		return true;
+
+	if (!dev->pme_support)
+		return false;
+
+	while (bus->parent) {
+		struct pci_dev *bridge = bus->self;
+
+		if (device_run_wake(&bridge->dev))
+			return true;
+
+		bus = bus->parent;
+	}
+
+	/* We have reached the root bus. */
+	if (bus->bridge)
+		return device_run_wake(bus->bridge);
+
+	return false;
+}
+EXPORT_SYMBOL_GPL(pci_dev_run_wake);
+
+/**
  * pci_pm_init - Initialize PM functions of given PCI device
  * @dev: PCI device to handle.
  */
@@ -2871,7 +3009,6 @@
 EXPORT_SYMBOL(pci_restore_state);
 EXPORT_SYMBOL(pci_pme_capable);
 EXPORT_SYMBOL(pci_pme_active);
-EXPORT_SYMBOL(pci_enable_wake);
 EXPORT_SYMBOL(pci_wake_from_d3);
 EXPORT_SYMBOL(pci_target_state);
 EXPORT_SYMBOL(pci_prepare_to_sleep);
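
pci_dev_run_wake() gives drivers a cheap way to decide whether powering a
device down at run time is worthwhile on the current platform.  A
hypothetical runtime_idle callback built on it (the 5-second delay is an
arbitrary choice, not something this patch mandates):

  #include <linux/pci.h>
  #include <linux/pm_runtime.h>

  static int example_runtime_idle(struct device *dev)
  {
  	struct pci_dev *pdev = to_pci_dev(dev);

  	/* Only power down when a run-time wake-up can bring us back. */
  	if (pci_dev_run_wake(pdev))
  		pm_schedule_suspend(dev, 5000);

  	/* A nonzero return keeps pci_pm_runtime_idle() from suspending the
  	 * device synchronously on top of the scheduled request. */
  	return -EBUSY;
  }

Such a callback would be wired up through the .runtime_idle member of the
driver's dev_pm_ops, as in the sketch after the pci-driver.c hunks above.
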
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index fbd0e3a..4eb10f4 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -35,6 +35,10 @@
  *
  * @sleep_wake: enables/disables the system wake up capability of given device
  *
+ * @run_wake: enables/disables the platform's generation of run-time wake-up
+ *		events for the given device (the device's wake-up capability
+ *		has to be enabled by @sleep_wake for this feature to work)
+ *
  * If given platform is generally capable of power managing PCI devices, all of
  * these callbacks are mandatory.
  */
@@ -44,11 +48,16 @@
 	pci_power_t (*choose_state)(struct pci_dev *dev);
 	bool (*can_wakeup)(struct pci_dev *dev);
 	int (*sleep_wake)(struct pci_dev *dev, bool enable);
+	int (*run_wake)(struct pci_dev *dev, bool enable);
 };
 
 extern int pci_set_platform_pm(struct pci_platform_pm_ops *ops);
 extern void pci_update_current_state(struct pci_dev *dev, pci_power_t state);
 extern void pci_disable_enabled_device(struct pci_dev *dev);
+extern bool pci_check_pme_status(struct pci_dev *dev);
+extern int pci_finish_runtime_suspend(struct pci_dev *dev);
+extern int __pci_pme_wakeup(struct pci_dev *dev, void *ign);
+extern void pci_pme_wakeup_bus(struct pci_bus *bus);
 extern void pci_pm_init(struct pci_dev *dev);
 extern void platform_pci_wakeup_init(struct pci_dev *dev);
 extern void pci_allocate_cap_save_buffers(struct pci_dev *dev);
@@ -319,6 +328,13 @@
 	int (*reset)(struct pci_dev *dev, int probe);
 };
 
+#ifdef CONFIG_PCI_QUIRKS
 extern int pci_dev_specific_reset(struct pci_dev *dev, int probe);
+#else
+static inline int pci_dev_specific_reset(struct pci_dev *dev, int probe)
+{
+	return -ENOTTY;
+}
+#endif
 
 #endif /* DRIVERS_PCI_H */
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
index 5a0c6ad..b8b494b 100644
--- a/drivers/pci/pcie/Kconfig
+++ b/drivers/pci/pcie/Kconfig
@@ -46,3 +46,7 @@
 	help
 	  This enables PCI Express ASPM debug support. It will add per-device
 	  interface to control ASPM.
+
+config PCIE_PME
+	def_bool y
+	depends on PCIEPORTBUS && PM_RUNTIME && EXPERIMENTAL && ACPI
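
CONFIG_PCIE_PME is not user-selectable (def_bool); it is enabled
automatically once its dependencies are met, e.g. with a configuration such
as:

  CONFIG_PCIEPORTBUS=y
  CONFIG_PM_RUNTIME=y
  CONFIG_EXPERIMENTAL=y
  CONFIG_ACPI=y
  # CONFIG_PCIE_PME=y then follows from the def_bool above
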
diff --git a/drivers/pci/pcie/Makefile b/drivers/pci/pcie/Makefile
index 11f6bb1e..ea65454 100644
--- a/drivers/pci/pcie/Makefile
+++ b/drivers/pci/pcie/Makefile
@@ -11,3 +11,5 @@
 
 # Build PCI Express AER if needed
 obj-$(CONFIG_PCIEAER)		+= aer/
+
+obj-$(CONFIG_PCIE_PME) += pme/
diff --git a/drivers/pci/pcie/pme/Makefile b/drivers/pci/pcie/pme/Makefile
new file mode 100644
index 0000000..8b92380
--- /dev/null
+++ b/drivers/pci/pcie/pme/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for PCI-Express Root Port PME signaling driver
+#
+
+obj-$(CONFIG_PCIE_PME) += pmedriver.o
+
+pmedriver-objs := pcie_pme.o
+pmedriver-$(CONFIG_ACPI) += pcie_pme_acpi.o
diff --git a/drivers/pci/pcie/pme/pcie_pme.c b/drivers/pci/pcie/pme/pcie_pme.c
new file mode 100644
index 0000000..7b3cbff
--- /dev/null
+++ b/drivers/pci/pcie/pme/pcie_pme.c
@@ -0,0 +1,505 @@
+/*
+ * PCIe Native PME support
+ *
+ * Copyright (C) 2007 - 2009 Intel Corp
+ * Copyright (C) 2007 - 2009 Shaohua Li <shaohua.li@intel.com>
+ * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License V2.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/pcieport_if.h>
+#include <linux/acpi.h>
+#include <linux/pci-acpi.h>
+#include <linux/pm_runtime.h>
+
+#include "../../pci.h"
+#include "pcie_pme.h"
+
+#define PCI_EXP_RTSTA_PME	0x10000 /* PME status */
+#define PCI_EXP_RTSTA_PENDING	0x20000 /* PME pending */
+
+/*
+ * If set, this switch will prevent the PCIe root port PME service driver from
+ * being registered.  Consequently, the interrupt-based PCIe PME signaling will
+ * not be used by any PCIe root ports in that case.
+ */
+static bool pcie_pme_disabled;
+
+/*
+ * The PCI Express Base Specification 2.0, Section 6.1.8, states the following:
+ * "In order to maintain compatibility with non-PCI Express-aware system
+ * software, system power management logic must be configured by firmware to use
+ * the legacy mechanism of signaling PME by default.  PCI Express-aware system
+ * software must notify the firmware prior to enabling native, interrupt-based
+ * PME signaling."  However, if the platform doesn't provide us with a suitable
+ * notification mechanism or the notification fails, it is not clear whether or
+ * not we are supposed to use the interrupt-based PCIe PME signaling.  The
+ * switch below can be used to indicate the desired behaviour.  When set, it
+ * will make the kernel use the interrupt-based PCIe PME signaling regardless of
+ * the platform notification status, although the kernel will attempt to notify
+ * the platform anyway.  When unset, it will prevent the kernel from using the
+ * the interrupt-based PCIe PME signaling if the platform notification fails,
+ * which is the default.
+ */
+static bool pcie_pme_force_enable;
+
+/*
+ * If this switch is set, MSI will not be used for PCIe PME signaling.  This
+ * causes the PCIe port driver to use INTx interrupts only, but it turns out
+ * that using MSI for PCIe PME signaling doesn't play well with PCIe PME-based
+ * wake-up from system sleep states.
+ */
+bool pcie_pme_msi_disabled;
+
+static int __init pcie_pme_setup(char *str)
+{
+	if (!strcmp(str, "off"))
+		pcie_pme_disabled = true;
+	else if (!strcmp(str, "force"))
+		pcie_pme_force_enable = true;
+	else if (!strcmp(str, "nomsi"))
+		pcie_pme_msi_disabled = true;
+	return 1;
+}
+__setup("pcie_pme=", pcie_pme_setup);
+
+/**
+ * pcie_pme_platform_setup - Ensure that the kernel controls the PCIe PME.
+ * @srv: PCIe PME root port service to use for carrying out the check.
+ *
+ * Notify the platform that the native PCIe PME is going to be used and return
+ * 'true' if the control of the PCIe PME registers has been acquired from the
+ * platform.
+ */
+static bool pcie_pme_platform_setup(struct pcie_device *srv)
+{
+	if (!pcie_pme_platform_notify(srv))
+		return true;
+	return pcie_pme_force_enable;
+}
+
+struct pcie_pme_service_data {
+	spinlock_t lock;
+	struct pcie_device *srv;
+	struct work_struct work;
+	bool noirq; /* Don't enable the PME interrupt used by this service. */
+};
+
+/**
+ * pcie_pme_interrupt_enable - Enable/disable PCIe PME interrupt generation.
+ * @dev: PCIe root port or event collector.
+ * @enable: Enable or disable the interrupt.
+ */
+static void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable)
+{
+	int rtctl_pos;
+	u16 rtctl;
+
+	rtctl_pos = pci_pcie_cap(dev) + PCI_EXP_RTCTL;
+
+	pci_read_config_word(dev, rtctl_pos, &rtctl);
+	if (enable)
+		rtctl |= PCI_EXP_RTCTL_PMEIE;
+	else
+		rtctl &= ~PCI_EXP_RTCTL_PMEIE;
+	pci_write_config_word(dev, rtctl_pos, rtctl);
+}
+
+/**
+ * pcie_pme_clear_status - Clear root port PME interrupt status.
+ * @dev: PCIe root port or event collector.
+ */
+static void pcie_pme_clear_status(struct pci_dev *dev)
+{
+	int rtsta_pos;
+	u32 rtsta;
+
+	rtsta_pos = pci_pcie_cap(dev) + PCI_EXP_RTSTA;
+
+	pci_read_config_dword(dev, rtsta_pos, &rtsta);
+	rtsta |= PCI_EXP_RTSTA_PME;
+	pci_write_config_dword(dev, rtsta_pos, rtsta);
+}
+
+/**
+ * pcie_pme_walk_bus - Scan a PCI bus for devices asserting PME#.
+ * @bus: PCI bus to scan.
+ *
+ * Scan given PCI bus and all buses under it for devices asserting PME#.
+ */
+static bool pcie_pme_walk_bus(struct pci_bus *bus)
+{
+	struct pci_dev *dev;
+	bool ret = false;
+
+	list_for_each_entry(dev, &bus->devices, bus_list) {
+		/* Skip PCIe devices in case we started from a root port. */
+		if (!pci_is_pcie(dev) && pci_check_pme_status(dev)) {
+			pm_request_resume(&dev->dev);
+			ret = true;
+		}
+
+		if (dev->subordinate && pcie_pme_walk_bus(dev->subordinate))
+			ret = true;
+	}
+
+	return ret;
+}
+
+/**
+ * pcie_pme_from_pci_bridge - Check if PCIe-PCI bridge generated a PME.
+ * @bus: Secondary bus of the bridge.
+ * @devfn: Device/function number to check.
+ *
+ * PME from PCI devices under a PCIe-PCI bridge may be converted to an in-band
+ * PCIe PME message.  In that case the bridge should use the Requester ID
+ * of device/function number 0 on its secondary bus.
+ */
+static bool pcie_pme_from_pci_bridge(struct pci_bus *bus, u8 devfn)
+{
+	struct pci_dev *dev;
+	bool found = false;
+
+	if (devfn)
+		return false;
+
+	dev = pci_dev_get(bus->self);
+	if (!dev)
+		return false;
+
+	if (pci_is_pcie(dev) && dev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) {
+		down_read(&pci_bus_sem);
+		if (pcie_pme_walk_bus(bus))
+			found = true;
+		up_read(&pci_bus_sem);
+	}
+
+	pci_dev_put(dev);
+	return found;
+}
+
+/**
+ * pcie_pme_handle_request - Find device that generated PME and handle it.
+ * @port: Root port or event collector that generated the PME interrupt.
+ * @req_id: PCIe Requester ID of the device that generated the PME.
+ */
+static void pcie_pme_handle_request(struct pci_dev *port, u16 req_id)
+{
+	u8 busnr = req_id >> 8, devfn = req_id & 0xff;
+	struct pci_bus *bus;
+	struct pci_dev *dev;
+	bool found = false;
+
+	/* First, check if the PME is from the root port itself. */
+	if (port->devfn == devfn && port->bus->number == busnr) {
+		if (pci_check_pme_status(port)) {
+			pm_request_resume(&port->dev);
+			found = true;
+		} else {
+			/*
+			 * Apparently, the root port generated the PME on behalf
+			 * of a non-PCIe device downstream.  If this is done by
+			 * a root port, the Requester ID field in its status
+			 * register may contain either the root port's, or the
+			 * source device's information (PCI Express Base
+			 * Specification, Rev. 2.0, Section 6.1.9).
+			 */
+			down_read(&pci_bus_sem);
+			found = pcie_pme_walk_bus(port->subordinate);
+			up_read(&pci_bus_sem);
+		}
+		goto out;
+	}
+
+	/* Second, find the bus the source device is on. */
+	bus = pci_find_bus(pci_domain_nr(port->bus), busnr);
+	if (!bus)
+		goto out;
+
+	/* Next, check if the PME is from a PCIe-PCI bridge. */
+	found = pcie_pme_from_pci_bridge(bus, devfn);
+	if (found)
+		goto out;
+
+	/* Finally, try to find the PME source on the bus. */
+	down_read(&pci_bus_sem);
+	list_for_each_entry(dev, &bus->devices, bus_list) {
+		pci_dev_get(dev);
+		if (dev->devfn == devfn) {
+			found = true;
+			break;
+		}
+		pci_dev_put(dev);
+	}
+	up_read(&pci_bus_sem);
+
+	if (found) {
+		/* The device is there, but we have to check its PME status. */
+		found = pci_check_pme_status(dev);
+		if (found)
+			pm_request_resume(&dev->dev);
+		pci_dev_put(dev);
+	} else if (devfn) {
+		/*
+		 * The device is not there, but we can still try to recover by
+		 * assuming that the PME was reported by a PCIe-PCI bridge that
+		 * used devfn different from zero.
+		 */
+		dev_dbg(&port->dev, "PME interrupt generated for "
+			"non-existent device %02x:%02x.%d\n",
+			busnr, PCI_SLOT(devfn), PCI_FUNC(devfn));
+		found = pcie_pme_from_pci_bridge(bus, 0);
+	}
+
+ out:
+	if (!found)
+		dev_dbg(&port->dev, "Spurious native PME interrupt!\n");
+}
+
+/**
+ * pcie_pme_work_fn - Work handler for PCIe PME interrupt.
+ * @work: Work structure giving access to service data.
+ */
+static void pcie_pme_work_fn(struct work_struct *work)
+{
+	struct pcie_pme_service_data *data =
+			container_of(work, struct pcie_pme_service_data, work);
+	struct pci_dev *port = data->srv->port;
+	int rtsta_pos;
+	u32 rtsta;
+
+	rtsta_pos = pci_pcie_cap(port) + PCI_EXP_RTSTA;
+
+	spin_lock_irq(&data->lock);
+
+	for (;;) {
+		if (data->noirq)
+			break;
+
+		pci_read_config_dword(port, rtsta_pos, &rtsta);
+		if (rtsta & PCI_EXP_RTSTA_PME) {
+			/*
+			 * Clear PME status of the port.  If there are other
+			 * pending PMEs, the status will be set again.
+			 */
+			pcie_pme_clear_status(port);
+
+			spin_unlock_irq(&data->lock);
+			pcie_pme_handle_request(port, rtsta & 0xffff);
+			spin_lock_irq(&data->lock);
+
+			continue;
+		}
+
+		/* No need to loop if there are no more PMEs pending. */
+		if (!(rtsta & PCI_EXP_RTSTA_PENDING))
+			break;
+
+		spin_unlock_irq(&data->lock);
+		cpu_relax();
+		spin_lock_irq(&data->lock);
+	}
+
+	if (!data->noirq)
+		pcie_pme_interrupt_enable(port, true);
+
+	spin_unlock_irq(&data->lock);
+}
+
+/**
+ * pcie_pme_irq - Interrupt handler for PCIe root port PME interrupt.
+ * @irq: Interrupt vector.
+ * @context: Interrupt context pointer.
+ */
+static irqreturn_t pcie_pme_irq(int irq, void *context)
+{
+	struct pci_dev *port;
+	struct pcie_pme_service_data *data;
+	int rtsta_pos;
+	u32 rtsta;
+	unsigned long flags;
+
+	port = ((struct pcie_device *)context)->port;
+	data = get_service_data((struct pcie_device *)context);
+
+	rtsta_pos = pci_pcie_cap(port) + PCI_EXP_RTSTA;
+
+	spin_lock_irqsave(&data->lock, flags);
+	pci_read_config_dword(port, rtsta_pos, &rtsta);
+
+	if (!(rtsta & PCI_EXP_RTSTA_PME)) {
+		spin_unlock_irqrestore(&data->lock, flags);
+		return IRQ_NONE;
+	}
+
+	pcie_pme_interrupt_enable(port, false);
+	spin_unlock_irqrestore(&data->lock, flags);
+
+	/* We don't use pm_wq, because it's freezable. */
+	schedule_work(&data->work);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * pcie_pme_set_native - Set the PME interrupt flag for given device.
+ * @dev: PCI device to handle.
+ * @ign: Ignored.
+ */
+static int pcie_pme_set_native(struct pci_dev *dev, void *ign)
+{
+	dev_info(&dev->dev, "Signaling PME through PCIe PME interrupt\n");
+
+	device_set_run_wake(&dev->dev, true);
+	dev->pme_interrupt = true;
+	return 0;
+}
+
+/**
+ * pcie_pme_mark_devices - Set the PME interrupt flag for devices below a port.
+ * @port: PCIe root port or event collector to handle.
+ *
+ * For each device below the given root port, including the port itself (or for each
+ * root complex integrated endpoint if @port is a root complex event collector)
+ * set the flag indicating that it can signal run-time wake-up events via PCIe
+ * PME interrupts.
+ */
+static void pcie_pme_mark_devices(struct pci_dev *port)
+{
+	pcie_pme_set_native(port, NULL);
+	if (port->subordinate) {
+		pci_walk_bus(port->subordinate, pcie_pme_set_native, NULL);
+	} else {
+		struct pci_bus *bus = port->bus;
+		struct pci_dev *dev;
+
+		/* Check if this is a root complex event collector. */
+		if (port->pcie_type != PCI_EXP_TYPE_RC_EC || !bus)
+			return;
+
+		down_read(&pci_bus_sem);
+		list_for_each_entry(dev, &bus->devices, bus_list)
+			if (pci_is_pcie(dev)
+			    && dev->pcie_type == PCI_EXP_TYPE_RC_END)
+				pcie_pme_set_native(dev, NULL);
+		up_read(&pci_bus_sem);
+	}
+}
+
+/**
+ * pcie_pme_probe - Initialize PCIe PME service for given root port.
+ * @srv: PCIe service to initialize.
+ */
+static int pcie_pme_probe(struct pcie_device *srv)
+{
+	struct pci_dev *port;
+	struct pcie_pme_service_data *data;
+	int ret;
+
+	if (!pcie_pme_platform_setup(srv))
+		return -EACCES;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	spin_lock_init(&data->lock);
+	INIT_WORK(&data->work, pcie_pme_work_fn);
+	data->srv = srv;
+	set_service_data(srv, data);
+
+	port = srv->port;
+	pcie_pme_interrupt_enable(port, false);
+	pcie_pme_clear_status(port);
+
+	ret = request_irq(srv->irq, pcie_pme_irq, IRQF_SHARED, "PCIe PME", srv);
+	if (ret) {
+		kfree(data);
+	} else {
+		pcie_pme_mark_devices(port);
+		pcie_pme_interrupt_enable(port, true);
+	}
+
+	return ret;
+}
+
+/**
+ * pcie_pme_suspend - Suspend PCIe PME service device.
+ * @srv: PCIe service device to suspend.
+ */
+static int pcie_pme_suspend(struct pcie_device *srv)
+{
+	struct pcie_pme_service_data *data = get_service_data(srv);
+	struct pci_dev *port = srv->port;
+
+	spin_lock_irq(&data->lock);
+	pcie_pme_interrupt_enable(port, false);
+	pcie_pme_clear_status(port);
+	data->noirq = true;
+	spin_unlock_irq(&data->lock);
+
+	synchronize_irq(srv->irq);
+
+	return 0;
+}
+
+/**
+ * pcie_pme_resume - Resume PCIe PME service device.
+ * @srv: PCIe service device to resume.
+ */
+static int pcie_pme_resume(struct pcie_device *srv)
+{
+	struct pcie_pme_service_data *data = get_service_data(srv);
+	struct pci_dev *port = srv->port;
+
+	spin_lock_irq(&data->lock);
+	data->noirq = false;
+	pcie_pme_clear_status(port);
+	pcie_pme_interrupt_enable(port, true);
+	spin_unlock_irq(&data->lock);
+
+	return 0;
+}
+
+/**
+ * pcie_pme_remove - Prepare PCIe PME service device for removal.
+ * @srv: PCIe service device to remove.
+ */
+static void pcie_pme_remove(struct pcie_device *srv)
+{
+	pcie_pme_suspend(srv);
+	free_irq(srv->irq, srv);
+	kfree(get_service_data(srv));
+}
+
+static struct pcie_port_service_driver pcie_pme_driver = {
+	.name		= "pcie_pme",
+	.port_type 	= PCI_EXP_TYPE_ROOT_PORT,
+	.service 	= PCIE_PORT_SERVICE_PME,
+
+	.probe		= pcie_pme_probe,
+	.suspend	= pcie_pme_suspend,
+	.resume		= pcie_pme_resume,
+	.remove		= pcie_pme_remove,
+};
+
+/**
+ * pcie_pme_service_init - Register the PCIe PME service driver.
+ */
+static int __init pcie_pme_service_init(void)
+{
+	return pcie_pme_disabled ?
+		-ENODEV : pcie_port_service_register(&pcie_pme_driver);
+}
+
+module_init(pcie_pme_service_init);
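
The __setup() handler near the top of this file gives the new service three
command-line switches (values taken directly from pcie_pme_setup() above):

  pcie_pme=off     do not register the PCIe PME service driver at all
  pcie_pme=force   use native PME signaling even if the platform
                   notification fails
  pcie_pme=nomsi   do not use MSI for the root port PME interrupt
                   (the port driver falls back to INTx)
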
diff --git a/drivers/pci/pcie/pme/pcie_pme.h b/drivers/pci/pcie/pme/pcie_pme.h
new file mode 100644
index 0000000..b30d2b7
--- /dev/null
+++ b/drivers/pci/pcie/pme/pcie_pme.h
@@ -0,0 +1,28 @@
+/*
+ * drivers/pci/pcie/pme/pcie_pme.h
+ *
+ * PCI Express Root Port PME signaling support
+ *
+ * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+ */
+
+#ifndef _PCIE_PME_H_
+#define _PCIE_PME_H_
+
+struct pcie_device;
+
+#ifdef CONFIG_ACPI
+extern int pcie_pme_acpi_setup(struct pcie_device *srv);
+
+static inline int pcie_pme_platform_notify(struct pcie_device *srv)
+{
+	return pcie_pme_acpi_setup(srv);
+}
+#else /* !CONFIG_ACPI */
+static inline int pcie_pme_platform_notify(struct pcie_device *srv)
+{
+	return 0;
+}
+#endif /* !CONFIG_ACPI */
+
+#endif
diff --git a/drivers/pci/pcie/pme/pcie_pme_acpi.c b/drivers/pci/pcie/pme/pcie_pme_acpi.c
new file mode 100644
index 0000000..83ab228
--- /dev/null
+++ b/drivers/pci/pcie/pme/pcie_pme_acpi.c
@@ -0,0 +1,54 @@
+/*
+ * PCIe Native PME support, ACPI-related part
+ *
+ * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License V2.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/acpi.h>
+#include <linux/pci-acpi.h>
+#include <linux/pcieport_if.h>
+
+/**
+ * pcie_pme_acpi_setup - Request the ACPI BIOS to release control over PCIe PME.
+ * @srv: PCIe PME service for a root port or event collector.
+ *
+ * Invoked when the PCIe bus type loads the PCIe PME service driver.  To avoid
+ * conflicts with the BIOS, native PCIe PME support requires the BIOS to yield
+ * PCIe PME control to the kernel.
+ */
+int pcie_pme_acpi_setup(struct pcie_device *srv)
+{
+	acpi_status status = AE_NOT_FOUND;
+	struct pci_dev *port = srv->port;
+	acpi_handle handle;
+	int error = 0;
+
+	if (acpi_pci_disabled)
+		return -ENOSYS;
+
+	dev_info(&port->dev, "Requesting control of PCIe PME from ACPI BIOS\n");
+
+	handle = acpi_find_root_bridge_handle(port);
+	if (!handle)
+		return -EINVAL;
+
+	status = acpi_pci_osc_control_set(handle,
+			OSC_PCI_EXPRESS_PME_CONTROL |
+			OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
+	if (ACPI_FAILURE(status)) {
+		dev_info(&port->dev,
+			"Failed to receive control of PCIe PME service: %s\n",
+			(status == AE_SUPPORT || status == AE_NOT_FOUND) ?
+			"no _OSC support" : "ACPI _OSC failed");
+		error = -ENODEV;
+	}
+
+	return error;
+}
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index aaeb9d2..813a5c3 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -30,4 +30,21 @@
 extern int __must_check pcie_port_bus_register(void);
 extern void pcie_port_bus_unregister(void);
 
+#ifdef CONFIG_PCIE_PME
+extern bool pcie_pme_msi_disabled;
+
+static inline void pcie_pme_disable_msi(void)
+{
+	pcie_pme_msi_disabled = true;
+}
+
+static inline bool pcie_pme_no_msi(void)
+{
+	return pcie_pme_msi_disabled;
+}
+#else /* !CONFIG_PCIE_PME */
+static inline void pcie_pme_disable_msi(void) {}
+static inline bool pcie_pme_no_msi(void) { return false; }
+#endif /* !CONFIG_PCIE_PME */
+
 #endif /* _PORTDRV_H_ */
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index b174188..0d34ff4 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -186,16 +186,24 @@
  */
 static int init_service_irqs(struct pci_dev *dev, int *irqs, int mask)
 {
-	int i, irq;
+	int i, irq = -1;
+
+	/* We have to use INTx if MSI cannot be used for PCIe PME. */
+	if ((mask & PCIE_PORT_SERVICE_PME) && pcie_pme_no_msi()) {
+		if (dev->pin)
+			irq = dev->irq;
+		goto no_msi;
+	}
 
 	/* Try to use MSI-X if supported */
 	if (!pcie_port_enable_msix(dev, irqs, mask))
 		return 0;
+
 	/* We're not going to use MSI-X, so try MSI and fall back to INTx */
-	irq = -1;
 	if (!pci_enable_msi(dev) || dev->pin)
 		irq = dev->irq;
 
+ no_msi:
 	for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
 		irqs[i] = irq;
 	irqs[PCIE_PORT_SERVICE_VC_SHIFT] = -1;
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 13c8972..127e8f1 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -15,6 +15,7 @@
 #include <linux/slab.h>
 #include <linux/pcieport_if.h>
 #include <linux/aer.h>
+#include <linux/dmi.h>
 
 #include "portdrv.h"
 #include "aer/aerdrv.h"
@@ -273,10 +274,36 @@
 	.driver.pm 	= PCIE_PORTDRV_PM_OPS,
 };
 
+static int __init dmi_pcie_pme_disable_msi(const struct dmi_system_id *d)
+{
+	pr_notice("%s detected: will not use MSI for PCIe PME signaling\n",
+			d->ident);
+	pcie_pme_disable_msi();
+	return 0;
+}
+
+static struct dmi_system_id __initdata pcie_portdrv_dmi_table[] = {
+	/*
+	 * Boxes that should not use MSI for PCIe PME signaling.
+	 */
+	{
+	 .callback = dmi_pcie_pme_disable_msi,
+	 .ident = "MSI Wind U-100",
+	 .matches = {
+		     DMI_MATCH(DMI_SYS_VENDOR,
+		     		"MICRO-STAR INTERNATIONAL CO., LTD"),
+		     DMI_MATCH(DMI_PRODUCT_NAME, "U-100"),
+		     },
+	 },
+	 {}
+};
+
 static int __init pcie_portdrv_init(void)
 {
 	int retval;
 
+	dmi_check_system(pcie_portdrv_dmi_table);
+
 	retval = pcie_port_bus_register();
 	if (retval) {
 		printk(KERN_WARNING "PCIE: bus_register error: %d\n", retval);
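
Further machines with the same MSI problem would presumably get additional
entries in pcie_portdrv_dmi_table[]; a hypothetical entry (vendor and product
strings invented for illustration) follows the existing pattern:

  {
   .callback = dmi_pcie_pme_disable_msi,
   .ident = "Example Vendor Example Box",
   .matches = {
  	     DMI_MATCH(DMI_SYS_VENDOR, "EXAMPLE VENDOR LTD"),
  	     DMI_MATCH(DMI_PRODUCT_NAME, "EXAMPLE-BOX"),
  	     },
  },
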
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 446e4a9..270d069 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -89,6 +89,7 @@
 
 	if (pci_bus->bridge)
 		put_device(pci_bus->bridge);
+	pci_bus_remove_resources(pci_bus);
 	kfree(pci_bus);
 }
 
@@ -281,26 +282,12 @@
 	}
 }
 
-void __devinit pci_read_bridge_bases(struct pci_bus *child)
+static void __devinit pci_read_bridge_io(struct pci_bus *child)
 {
 	struct pci_dev *dev = child->self;
 	u8 io_base_lo, io_limit_lo;
-	u16 mem_base_lo, mem_limit_lo;
 	unsigned long base, limit;
 	struct resource *res;
-	int i;
-
-	if (pci_is_root_bus(child))	/* It's a host bus, nothing to read */
-		return;
-
-	dev_info(&dev->dev, "PCI bridge to [bus %02x-%02x]%s\n",
-		 child->secondary, child->subordinate,
-		 dev->transparent ? " (subtractive decode)": "");
-
-	if (dev->transparent) {
-		for(i = 3; i < PCI_BUS_NUM_RESOURCES; i++)
-			child->resource[i] = child->parent->resource[i - 3];
-	}
 
 	res = child->resource[0];
 	pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
@@ -316,26 +303,50 @@
 		limit |= (io_limit_hi << 16);
 	}
 
-	if (base <= limit) {
+	if (base && base <= limit) {
 		res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
 		if (!res->start)
 			res->start = base;
 		if (!res->end)
 			res->end = limit + 0xfff;
 		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
+	} else {
+		dev_printk(KERN_DEBUG, &dev->dev,
+			 "  bridge window [io  %04lx - %04lx] reg reading\n",
+				 base, limit);
 	}
+}
+
+static void __devinit pci_read_bridge_mmio(struct pci_bus *child)
+{
+	struct pci_dev *dev = child->self;
+	u16 mem_base_lo, mem_limit_lo;
+	unsigned long base, limit;
+	struct resource *res;
 
 	res = child->resource[1];
 	pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
 	pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
 	base = (mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
 	limit = (mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
-	if (base <= limit) {
+	if (base && base <= limit) {
 		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
 		res->start = base;
 		res->end = limit + 0xfffff;
 		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
+	} else {
+		dev_printk(KERN_DEBUG, &dev->dev,
+			"  bridge window [mem 0x%08lx - 0x%08lx] reg reading\n",
+					 base, limit + 0xfffff);
 	}
+}
+
+static void __devinit pci_read_bridge_mmio_pref(struct pci_bus *child)
+{
+	struct pci_dev *dev = child->self;
+	u16 mem_base_lo, mem_limit_lo;
+	unsigned long base, limit;
+	struct resource *res;
 
 	res = child->resource[2];
 	pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
@@ -366,7 +377,7 @@
 #endif
 		}
 	}
-	if (base <= limit) {
+	if (base && base <= limit) {
 		res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
 					 IORESOURCE_MEM | IORESOURCE_PREFETCH;
 		if (res->flags & PCI_PREF_RANGE_TYPE_64)
@@ -374,6 +385,44 @@
 		res->start = base;
 		res->end = limit + 0xfffff;
 		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
+	} else {
+		dev_printk(KERN_DEBUG, &dev->dev,
+		     "  bridge window [mem 0x%08lx - %08lx pref] reg reading\n",
+					 base, limit + 0xfffff);
+	}
+}
+
+void __devinit pci_read_bridge_bases(struct pci_bus *child)
+{
+	struct pci_dev *dev = child->self;
+	struct resource *res;
+	int i;
+
+	if (pci_is_root_bus(child))	/* It's a host bus, nothing to read */
+		return;
+
+	dev_info(&dev->dev, "PCI bridge to [bus %02x-%02x]%s\n",
+		 child->secondary, child->subordinate,
+		 dev->transparent ? " (subtractive decode)" : "");
+
+	pci_bus_remove_resources(child);
+	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
+		child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];
+
+	pci_read_bridge_io(child);
+	pci_read_bridge_mmio(child);
+	pci_read_bridge_mmio_pref(child);
+
+	if (dev->transparent) {
+		pci_bus_for_each_resource(child->parent, res, i) {
+			if (res) {
+				pci_bus_add_resource(child, res,
+						     PCI_SUBTRACTIVE_DECODE);
+				dev_printk(KERN_DEBUG, &dev->dev,
+					   "  bridge window %pR (subtractive decode)\n",
+					   res);
+			}
+		}
 	}
 }
 
@@ -387,10 +436,147 @@
 		INIT_LIST_HEAD(&b->children);
 		INIT_LIST_HEAD(&b->devices);
 		INIT_LIST_HEAD(&b->slots);
+		INIT_LIST_HEAD(&b->resources);
+		b->max_bus_speed = PCI_SPEED_UNKNOWN;
+		b->cur_bus_speed = PCI_SPEED_UNKNOWN;
 	}
 	return b;
 }
 
+static unsigned char pcix_bus_speed[] = {
+	PCI_SPEED_UNKNOWN,		/* 0 */
+	PCI_SPEED_66MHz_PCIX,		/* 1 */
+	PCI_SPEED_100MHz_PCIX,		/* 2 */
+	PCI_SPEED_133MHz_PCIX,		/* 3 */
+	PCI_SPEED_UNKNOWN,		/* 4 */
+	PCI_SPEED_66MHz_PCIX_ECC,	/* 5 */
+	PCI_SPEED_100MHz_PCIX_ECC,	/* 6 */
+	PCI_SPEED_133MHz_PCIX_ECC,	/* 7 */
+	PCI_SPEED_UNKNOWN,		/* 8 */
+	PCI_SPEED_66MHz_PCIX_266,	/* 9 */
+	PCI_SPEED_100MHz_PCIX_266,	/* A */
+	PCI_SPEED_133MHz_PCIX_266,	/* B */
+	PCI_SPEED_UNKNOWN,		/* C */
+	PCI_SPEED_66MHz_PCIX_533,	/* D */
+	PCI_SPEED_100MHz_PCIX_533,	/* E */
+	PCI_SPEED_133MHz_PCIX_533	/* F */
+};
+
+static unsigned char pcie_link_speed[] = {
+	PCI_SPEED_UNKNOWN,		/* 0 */
+	PCIE_SPEED_2_5GT,		/* 1 */
+	PCIE_SPEED_5_0GT,		/* 2 */
+	PCIE_SPEED_8_0GT,		/* 3 */
+	PCI_SPEED_UNKNOWN,		/* 4 */
+	PCI_SPEED_UNKNOWN,		/* 5 */
+	PCI_SPEED_UNKNOWN,		/* 6 */
+	PCI_SPEED_UNKNOWN,		/* 7 */
+	PCI_SPEED_UNKNOWN,		/* 8 */
+	PCI_SPEED_UNKNOWN,		/* 9 */
+	PCI_SPEED_UNKNOWN,		/* A */
+	PCI_SPEED_UNKNOWN,		/* B */
+	PCI_SPEED_UNKNOWN,		/* C */
+	PCI_SPEED_UNKNOWN,		/* D */
+	PCI_SPEED_UNKNOWN,		/* E */
+	PCI_SPEED_UNKNOWN		/* F */
+};
+
+void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
+{
+	bus->cur_bus_speed = pcie_link_speed[linksta & 0xf];
+}
+EXPORT_SYMBOL_GPL(pcie_update_link_speed);
+
+static unsigned char agp_speeds[] = {
+	AGP_UNKNOWN,
+	AGP_1X,
+	AGP_2X,
+	AGP_4X,
+	AGP_8X
+};
+
+static enum pci_bus_speed agp_speed(int agp3, int agpstat)
+{
+	int index = 0;
+
+	if (agpstat & 4)
+		index = 3;
+	else if (agpstat & 2)
+		index = 2;
+	else if (agpstat & 1)
+		index = 1;
+	else
+		goto out;
+
+	if (agp3) {
+		index += 2;
+		if (index == 5)
+			index = 0;
+	}
+
+ out:
+	return agp_speeds[index];
+}
+
+
+static void pci_set_bus_speed(struct pci_bus *bus)
+{
+	struct pci_dev *bridge = bus->self;
+	int pos;
+
+	pos = pci_find_capability(bridge, PCI_CAP_ID_AGP);
+	if (!pos)
+		pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3);
+	if (pos) {
+		u32 agpstat, agpcmd;
+
+		pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat);
+		bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7);
+
+		pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd);
+		bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7);
+	}
+
+	pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
+	if (pos) {
+		u16 status;
+		enum pci_bus_speed max;
+		pci_read_config_word(bridge, pos + 2, &status);
+
+		if (status & 0x8000) {
+			max = PCI_SPEED_133MHz_PCIX_533;
+		} else if (status & 0x4000) {
+			max = PCI_SPEED_133MHz_PCIX_266;
+		} else if (status & 0x0002) {
+			if (((status >> 12) & 0x3) == 2) {
+				max = PCI_SPEED_133MHz_PCIX_ECC;
+			} else {
+				max = PCI_SPEED_133MHz_PCIX;
+			}
+		} else {
+			max = PCI_SPEED_66MHz_PCIX;
+		}
+
+		bus->max_bus_speed = max;
+		bus->cur_bus_speed = pcix_bus_speed[(status >> 6) & 0xf];
+
+		return;
+	}
+
+	pos = pci_find_capability(bridge, PCI_CAP_ID_EXP);
+	if (pos) {
+		u32 linkcap;
+		u16 linksta;
+
+		pci_read_config_dword(bridge, pos + PCI_EXP_LNKCAP, &linkcap);
+		bus->max_bus_speed = pcie_link_speed[linkcap & 0xf];
+
+		pci_read_config_word(bridge, pos + PCI_EXP_LNKSTA, &linksta);
+		pcie_update_link_speed(bus, linksta);
+	}
+}
+
+
 static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
 					   struct pci_dev *bridge, int busnr)
 {
@@ -430,6 +616,8 @@
 	child->self = bridge;
 	child->bridge = get_device(&bridge->dev);
 
+	pci_set_bus_speed(child);
+
 	/* Set up default resource pointers and names.. */
 	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
 		child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
@@ -1081,6 +1269,45 @@
 }
 EXPORT_SYMBOL(pci_scan_single_device);
 
+static unsigned next_ari_fn(struct pci_dev *dev, unsigned fn)
+{
+	u16 cap;
+	unsigned pos, next_fn;
+
+	if (!dev)
+		return 0;
+
+	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
+	if (!pos)
+		return 0;
+	pci_read_config_word(dev, pos + 4, &cap);
+	next_fn = cap >> 8;
+	if (next_fn <= fn)
+		return 0;
+	return next_fn;
+}
+
+static unsigned next_trad_fn(struct pci_dev *dev, unsigned fn)
+{
+	return (fn + 1) % 8;
+}
+
+static unsigned no_next_fn(struct pci_dev *dev, unsigned fn)
+{
+	return 0;
+}
+
+static int only_one_child(struct pci_bus *bus)
+{
+	struct pci_dev *parent = bus->self;
+	if (!parent || !pci_is_pcie(parent))
+		return 0;
+	if (parent->pcie_type == PCI_EXP_TYPE_ROOT_PORT ||
+	    parent->pcie_type == PCI_EXP_TYPE_DOWNSTREAM)
+		return 1;
+	return 0;
+}
+
 /**
  * pci_scan_slot - scan a PCI slot on a bus for devices.
  * @bus: PCI bus to scan
@@ -1094,21 +1321,30 @@
  */
 int pci_scan_slot(struct pci_bus *bus, int devfn)
 {
-	int fn, nr = 0;
+	unsigned fn, nr = 0;
 	struct pci_dev *dev;
+	unsigned (*next_fn)(struct pci_dev *, unsigned) = no_next_fn;
+
+	if (only_one_child(bus) && (devfn > 0))
+		return 0; /* Already scanned the entire slot */
 
 	dev = pci_scan_single_device(bus, devfn);
-	if (dev && !dev->is_added)	/* new device? */
+	if (!dev)
+		return 0;
+	if (!dev->is_added)
 		nr++;
 
-	if (dev && dev->multifunction) {
-		for (fn = 1; fn < 8; fn++) {
-			dev = pci_scan_single_device(bus, devfn + fn);
-			if (dev) {
-				if (!dev->is_added)
-					nr++;
-				dev->multifunction = 1;
-			}
+	if (pci_ari_enabled(bus))
+		next_fn = next_ari_fn;
+	else if (dev->multifunction)
+		next_fn = next_trad_fn;
+
+	for (fn = next_fn(dev, 0); fn > 0; fn = next_fn(dev, fn)) {
+		dev = pci_scan_single_device(bus, devfn + fn);
+		if (dev) {
+			if (!dev->is_added)
+				nr++;
+			dev->multifunction = 1;
 		}
 	}
 
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index d58b940..790eb69 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -25,14 +25,9 @@
 #include <linux/dmi.h>
 #include <linux/pci-aspm.h>
 #include <linux/ioport.h>
+#include <asm/dma.h>	/* isa_dma_bridge_buggy */
 #include "pci.h"
 
-int isa_dma_bridge_buggy;
-EXPORT_SYMBOL(isa_dma_bridge_buggy);
-int pci_pci_problems;
-EXPORT_SYMBOL(pci_pci_problems);
-
-#ifdef CONFIG_PCI_QUIRKS
 /*
  * This quirk function disables memory decoding and releases memory resources
  * of the device specified by kernel's boot parameter 'pci=resource_alignment='.
@@ -2612,6 +2607,7 @@
 	}
 	pci_do_fixups(dev, start, end);
 }
+EXPORT_SYMBOL(pci_fixup_device);
 
 static int __init pci_apply_final_quirks(void)
 {
@@ -2723,9 +2719,3 @@
 
 	return -ENOTTY;
 }
-
-#else
-void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) {}
-int pci_dev_specific_reset(struct pci_dev *dev, int probe) { return -ENOTTY; }
-#endif
-EXPORT_SYMBOL(pci_fixup_device);
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index c48cd37..bf32f07 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -27,37 +27,83 @@
 #include <linux/slab.h>
 #include "pci.h"
 
-static void pbus_assign_resources_sorted(const struct pci_bus *bus)
-{
-	struct pci_dev *dev;
+struct resource_list_x {
+	struct resource_list_x *next;
 	struct resource *res;
-	struct resource_list head, *list, *tmp;
-	int idx;
+	struct pci_dev *dev;
+	resource_size_t start;
+	resource_size_t end;
+	unsigned long flags;
+};
 
-	head.next = NULL;
-	list_for_each_entry(dev, &bus->devices, bus_list) {
-		u16 class = dev->class >> 8;
+static void add_to_failed_list(struct resource_list_x *head,
+				 struct pci_dev *dev, struct resource *res)
+{
+	struct resource_list_x *list = head;
+	struct resource_list_x *ln = list->next;
+	struct resource_list_x *tmp;
 
-		/* Don't touch classless devices or host bridges or ioapics.  */
-		if (class == PCI_CLASS_NOT_DEFINED ||
-		    class == PCI_CLASS_BRIDGE_HOST)
-			continue;
-
-		/* Don't touch ioapic devices already enabled by firmware */
-		if (class == PCI_CLASS_SYSTEM_PIC) {
-			u16 command;
-			pci_read_config_word(dev, PCI_COMMAND, &command);
-			if (command & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY))
-				continue;
-		}
-
-		pdev_sort_resources(dev, &head);
+	tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
+	if (!tmp) {
+		pr_warning("add_to_failed_list: kmalloc() failed!\n");
+		return;
 	}
 
-	for (list = head.next; list;) {
+	tmp->next = ln;
+	tmp->res = res;
+	tmp->dev = dev;
+	tmp->start = res->start;
+	tmp->end = res->end;
+	tmp->flags = res->flags;
+	list->next = tmp;
+}
+
+static void free_failed_list(struct resource_list_x *head)
+{
+	struct resource_list_x *list, *tmp;
+
+	for (list = head->next; list;) {
+		tmp = list;
+		list = list->next;
+		kfree(tmp);
+	}
+
+	head->next = NULL;
+}
+
+static void __dev_sort_resources(struct pci_dev *dev,
+				 struct resource_list *head)
+{
+	u16 class = dev->class >> 8;
+
+	/* Don't touch classless devices or host bridges or ioapics.  */
+	if (class == PCI_CLASS_NOT_DEFINED || class == PCI_CLASS_BRIDGE_HOST)
+		return;
+
+	/* Don't touch ioapic devices already enabled by firmware */
+	if (class == PCI_CLASS_SYSTEM_PIC) {
+		u16 command;
+		pci_read_config_word(dev, PCI_COMMAND, &command);
+		if (command & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY))
+			return;
+	}
+
+	pdev_sort_resources(dev, head);
+}
+
+static void __assign_resources_sorted(struct resource_list *head,
+				 struct resource_list_x *fail_head)
+{
+	struct resource *res;
+	struct resource_list *list, *tmp;
+	int idx;
+
+	for (list = head->next; list;) {
 		res = list->res;
 		idx = res - &list->dev->resource[0];
 		if (pci_assign_resource(list->dev, idx)) {
+			if (fail_head && !pci_is_root_bus(list->dev->bus))
+				add_to_failed_list(fail_head, list->dev, res);
 			res->start = 0;
 			res->end = 0;
 			res->flags = 0;
@@ -68,6 +114,30 @@
 	}
 }
 
+static void pdev_assign_resources_sorted(struct pci_dev *dev,
+				 struct resource_list_x *fail_head)
+{
+	struct resource_list head;
+
+	head.next = NULL;
+	__dev_sort_resources(dev, &head);
+	__assign_resources_sorted(&head, fail_head);
+
+}
+
+static void pbus_assign_resources_sorted(const struct pci_bus *bus,
+					 struct resource_list_x *fail_head)
+{
+	struct pci_dev *dev;
+	struct resource_list head;
+
+	head.next = NULL;
+	list_for_each_entry(dev, &bus->devices, bus_list)
+		__dev_sort_resources(dev, &head);
+
+	__assign_resources_sorted(&head, fail_head);
+}
+
 void pci_setup_cardbus(struct pci_bus *bus)
 {
 	struct pci_dev *bridge = bus->self;
@@ -134,18 +204,12 @@
    config space writes, so it's quite possible that an I/O window of
    the bridge will have some undesirable address (e.g. 0) after the
    first write. Ditto 64-bit prefetchable MMIO.  */
-static void pci_setup_bridge(struct pci_bus *bus)
+static void pci_setup_bridge_io(struct pci_bus *bus)
 {
 	struct pci_dev *bridge = bus->self;
 	struct resource *res;
 	struct pci_bus_region region;
-	u32 l, bu, lu, io_upper16;
-
-	if (pci_is_enabled(bridge))
-		return;
-
-	dev_info(&bridge->dev, "PCI bridge to [bus %02x-%02x]\n",
-		 bus->secondary, bus->subordinate);
+	u32 l, io_upper16;
 
 	/* Set up the top and bottom of the PCI I/O segment for this bus. */
 	res = bus->resource[0];
@@ -158,8 +222,7 @@
 		/* Set up upper 16 bits of I/O base/limit. */
 		io_upper16 = (region.end & 0xffff0000) | (region.start >> 16);
 		dev_info(&bridge->dev, "  bridge window %pR\n", res);
-	}
-	else {
+	} else {
 		/* Clear upper 16 bits of I/O base/limit. */
 		io_upper16 = 0;
 		l = 0x00f0;
@@ -171,21 +234,35 @@
 	pci_write_config_dword(bridge, PCI_IO_BASE, l);
 	/* Update upper 16 bits of I/O base/limit. */
 	pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16);
+}
 
-	/* Set up the top and bottom of the PCI Memory segment
-	   for this bus. */
+static void pci_setup_bridge_mmio(struct pci_bus *bus)
+{
+	struct pci_dev *bridge = bus->self;
+	struct resource *res;
+	struct pci_bus_region region;
+	u32 l;
+
+	/* Set up the top and bottom of the PCI Memory segment for this bus. */
 	res = bus->resource[1];
 	pcibios_resource_to_bus(bridge, &region, res);
 	if (res->flags & IORESOURCE_MEM) {
 		l = (region.start >> 16) & 0xfff0;
 		l |= region.end & 0xfff00000;
 		dev_info(&bridge->dev, "  bridge window %pR\n", res);
-	}
-	else {
+	} else {
 		l = 0x0000fff0;
 		dev_info(&bridge->dev, "  bridge window [mem disabled]\n");
 	}
 	pci_write_config_dword(bridge, PCI_MEMORY_BASE, l);
+}
+
+static void pci_setup_bridge_mmio_pref(struct pci_bus *bus)
+{
+	struct pci_dev *bridge = bus->self;
+	struct resource *res;
+	struct pci_bus_region region;
+	u32 l, bu, lu;
 
 	/* Clear out the upper 32 bits of PREF limit.
 	   If PCI_PREF_BASE_UPPER32 was non-zero, this temporarily
@@ -204,8 +281,7 @@
 			lu = upper_32_bits(region.end);
 		}
 		dev_info(&bridge->dev, "  bridge window %pR\n", res);
-	}
-	else {
+	} else {
 		l = 0x0000fff0;
 		dev_info(&bridge->dev, "  bridge window [mem pref disabled]\n");
 	}
@@ -214,10 +290,35 @@
 	/* Set the upper 32 bits of PREF base & limit. */
 	pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu);
 	pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu);
+}
+
+static void __pci_setup_bridge(struct pci_bus *bus, unsigned long type)
+{
+	struct pci_dev *bridge = bus->self;
+
+	dev_info(&bridge->dev, "PCI bridge to [bus %02x-%02x]\n",
+		 bus->secondary, bus->subordinate);
+
+	if (type & IORESOURCE_IO)
+		pci_setup_bridge_io(bus);
+
+	if (type & IORESOURCE_MEM)
+		pci_setup_bridge_mmio(bus);
+
+	if (type & IORESOURCE_PREFETCH)
+		pci_setup_bridge_mmio_pref(bus);
 
 	pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl);
 }
 
+static void pci_setup_bridge(struct pci_bus *bus)
+{
+	unsigned long type = IORESOURCE_IO | IORESOURCE_MEM |
+				  IORESOURCE_PREFETCH;
+
+	__pci_setup_bridge(bus, type);
+}
+
 /* Check whether the bridge supports optional I/O and
    prefetchable memory ranges. If not, the respective
    base/limit registers must be read-only and read as 0. */
@@ -253,8 +354,11 @@
 	}
 	if (pmem) {
 		b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
-		if ((pmem & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64)
+		if ((pmem & PCI_PREF_RANGE_TYPE_MASK) ==
+		    PCI_PREF_RANGE_TYPE_64) {
 			b_res[2].flags |= IORESOURCE_MEM_64;
+			b_res[2].flags |= PCI_PREF_RANGE_TYPE_64;
+		}
 	}
 
 	/* double check if bridge does support 64 bit pref */
@@ -283,8 +387,7 @@
 	unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
 				  IORESOURCE_PREFETCH;
 
-	for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
-		r = bus->resource[i];
+	pci_bus_for_each_resource(bus, r, i) {
 		if (r == &ioport_resource || r == &iomem_resource)
 			continue;
 		if (r && (r->flags & type_mask) == type && !r->parent)
@@ -301,7 +404,7 @@
 {
 	struct pci_dev *dev;
 	struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO);
-	unsigned long size = 0, size1 = 0;
+	unsigned long size = 0, size1 = 0, old_size;
 
 	if (!b_res)
  		return;
@@ -326,12 +429,17 @@
 	}
 	if (size < min_size)
 		size = min_size;
+	old_size = resource_size(b_res);
+	if (old_size == 1)
+		old_size = 0;
 /* To be fixed in 2.5: we should have sort of HAVE_ISA
    flag in the struct pci_bus. */
 #if defined(CONFIG_ISA) || defined(CONFIG_EISA)
 	size = (size & 0xff) + ((size & ~0xffUL) << 2);
 #endif
 	size = ALIGN(size + size1, 4096);
+	if (size < old_size)
+		size = old_size;
 	if (!size) {
 		if (b_res->start || b_res->end)
 			dev_info(&bus->self->dev, "disabling bridge window "
@@ -352,7 +460,7 @@
 			 unsigned long type, resource_size_t min_size)
 {
 	struct pci_dev *dev;
-	resource_size_t min_align, align, size;
+	resource_size_t min_align, align, size, old_size;
 	resource_size_t aligns[12];	/* Alignments from 1Mb to 2Gb */
 	int order, max_order;
 	struct resource *b_res = find_free_bus_resource(bus, type);
@@ -402,6 +510,11 @@
 	}
 	if (size < min_size)
 		size = min_size;
+	old_size = resource_size(b_res);
+	if (old_size == 1)
+		old_size = 0;
+	if (size < old_size)
+		size = old_size;
 
 	align = 0;
 	min_align = 0;
@@ -538,23 +651,25 @@
 }
 EXPORT_SYMBOL(pci_bus_size_bridges);
 
-void __ref pci_bus_assign_resources(const struct pci_bus *bus)
+static void __ref __pci_bus_assign_resources(const struct pci_bus *bus,
+					 struct resource_list_x *fail_head)
 {
 	struct pci_bus *b;
 	struct pci_dev *dev;
 
-	pbus_assign_resources_sorted(bus);
+	pbus_assign_resources_sorted(bus, fail_head);
 
 	list_for_each_entry(dev, &bus->devices, bus_list) {
 		b = dev->subordinate;
 		if (!b)
 			continue;
 
-		pci_bus_assign_resources(b);
+		__pci_bus_assign_resources(b, fail_head);
 
 		switch (dev->class >> 8) {
 		case PCI_CLASS_BRIDGE_PCI:
-			pci_setup_bridge(b);
+			if (!pci_is_enabled(dev))
+				pci_setup_bridge(b);
 			break;
 
 		case PCI_CLASS_BRIDGE_CARDBUS:
@@ -568,15 +683,130 @@
 		}
 	}
 }
+
+void __ref pci_bus_assign_resources(const struct pci_bus *bus)
+{
+	__pci_bus_assign_resources(bus, NULL);
+}
 EXPORT_SYMBOL(pci_bus_assign_resources);
 
+static void __ref __pci_bridge_assign_resources(const struct pci_dev *bridge,
+					 struct resource_list_x *fail_head)
+{
+	struct pci_bus *b;
+
+	pdev_assign_resources_sorted((struct pci_dev *)bridge, fail_head);
+
+	b = bridge->subordinate;
+	if (!b)
+		return;
+
+	__pci_bus_assign_resources(b, fail_head);
+
+	switch (bridge->class >> 8) {
+	case PCI_CLASS_BRIDGE_PCI:
+		pci_setup_bridge(b);
+		break;
+
+	case PCI_CLASS_BRIDGE_CARDBUS:
+		pci_setup_cardbus(b);
+		break;
+
+	default:
+		dev_info(&bridge->dev, "not setting up bridge for bus "
+			 "%04x:%02x\n", pci_domain_nr(b), b->number);
+		break;
+	}
+}
+static void pci_bridge_release_resources(struct pci_bus *bus,
+					  unsigned long type)
+{
+	int idx;
+	bool changed = false;
+	struct pci_dev *dev;
+	struct resource *r;
+	unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
+				  IORESOURCE_PREFETCH;
+
+	dev = bus->self;
+	for (idx = PCI_BRIDGE_RESOURCES; idx <= PCI_BRIDGE_RESOURCE_END;
+	     idx++) {
+		r = &dev->resource[idx];
+		if ((r->flags & type_mask) != type)
+			continue;
+		if (!r->parent)
+			continue;
+		/*
+		 * If there are child resources under this one, release
+		 * them all as well.
+		 */
+		release_child_resources(r);
+		if (!release_resource(r)) {
+			dev_printk(KERN_DEBUG, &dev->dev,
+				 "resource %d %pR released\n", idx, r);
+			/* keep the old size */
+			r->end = resource_size(r) - 1;
+			r->start = 0;
+			r->flags = 0;
+			changed = true;
+		}
+	}
+
+	if (changed) {
+		/* avoid touching the windows without PREF */
+		if (type & IORESOURCE_PREFETCH)
+			type = IORESOURCE_PREFETCH;
+		__pci_setup_bridge(bus, type);
+	}
+}
+
+enum release_type {
+	leaf_only,
+	whole_subtree,
+};
+/*
+ * Try to release PCI bridge resources from leaf bridges,
+ * so that bigger windows can be allocated later.
+ */
+static void __ref pci_bus_release_bridge_resources(struct pci_bus *bus,
+						   unsigned long type,
+						   enum release_type rel_type)
+{
+	struct pci_dev *dev;
+	bool is_leaf_bridge = true;
+
+	list_for_each_entry(dev, &bus->devices, bus_list) {
+		struct pci_bus *b = dev->subordinate;
+		if (!b)
+			continue;
+
+		is_leaf_bridge = false;
+
+		if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
+			continue;
+
+		if (rel_type == whole_subtree)
+			pci_bus_release_bridge_resources(b, type,
+						 whole_subtree);
+	}
+
+	if (pci_is_root_bus(bus))
+		return;
+
+	if ((bus->self->class >> 8) != PCI_CLASS_BRIDGE_PCI)
+		return;
+
+	if ((rel_type == whole_subtree) || is_leaf_bridge)
+		pci_bridge_release_resources(bus, type);
+}
+
 static void pci_bus_dump_res(struct pci_bus *bus)
 {
-        int i;
+	struct resource *res;
+	int i;
 
-        for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
-                struct resource *res = bus->resource[i];
-                if (!res || !res->end)
+	pci_bus_for_each_resource(bus, res, i) {
+		if (!res || !res->end || !res->flags)
                         continue;
 
 		dev_printk(KERN_DEBUG, &bus->dev, "resource %d %pR\n", i, res);
@@ -600,11 +830,65 @@
 	}
 }
 
+static int __init pci_bus_get_depth(struct pci_bus *bus)
+{
+	int depth = 0;
+	struct pci_dev *dev;
+
+	list_for_each_entry(dev, &bus->devices, bus_list) {
+		int ret;
+		struct pci_bus *b = dev->subordinate;
+		if (!b)
+			continue;
+
+		ret = pci_bus_get_depth(b);
+		if (ret + 1 > depth)
+			depth = ret + 1;
+	}
+
+	return depth;
+}
+static int __init pci_get_max_depth(void)
+{
+	int depth = 0;
+	struct pci_bus *bus;
+
+	list_for_each_entry(bus, &pci_root_buses, node) {
+		int ret;
+
+		ret = pci_bus_get_depth(bus);
+		if (ret > depth)
+			depth = ret;
+	}
+
+	return depth;
+}
+
+/*
+ * The first try does not touch PCI bridge resources.
+ * The second and later tries clear small leaf bridge resources.
+ * Stop at the maximum bus depth if no good assignment can be found.
+ */
 void __init
 pci_assign_unassigned_resources(void)
 {
 	struct pci_bus *bus;
+	int tried_times = 0;
+	enum release_type rel_type = leaf_only;
+	struct resource_list_x head, *list;
+	unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
+				  IORESOURCE_PREFETCH;
+	unsigned long failed_type;
+	int max_depth = pci_get_max_depth();
+	int pci_try_num;
 
+	head.next = NULL;
+
+	pci_try_num = max_depth + 1;
+	printk(KERN_DEBUG "PCI: max bus depth: %d pci_try_num: %d\n",
+		 max_depth, pci_try_num);
+
+again:
 	/* Depth first, calculate sizes and alignments of all
 	   subordinate buses. */
 	list_for_each_entry(bus, &pci_root_buses, node) {
@@ -612,12 +896,130 @@
 	}
 	/* Depth last, allocate resources and update the hardware. */
 	list_for_each_entry(bus, &pci_root_buses, node) {
-		pci_bus_assign_resources(bus);
-		pci_enable_bridges(bus);
+		__pci_bus_assign_resources(bus, &head);
 	}
+	tried_times++;
+
+	/* did any device fail to get its resources? */
+	if (!head.next)
+		goto enable_and_dump;
+	failed_type = 0;
+	for (list = head.next; list;) {
+		failed_type |= list->flags;
+		list = list->next;
+	}
+	/*
+	 * I/O ports are tight, so don't retry for them; also stop
+	 * once the retry limit has been reached.
+	 */
+	failed_type &= type_mask;
+	if ((failed_type == IORESOURCE_IO) || (tried_times >= pci_try_num)) {
+		free_failed_list(&head);
+		goto enable_and_dump;
+	}
+
+	printk(KERN_DEBUG "PCI: No. %d try to assign unassigned res\n",
+			 tried_times + 1);
+
+	/* from the third try on, do not restrict the release to leaf bridges */
+	if ((tried_times + 1) > 2)
+		rel_type = whole_subtree;
+
+	/*
+	 * Try to release leaf bridge resources that do not fit the
+	 * resources of child devices under that bridge.
+	 */
+	for (list = head.next; list;) {
+		bus = list->dev->bus;
+		pci_bus_release_bridge_resources(bus, list->flags & type_mask,
+						  rel_type);
+		list = list->next;
+	}
+	/* restore size and flags */
+	for (list = head.next; list;) {
+		struct resource *res = list->res;
+
+		res->start = list->start;
+		res->end = list->end;
+		res->flags = list->flags;
+		if (list->dev->subordinate)
+			res->flags = 0;
+
+		list = list->next;
+	}
+	free_failed_list(&head);
+
+	goto again;
+
+enable_and_dump:
+	/* Depth last, update the hardware. */
+	list_for_each_entry(bus, &pci_root_buses, node)
+		pci_enable_bridges(bus);
 
 	/* dump the resource on buses */
 	list_for_each_entry(bus, &pci_root_buses, node) {
 		pci_bus_dump_resources(bus);
 	}
 }
+
+void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge)
+{
+	struct pci_bus *parent = bridge->subordinate;
+	int tried_times = 0;
+	struct resource_list_x head, *list;
+	int retval;
+	unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
+				  IORESOURCE_PREFETCH;
+
+	head.next = NULL;
+
+again:
+	pci_bus_size_bridges(parent);
+	__pci_bridge_assign_resources(bridge, &head);
+	retval = pci_reenable_device(bridge);
+	pci_set_master(bridge);
+	pci_enable_bridges(parent);
+
+	tried_times++;
+
+	if (!head.next)
+		return;
+
+	if (tried_times >= 2) {
+		/* still fail, don't need to try more */
+		free_failed_list(&head);
+		return;
+	}
+
+	printk(KERN_DEBUG "PCI: No. %d try to assign unassigned res\n",
+			 tried_times + 1);
+
+	/*
+	 * Try to release leaf bridge resources that do not fit the
+	 * resources of child devices under that bridge.
+	 */
+	for (list = head.next; list;) {
+		struct pci_bus *bus = list->dev->bus;
+		unsigned long flags = list->flags;
+
+		pci_bus_release_bridge_resources(bus, flags & type_mask,
+						 whole_subtree);
+		list = list->next;
+	}
+	/* restore size and flags */
+	for (list = head.next; list;) {
+		struct resource *res = list->res;
+
+		res->start = list->start;
+		res->end = list->end;
+		res->flags = list->flags;
+		if (list->dev->subordinate)
+			res->flags = 0;
+
+		list = list->next;
+	}
+	free_failed_list(&head);
+
+	goto again;
+}
+EXPORT_SYMBOL_GPL(pci_assign_unassigned_bridge_resources);
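
As a side note for readers tracing the new retry logic above: the failed-resource
bookkeeping amounts to saving each resource's original range when assignment fails,
releasing bridge windows, restoring the saved ranges, and retrying. The stand-alone
C sketch below illustrates only that pattern; fake_resource, failed_entry and
try_assign are made-up names, and this is not the kernel implementation.

#include <stdio.h>
#include <stdlib.h>

/* stand-in for struct resource: just a range and flags */
struct fake_resource {
	unsigned long start, end, flags;
};

/* one node of the "failed list": the resource plus its saved range */
struct failed_entry {
	struct failed_entry *next;
	struct fake_resource *res;
	unsigned long saved_start, saved_end, saved_flags;
};

static void add_to_failed(struct failed_entry **head, struct fake_resource *res)
{
	struct failed_entry *e = malloc(sizeof(*e));

	if (!e)
		return;
	e->res = res;
	e->saved_start = res->start;
	e->saved_end = res->end;
	e->saved_flags = res->flags;
	e->next = *head;		/* push at the head for simplicity */
	*head = e;
}

static void restore_and_free(struct failed_entry **head)
{
	struct failed_entry *e = *head, *tmp;

	while (e) {
		e->res->start = e->saved_start;	/* undo the clearing done on failure */
		e->res->end = e->saved_end;
		e->res->flags = e->saved_flags;
		tmp = e;
		e = e->next;
		free(tmp);
	}
	*head = NULL;
}

/* pretend allocator: fails on the first pass, succeeds afterwards */
static int try_assign(struct fake_resource *res, int pass)
{
	(void)res;
	return pass > 1 ? 0 : -1;
}

int main(void)
{
	struct fake_resource r = { .start = 0x1000, .end = 0x1fff, .flags = 1 };
	struct failed_entry *failed = NULL;
	int pass = 1, max_pass = 3;

again:
	if (try_assign(&r, pass)) {
		add_to_failed(&failed, &r);
		r.start = r.end = r.flags = 0;	/* clear, as the kernel code does */
	}
	if (failed && pass < max_pass) {
		restore_and_free(&failed);	/* put the saved ranges back */
		pass++;
		goto again;
	}
	restore_and_free(&failed);
	printf("finished after %d pass(es), res [%#lx-%#lx]\n",
	       pass, r.start, r.end);
	return 0;
}
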
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index 8c02b6c..49c9e6c 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -47,6 +47,55 @@
 				slot->number);
 }
 
+/* these strings match up with the values in pci_bus_speed */
+static char *pci_bus_speed_strings[] = {
+	"33 MHz PCI",		/* 0x00 */
+	"66 MHz PCI",		/* 0x01 */
+	"66 MHz PCI-X", 	/* 0x02 */
+	"100 MHz PCI-X",	/* 0x03 */
+	"133 MHz PCI-X",	/* 0x04 */
+	NULL,			/* 0x05 */
+	NULL,			/* 0x06 */
+	NULL,			/* 0x07 */
+	NULL,			/* 0x08 */
+	"66 MHz PCI-X 266",	/* 0x09 */
+	"100 MHz PCI-X 266",	/* 0x0a */
+	"133 MHz PCI-X 266",	/* 0x0b */
+	"Unknown AGP",		/* 0x0c */
+	"1x AGP",		/* 0x0d */
+	"2x AGP",		/* 0x0e */
+	"4x AGP",		/* 0x0f */
+	"8x AGP",		/* 0x10 */
+	"66 MHz PCI-X 533",	/* 0x11 */
+	"100 MHz PCI-X 533",	/* 0x12 */
+	"133 MHz PCI-X 533",	/* 0x13 */
+	"2.5 GT/s PCIe",	/* 0x14 */
+	"5.0 GT/s PCIe",	/* 0x15 */
+	"8.0 GT/s PCIe",	/* 0x16 */
+};
+
+static ssize_t bus_speed_read(enum pci_bus_speed speed, char *buf)
+{
+	const char *speed_string;
+
+	if (speed < ARRAY_SIZE(pci_bus_speed_strings))
+		speed_string = pci_bus_speed_strings[speed];
+	else
+		speed_string = "Unknown";
+
+	return sprintf(buf, "%s\n", speed_string);
+}
+
+static ssize_t max_speed_read_file(struct pci_slot *slot, char *buf)
+{
+	return bus_speed_read(slot->bus->max_bus_speed, buf);
+}
+
+static ssize_t cur_speed_read_file(struct pci_slot *slot, char *buf)
+{
+	return bus_speed_read(slot->bus->cur_bus_speed, buf);
+}
+
 static void pci_slot_release(struct kobject *kobj)
 {
 	struct pci_dev *dev;
@@ -66,9 +115,15 @@
 
 static struct pci_slot_attribute pci_slot_attr_address =
 	__ATTR(address, (S_IFREG | S_IRUGO), address_read_file, NULL);
+static struct pci_slot_attribute pci_slot_attr_max_speed =
+	__ATTR(max_bus_speed, (S_IFREG | S_IRUGO), max_speed_read_file, NULL);
+static struct pci_slot_attribute pci_slot_attr_cur_speed =
+	__ATTR(cur_bus_speed, (S_IFREG | S_IRUGO), cur_speed_read_file, NULL);
 
 static struct attribute *pci_slot_default_attrs[] = {
 	&pci_slot_attr_address.attr,
+	&pci_slot_attr_max_speed.attr,
+	&pci_slot_attr_cur_speed.attr,
 	NULL,
 };
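
The new max_bus_speed and cur_bus_speed attributes reduce to a bounds-checked table
lookup with an "Unknown" fallback. The stand-alone C sketch below shows that lookup
pattern in isolation; the table contents and the extra NULL-entry check are
illustrative assumptions, not a copy of the kernel table above.

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* purely illustrative table; the real strings come from pci_bus_speed_strings[] */
static const char *speed_strings[] = {
	"speed A",	/* 0 */
	"speed B",	/* 1 */
	NULL,		/* 2: reserved gap, like the NULL slots in the kernel table */
	"speed C",	/* 3 */
};

static const char *speed_to_string(unsigned int speed)
{
	const char *s = NULL;

	if (speed < ARRAY_SIZE(speed_strings))
		s = speed_strings[speed];

	return s ? s : "Unknown";	/* fallback for out-of-range or NULL entries */
}

int main(void)
{
	unsigned int probes[] = { 0, 2, 3, 42 };

	for (unsigned int i = 0; i < ARRAY_SIZE(probes); i++)
		printf("%u -> %s\n", probes[i], speed_to_string(probes[i]));
	return 0;
}
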
 
diff --git a/drivers/pcmcia/rsrc_mgr.c b/drivers/pcmcia/rsrc_mgr.c
index 52db172..f8401a0 100644
--- a/drivers/pcmcia/rsrc_mgr.c
+++ b/drivers/pcmcia/rsrc_mgr.c
@@ -114,22 +114,21 @@
 	unsigned long	offset;
 };
 
-static void pcmcia_align(void *align_data, struct resource *res,
-			unsigned long size, unsigned long align)
+static resource_size_t pcmcia_align(void *align_data,
+				const struct resource *res,
+				resource_size_t size, resource_size_t align)
 {
 	struct pcmcia_align_data *data = align_data;
-	unsigned long start;
+	resource_size_t start;
 
 	start = (res->start & ~data->mask) + data->offset;
 	if (start < res->start)
 		start += data->mask + 1;
-	res->start = start;
 
 #ifdef CONFIG_X86
 	if (res->flags & IORESOURCE_IO) {
 		if (start & 0x300) {
 			start = (start + 0x3ff) & ~0x3ff;
-			res->start = start;
 		}
 	}
 #endif
@@ -137,9 +136,11 @@
 #ifdef CONFIG_M68K
 	if (res->flags & IORESOURCE_IO) {
 		if ((res->start + size - 1) >= 1024)
-			res->start = res->end;
+			start = res->end;
 	}
 #endif
+
+	return start;
 }
 
 
diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c
index 9b0dc43..c67638f 100644
--- a/drivers/pcmcia/rsrc_nonstatic.c
+++ b/drivers/pcmcia/rsrc_nonstatic.c
@@ -533,8 +533,8 @@
 	struct resource_map	*map;
 };
 
-static void
-pcmcia_common_align(void *align_data, struct resource *res,
+static resource_size_t
+pcmcia_common_align(void *align_data, const struct resource *res,
 			resource_size_t size, resource_size_t align)
 {
 	struct pcmcia_align_data *data = align_data;
@@ -545,17 +545,18 @@
 	start = (res->start & ~data->mask) + data->offset;
 	if (start < res->start)
 		start += data->mask + 1;
-	res->start = start;
+	return start;
 }
 
-static void
-pcmcia_align(void *align_data, struct resource *res, resource_size_t size,
-		resource_size_t align)
+static resource_size_t
+pcmcia_align(void *align_data, const struct resource *res,
+	resource_size_t size, resource_size_t align)
 {
 	struct pcmcia_align_data *data = align_data;
 	struct resource_map *m;
+	resource_size_t start;
 
-	pcmcia_common_align(data, res, size, align);
+	start = pcmcia_common_align(data, res, size, align);
 
 	for (m = data->map->next; m != data->map; m = m->next) {
 		unsigned long start = m->base;
@@ -567,8 +568,7 @@
 		 * fit here.
 		 */
 		if (res->start < start) {
-			res->start = start;
-			pcmcia_common_align(data, res, size, align);
+			start = pcmcia_common_align(data, res, size, align);
 		}
 
 		/*
@@ -586,7 +586,9 @@
 	 * If we failed to find something suitable, ensure we fail.
 	 */
 	if (m == data->map)
-		res->start = res->end;
+		start = res->end;
+
+	return start;
 }
 
 /*
@@ -801,8 +803,7 @@
 		return -EINVAL;
 #endif
 
-	for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
-		res = s->cb_dev->bus->resource[i];
+	pci_bus_for_each_resource(s->cb_dev->bus, res, i) {
 		if (!res)
 			continue;
 
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index e4d12ac..1f2039d 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -649,9 +649,10 @@
 static int yenta_search_res(struct yenta_socket *socket, struct resource *res,
 			    u32 min)
 {
+	struct resource *root;
 	int i;
-	for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
-		struct resource *root = socket->dev->bus->resource[i];
+
+	pci_bus_for_each_resource(socket->dev->bus, root, i) {
 		if (!root)
 			continue;
 
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 9d0c941..66d6c01 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -3,7 +3,7 @@
  *
  * Module interface and handling of zfcp data structures.
  *
- * Copyright IBM Corporation 2002, 2009
+ * Copyright IBM Corporation 2002, 2010
  */
 
 /*
@@ -32,6 +32,7 @@
 #include <linux/seq_file.h>
 #include "zfcp_ext.h"
 #include "zfcp_fc.h"
+#include "zfcp_reqlist.h"
 
 #define ZFCP_BUS_ID_SIZE	20
 
@@ -49,36 +50,6 @@
 	return kmem_cache_create(name, size, roundup_pow_of_two(size), 0, NULL);
 }
 
-static int zfcp_reqlist_alloc(struct zfcp_adapter *adapter)
-{
-	int idx;
-
-	adapter->req_list = kcalloc(REQUEST_LIST_SIZE, sizeof(struct list_head),
-				    GFP_KERNEL);
-	if (!adapter->req_list)
-		return -ENOMEM;
-
-	for (idx = 0; idx < REQUEST_LIST_SIZE; idx++)
-		INIT_LIST_HEAD(&adapter->req_list[idx]);
-	return 0;
-}
-
-/**
- * zfcp_reqlist_isempty - is the request list empty
- * @adapter: pointer to struct zfcp_adapter
- *
- * Returns: true if list is empty, false otherwise
- */
-int zfcp_reqlist_isempty(struct zfcp_adapter *adapter)
-{
-	unsigned int idx;
-
-	for (idx = 0; idx < REQUEST_LIST_SIZE; idx++)
-		if (!list_empty(&adapter->req_list[idx]))
-			return 0;
-	return 1;
-}
-
 static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun)
 {
 	struct ccw_device *cdev;
@@ -110,7 +81,7 @@
 	flush_work(&unit->scsi_work);
 
 out_unit:
-	put_device(&port->sysfs_device);
+	put_device(&port->dev);
 out_port:
 	zfcp_ccw_adapter_put(adapter);
 out_ccw_device:
@@ -255,7 +226,7 @@
 	read_lock_irqsave(&port->unit_list_lock, flags);
 	list_for_each_entry(unit, &port->unit_list, list)
 		if (unit->fcp_lun == fcp_lun) {
-			if (!get_device(&unit->sysfs_device))
+			if (!get_device(&unit->dev))
 				unit = NULL;
 			read_unlock_irqrestore(&port->unit_list_lock, flags);
 			return unit;
@@ -280,7 +251,7 @@
 	read_lock_irqsave(&adapter->port_list_lock, flags);
 	list_for_each_entry(port, &adapter->port_list, list)
 		if (port->wwpn == wwpn) {
-			if (!get_device(&port->sysfs_device))
+			if (!get_device(&port->dev))
 				port = NULL;
 			read_unlock_irqrestore(&adapter->port_list_lock, flags);
 			return port;
@@ -298,10 +269,9 @@
  */
 static void zfcp_unit_release(struct device *dev)
 {
-	struct zfcp_unit *unit = container_of(dev, struct zfcp_unit,
-					      sysfs_device);
+	struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
 
-	put_device(&unit->port->sysfs_device);
+	put_device(&unit->port->dev);
 	kfree(unit);
 }
 
@@ -318,11 +288,11 @@
 	struct zfcp_unit *unit;
 	int retval = -ENOMEM;
 
-	get_device(&port->sysfs_device);
+	get_device(&port->dev);
 
 	unit = zfcp_get_unit_by_lun(port, fcp_lun);
 	if (unit) {
-		put_device(&unit->sysfs_device);
+		put_device(&unit->dev);
 		retval = -EEXIST;
 		goto err_out;
 	}
@@ -333,10 +303,10 @@
 
 	unit->port = port;
 	unit->fcp_lun = fcp_lun;
-	unit->sysfs_device.parent = &port->sysfs_device;
-	unit->sysfs_device.release = zfcp_unit_release;
+	unit->dev.parent = &port->dev;
+	unit->dev.release = zfcp_unit_release;
 
-	if (dev_set_name(&unit->sysfs_device, "0x%016llx",
+	if (dev_set_name(&unit->dev, "0x%016llx",
 			 (unsigned long long) fcp_lun)) {
 		kfree(unit);
 		goto err_out;
@@ -353,13 +323,12 @@
 	unit->latencies.cmd.channel.min = 0xFFFFFFFF;
 	unit->latencies.cmd.fabric.min = 0xFFFFFFFF;
 
-	if (device_register(&unit->sysfs_device)) {
-		put_device(&unit->sysfs_device);
+	if (device_register(&unit->dev)) {
+		put_device(&unit->dev);
 		goto err_out;
 	}
 
-	if (sysfs_create_group(&unit->sysfs_device.kobj,
-			       &zfcp_sysfs_unit_attrs))
+	if (sysfs_create_group(&unit->dev.kobj, &zfcp_sysfs_unit_attrs))
 		goto err_out_put;
 
 	write_lock_irq(&port->unit_list_lock);
@@ -371,9 +340,9 @@
 	return unit;
 
 err_out_put:
-	device_unregister(&unit->sysfs_device);
+	device_unregister(&unit->dev);
 err_out:
-	put_device(&port->sysfs_device);
+	put_device(&port->dev);
 	return ERR_PTR(retval);
 }
 
@@ -539,7 +508,8 @@
 	if (zfcp_allocate_low_mem_buffers(adapter))
 		goto failed;
 
-	if (zfcp_reqlist_alloc(adapter))
+	adapter->req_list = zfcp_reqlist_alloc();
+	if (!adapter->req_list)
 		goto failed;
 
 	if (zfcp_dbf_adapter_register(adapter))
@@ -560,8 +530,6 @@
 	INIT_LIST_HEAD(&adapter->erp_ready_head);
 	INIT_LIST_HEAD(&adapter->erp_running_head);
 
-	spin_lock_init(&adapter->req_list_lock);
-
 	rwlock_init(&adapter->erp_lock);
 	rwlock_init(&adapter->abort_lock);
 
@@ -640,8 +608,7 @@
 
 static void zfcp_port_release(struct device *dev)
 {
-	struct zfcp_port *port = container_of(dev, struct zfcp_port,
-					      sysfs_device);
+	struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
 
 	zfcp_ccw_adapter_put(port->adapter);
 	kfree(port);
@@ -669,7 +636,7 @@
 
 	port = zfcp_get_port_by_wwpn(adapter, wwpn);
 	if (port) {
-		put_device(&port->sysfs_device);
+		put_device(&port->dev);
 		retval = -EEXIST;
 		goto err_out;
 	}
@@ -689,22 +656,21 @@
 	port->d_id = d_id;
 	port->wwpn = wwpn;
 	port->rport_task = RPORT_NONE;
-	port->sysfs_device.parent = &adapter->ccw_device->dev;
-	port->sysfs_device.release = zfcp_port_release;
+	port->dev.parent = &adapter->ccw_device->dev;
+	port->dev.release = zfcp_port_release;
 
-	if (dev_set_name(&port->sysfs_device, "0x%016llx",
-			 (unsigned long long)wwpn)) {
+	if (dev_set_name(&port->dev, "0x%016llx", (unsigned long long)wwpn)) {
 		kfree(port);
 		goto err_out;
 	}
 	retval = -EINVAL;
 
-	if (device_register(&port->sysfs_device)) {
-		put_device(&port->sysfs_device);
+	if (device_register(&port->dev)) {
+		put_device(&port->dev);
 		goto err_out;
 	}
 
-	if (sysfs_create_group(&port->sysfs_device.kobj,
+	if (sysfs_create_group(&port->dev.kobj,
 			       &zfcp_sysfs_port_attrs))
 		goto err_out_put;
 
@@ -717,7 +683,7 @@
 	return port;
 
 err_out_put:
-	device_unregister(&port->sysfs_device);
+	device_unregister(&port->dev);
 err_out:
 	zfcp_ccw_adapter_put(adapter);
 	return ERR_PTR(retval);
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index c22cb72..ce1cc7a 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -3,13 +3,14 @@
  *
  * Registration and callback for the s390 common I/O layer.
  *
- * Copyright IBM Corporation 2002, 2009
+ * Copyright IBM Corporation 2002, 2010
  */
 
 #define KMSG_COMPONENT "zfcp"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
 #include "zfcp_ext.h"
+#include "zfcp_reqlist.h"
 
 #define ZFCP_MODEL_PRIV 0x4
 
@@ -122,12 +123,10 @@
 	zfcp_ccw_adapter_put(adapter); /* put from zfcp_ccw_adapter_by_cdev */
 
 	list_for_each_entry_safe(unit, u, &unit_remove_lh, list)
-		zfcp_device_unregister(&unit->sysfs_device,
-				       &zfcp_sysfs_unit_attrs);
+		zfcp_device_unregister(&unit->dev, &zfcp_sysfs_unit_attrs);
 
 	list_for_each_entry_safe(port, p, &port_remove_lh, list)
-		zfcp_device_unregister(&port->sysfs_device,
-				       &zfcp_sysfs_port_attrs);
+		zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs);
 
 	zfcp_adapter_unregister(adapter);
 }
@@ -162,7 +161,7 @@
 	}
 
 	/* initialize request counter */
-	BUG_ON(!zfcp_reqlist_isempty(adapter));
+	BUG_ON(!zfcp_reqlist_isempty(adapter->req_list));
 	adapter->req_no = 0;
 
 	zfcp_erp_modify_adapter_status(adapter, "ccsonl1", NULL,
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 7369c89..7a149fd 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -140,9 +140,9 @@
 	memcpy(response->fsf_status_qual,
 	       fsf_status_qual, FSF_STATUS_QUALIFIER_SIZE);
 	response->fsf_req_status = fsf_req->status;
-	response->sbal_first = fsf_req->queue_req.sbal_first;
-	response->sbal_last = fsf_req->queue_req.sbal_last;
-	response->sbal_response = fsf_req->queue_req.sbal_response;
+	response->sbal_first = fsf_req->qdio_req.sbal_first;
+	response->sbal_last = fsf_req->qdio_req.sbal_last;
+	response->sbal_response = fsf_req->qdio_req.sbal_response;
 	response->pool = fsf_req->pool != NULL;
 	response->erp_action = (unsigned long)fsf_req->erp_action;
 
@@ -576,7 +576,8 @@
 	struct zfcp_adapter *adapter = dbf->adapter;
 
 	zfcp_dbf_rec_target(id, ref, dbf, &adapter->status,
-				  &adapter->erp_counter, 0, 0, 0);
+			    &adapter->erp_counter, 0, 0,
+			    ZFCP_DBF_INVALID_LUN);
 }
 
 /**
@@ -590,8 +591,8 @@
 	struct zfcp_dbf *dbf = port->adapter->dbf;
 
 	zfcp_dbf_rec_target(id, ref, dbf, &port->status,
-				  &port->erp_counter, port->wwpn, port->d_id,
-				  0);
+			    &port->erp_counter, port->wwpn, port->d_id,
+			    ZFCP_DBF_INVALID_LUN);
 }
 
 /**
@@ -642,10 +643,9 @@
 		r->u.trigger.ps = atomic_read(&port->status);
 		r->u.trigger.wwpn = port->wwpn;
 	}
-	if (unit) {
+	if (unit)
 		r->u.trigger.us = atomic_read(&unit->status);
-		r->u.trigger.fcp_lun = unit->fcp_lun;
-	}
+	r->u.trigger.fcp_lun = unit ? unit->fcp_lun : ZFCP_DBF_INVALID_LUN;
 	debug_event(dbf->rec, action ? 1 : 4, r, sizeof(*r));
 	spin_unlock_irqrestore(&dbf->rec_lock, flags);
 }
@@ -668,7 +668,7 @@
 	r->u.action.action = (unsigned long)erp_action;
 	r->u.action.status = erp_action->status;
 	r->u.action.step = erp_action->step;
-	r->u.action.fsf_req = (unsigned long)erp_action->fsf_req;
+	r->u.action.fsf_req = erp_action->fsf_req_id;
 	debug_event(dbf->rec, 5, r, sizeof(*r));
 	spin_unlock_irqrestore(&dbf->rec_lock, flags);
 }
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index 8b7fd9a..457e046 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -30,6 +30,8 @@
 #define ZFCP_DBF_TAG_SIZE      4
 #define ZFCP_DBF_ID_SIZE       7
 
+#define ZFCP_DBF_INVALID_LUN	0xFFFFFFFFFFFFFFFFull
+
 struct zfcp_dbf_dump {
 	u8 tag[ZFCP_DBF_TAG_SIZE];
 	u32 total_size;		/* size of total dump data */
@@ -192,10 +194,10 @@
 		struct zfcp_dbf_san_record_ct_response ct_resp;
 		struct zfcp_dbf_san_record_els els;
 	} u;
-#define ZFCP_DBF_SAN_MAX_PAYLOAD 1024
-	u8 payload[32];
 } __attribute__ ((packed));
 
+#define ZFCP_DBF_SAN_MAX_PAYLOAD 1024
+
 struct zfcp_dbf_scsi_record {
 	u8 tag[ZFCP_DBF_TAG_SIZE];
 	u8 tag2[ZFCP_DBF_TAG_SIZE];
@@ -301,17 +303,31 @@
 
 /**
  * zfcp_dbf_scsi_result - trace event for SCSI command completion
- * @tag: tag indicating success or failure of SCSI command
- * @level: trace level applicable for this event
- * @adapter: adapter that has been used to issue the SCSI command
+ * @dbf: adapter dbf trace
  * @scmd: SCSI command pointer
- * @fsf_req: request used to issue SCSI command (might be NULL)
+ * @req: FSF request used to issue SCSI command
  */
 static inline
-void zfcp_dbf_scsi_result(const char *tag, int level, struct zfcp_dbf *dbf,
-			  struct scsi_cmnd *scmd, struct zfcp_fsf_req *fsf_req)
+void zfcp_dbf_scsi_result(struct zfcp_dbf *dbf, struct scsi_cmnd *scmd,
+			  struct zfcp_fsf_req *req)
 {
-	zfcp_dbf_scsi("rslt", tag, level, dbf, scmd, fsf_req, 0);
+	if (scmd->result != 0)
+		zfcp_dbf_scsi("rslt", "erro", 3, dbf, scmd, req, 0);
+	else if (scmd->retries > 0)
+		zfcp_dbf_scsi("rslt", "retr", 4, dbf, scmd, req, 0);
+	else
+		zfcp_dbf_scsi("rslt", "norm", 6, dbf, scmd, req, 0);
+}
+
+/**
+ * zfcp_dbf_scsi_fail_send - trace event for failure to send SCSI command
+ * @dbf: adapter dbf trace
+ * @scmd: SCSI command pointer
+ */
+static inline
+void zfcp_dbf_scsi_fail_send(struct zfcp_dbf *dbf, struct scsi_cmnd *scmd)
+{
+	zfcp_dbf_scsi("rslt", "fail", 4, dbf, scmd, NULL, 0);
 }
 
 /**
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index e1b5b88..7131c7d 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -3,7 +3,7 @@
  *
  * Global definitions for the zfcp device driver.
  *
- * Copyright IBM Corporation 2002, 2009
+ * Copyright IBM Corporation 2002, 2010
  */
 
 #ifndef ZFCP_DEF_H
@@ -33,15 +33,13 @@
 #include <scsi/scsi_transport_fc.h>
 #include <scsi/scsi_bsg_fc.h>
 #include <asm/ccwdev.h>
-#include <asm/qdio.h>
 #include <asm/debug.h>
 #include <asm/ebcdic.h>
 #include <asm/sysinfo.h>
 #include "zfcp_fsf.h"
+#include "zfcp_qdio.h"
 
-/********************* GENERAL DEFINES *********************************/
-
-#define REQUEST_LIST_SIZE 128
+struct zfcp_reqlist;
 
 /********************* SCSI SPECIFIC DEFINES *********************************/
 #define ZFCP_SCSI_ER_TIMEOUT                    (10*HZ)
@@ -129,12 +127,6 @@
 	mempool_t *qtcb_pool;
 };
 
-struct zfcp_qdio_queue {
-	struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q];
-	u8		   first;	/* index of next free bfr in queue */
-	atomic_t           count;	/* number of free buffers in queue */
-};
-
 struct zfcp_erp_action {
 	struct list_head list;
 	int action;	              /* requested action code */
@@ -143,8 +135,7 @@
 	struct zfcp_unit *unit;
 	u32		status;	      /* recovery status */
 	u32 step;	              /* active step of this erp action */
-	struct zfcp_fsf_req *fsf_req; /* fsf request currently pending
-					 for this action */
+	unsigned long		fsf_req_id;
 	struct timer_list timer;
 };
 
@@ -167,29 +158,6 @@
 	spinlock_t lock;
 };
 
-/** struct zfcp_qdio - basic QDIO data structure
- * @resp_q: response queue
- * @req_q: request queue
- * @stat_lock: lock to protect req_q_util and req_q_time
- * @req_q_lock; lock to serialize access to request queue
- * @req_q_time: time of last fill level change
- * @req_q_util: used for accounting
- * @req_q_full: queue full incidents
- * @req_q_wq: used to wait for SBAL availability
- * @adapter: adapter used in conjunction with this QDIO structure
- */
-struct zfcp_qdio {
-	struct zfcp_qdio_queue	resp_q;
-	struct zfcp_qdio_queue	req_q;
-	spinlock_t		stat_lock;
-	spinlock_t		req_q_lock;
-	unsigned long long	req_q_time;
-	u64			req_q_util;
-	atomic_t		req_q_full;
-	wait_queue_head_t	req_q_wq;
-	struct zfcp_adapter	*adapter;
-};
-
 struct zfcp_adapter {
 	struct kref		ref;
 	u64			peer_wwnn;	   /* P2P peer WWNN */
@@ -207,8 +175,7 @@
 	struct list_head	port_list;	   /* remote port list */
 	rwlock_t		port_list_lock;    /* port list lock */
 	unsigned long		req_no;		   /* unique FSF req number */
-	struct list_head	*req_list;	   /* list of pending reqs */
-	spinlock_t		req_list_lock;	   /* request list lock */
+	struct zfcp_reqlist	*req_list;
 	u32			fsf_req_seq_no;	   /* FSF cmnd seq number */
 	rwlock_t		abort_lock;        /* Protects against SCSI
 						      stack abort/command
@@ -241,7 +208,7 @@
 };
 
 struct zfcp_port {
-	struct device          sysfs_device;   /* sysfs device */
+	struct device          dev;
 	struct fc_rport        *rport;         /* rport of fc transport class */
 	struct list_head       list;	       /* list of remote ports */
 	struct zfcp_adapter    *adapter;       /* adapter used to access port */
@@ -263,7 +230,7 @@
 };
 
 struct zfcp_unit {
-	struct device          sysfs_device;   /* sysfs device */
+	struct device          dev;
 	struct list_head       list;	       /* list of logical units */
 	struct zfcp_port       *port;	       /* remote port of unit */
 	atomic_t	       status;	       /* status of this logical unit */
@@ -277,33 +244,11 @@
 };
 
 /**
- * struct zfcp_queue_req - queue related values for a request
- * @sbal_number: number of free SBALs
- * @sbal_first: first SBAL for this request
- * @sbal_last: last SBAL for this request
- * @sbal_limit: last possible SBAL for this request
- * @sbale_curr: current SBALE at creation of this request
- * @sbal_response: SBAL used in interrupt
- * @qdio_outb_usage: usage of outbound queue
- * @qdio_inb_usage: usage of inbound queue
- */
-struct zfcp_queue_req {
-	u8		       sbal_number;
-	u8		       sbal_first;
-	u8		       sbal_last;
-	u8		       sbal_limit;
-	u8		       sbale_curr;
-	u8		       sbal_response;
-	u16		       qdio_outb_usage;
-	u16		       qdio_inb_usage;
-};
-
-/**
  * struct zfcp_fsf_req - basic FSF request structure
  * @list: list of FSF requests
  * @req_id: unique request ID
  * @adapter: adapter this request belongs to
- * @queue_req: queue related values
+ * @qdio_req: qdio queue related values
  * @completion: used to signal the completion of the request
  * @status: status of the request
  * @fsf_command: FSF command issued
@@ -321,7 +266,7 @@
 	struct list_head	list;
 	unsigned long		req_id;
 	struct zfcp_adapter	*adapter;
-	struct zfcp_queue_req	queue_req;
+	struct zfcp_qdio_req	qdio_req;
 	struct completion	completion;
 	u32			status;
 	u32			fsf_command;
@@ -352,45 +297,4 @@
 #define ZFCP_SET                0x00000100
 #define ZFCP_CLEAR              0x00000200
 
-/*
- * Helper functions for request ID management.
- */
-static inline int zfcp_reqlist_hash(unsigned long req_id)
-{
-	return req_id % REQUEST_LIST_SIZE;
-}
-
-static inline void zfcp_reqlist_remove(struct zfcp_adapter *adapter,
-				       struct zfcp_fsf_req *fsf_req)
-{
-	list_del(&fsf_req->list);
-}
-
-static inline struct zfcp_fsf_req *
-zfcp_reqlist_find(struct zfcp_adapter *adapter, unsigned long req_id)
-{
-	struct zfcp_fsf_req *request;
-	unsigned int idx;
-
-	idx = zfcp_reqlist_hash(req_id);
-	list_for_each_entry(request, &adapter->req_list[idx], list)
-		if (request->req_id == req_id)
-			return request;
-	return NULL;
-}
-
-static inline struct zfcp_fsf_req *
-zfcp_reqlist_find_safe(struct zfcp_adapter *adapter, struct zfcp_fsf_req *req)
-{
-	struct zfcp_fsf_req *request;
-	unsigned int idx;
-
-	for (idx = 0; idx < REQUEST_LIST_SIZE; idx++) {
-		list_for_each_entry(request, &adapter->req_list[idx], list)
-			if (request == req)
-				return request;
-	}
-	return NULL;
-}
-
 #endif /* ZFCP_DEF_H */
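
The helpers removed above show that request tracking used to be an open-coded hash
of list heads keyed by req_id % REQUEST_LIST_SIZE. The new zfcp_reqlist.h is not
part of this hunk, so the stand-alone C sketch below only illustrates that
hashed-bucket idea with made-up names (reqlist_add, reqlist_find_rm); it is not the
zfcp API.

#include <stdio.h>

#define REQLIST_BUCKETS 128	/* matches the removed REQUEST_LIST_SIZE */

struct req {
	unsigned long id;
	struct req *next;
};

struct reqlist {
	struct req *bucket[REQLIST_BUCKETS];
};

static unsigned int req_hash(unsigned long id)
{
	return id % REQLIST_BUCKETS;	/* same trivial hash as the removed helper */
}

static void reqlist_add(struct reqlist *rl, struct req *r)
{
	unsigned int i = req_hash(r->id);

	r->next = rl->bucket[i];
	rl->bucket[i] = r;
}

/* find a request by id and unlink it from its bucket in one pass */
static struct req *reqlist_find_rm(struct reqlist *rl, unsigned long id)
{
	struct req **pp;

	for (pp = &rl->bucket[req_hash(id)]; *pp; pp = &(*pp)->next) {
		if ((*pp)->id == id) {
			struct req *r = *pp;

			*pp = r->next;	/* unlink in place */
			return r;
		}
	}
	return NULL;
}

static int reqlist_isempty(const struct reqlist *rl)
{
	for (unsigned int i = 0; i < REQLIST_BUCKETS; i++)
		if (rl->bucket[i])
			return 0;
	return 1;
}

int main(void)
{
	struct reqlist rl = { { NULL } };
	struct req a = { .id = 7 }, b = { .id = 7 + REQLIST_BUCKETS };
	struct req *found;

	reqlist_add(&rl, &a);
	reqlist_add(&rl, &b);	/* collides with 'a' in the same bucket */

	found = reqlist_find_rm(&rl, b.id);
	printf("found id %lu\n", found ? found->id : 0UL);
	printf("list empty: %d\n", reqlist_isempty(&rl));	/* 'a' is still queued -> 0 */
	return 0;
}
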
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index b51a11a..0be5e7e 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -3,7 +3,7 @@
  *
  * Error Recovery Procedures (ERP).
  *
- * Copyright IBM Corporation 2002, 2009
+ * Copyright IBM Corporation 2002, 2010
  */
 
 #define KMSG_COMPONENT "zfcp"
@@ -11,6 +11,7 @@
 
 #include <linux/kthread.h>
 #include "zfcp_ext.h"
+#include "zfcp_reqlist.h"
 
 #define ZFCP_MAX_ERPS                   3
 
@@ -174,7 +175,7 @@
 
 	switch (need) {
 	case ZFCP_ERP_ACTION_REOPEN_UNIT:
-		if (!get_device(&unit->sysfs_device))
+		if (!get_device(&unit->dev))
 			return NULL;
 		atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status);
 		erp_action = &unit->erp_action;
@@ -184,7 +185,7 @@
 
 	case ZFCP_ERP_ACTION_REOPEN_PORT:
 	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
-		if (!get_device(&port->sysfs_device))
+		if (!get_device(&port->dev))
 			return NULL;
 		zfcp_erp_action_dismiss_port(port);
 		atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status);
@@ -478,26 +479,27 @@
 static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act)
 {
 	struct zfcp_adapter *adapter = act->adapter;
+	struct zfcp_fsf_req *req;
 
-	if (!act->fsf_req)
+	if (!act->fsf_req_id)
 		return;
 
-	spin_lock(&adapter->req_list_lock);
-	if (zfcp_reqlist_find_safe(adapter, act->fsf_req) &&
-	    act->fsf_req->erp_action == act) {
+	spin_lock(&adapter->req_list->lock);
+	req = _zfcp_reqlist_find(adapter->req_list, act->fsf_req_id);
+	if (req && req->erp_action == act) {
 		if (act->status & (ZFCP_STATUS_ERP_DISMISSED |
 				   ZFCP_STATUS_ERP_TIMEDOUT)) {
-			act->fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
+			req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
 			zfcp_dbf_rec_action("erscf_1", act);
-			act->fsf_req->erp_action = NULL;
+			req->erp_action = NULL;
 		}
 		if (act->status & ZFCP_STATUS_ERP_TIMEDOUT)
 			zfcp_dbf_rec_action("erscf_2", act);
-		if (act->fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED)
-			act->fsf_req = NULL;
+		if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED)
+			act->fsf_req_id = 0;
 	} else
-		act->fsf_req = NULL;
-	spin_unlock(&adapter->req_list_lock);
+		act->fsf_req_id = 0;
+	spin_unlock(&adapter->req_list->lock);
 }
 
 /**
@@ -1179,19 +1181,19 @@
 	switch (act->action) {
 	case ZFCP_ERP_ACTION_REOPEN_UNIT:
 		if ((result == ZFCP_ERP_SUCCEEDED) && !unit->device) {
-			get_device(&unit->sysfs_device);
+			get_device(&unit->dev);
 			if (scsi_queue_work(unit->port->adapter->scsi_host,
 					    &unit->scsi_work) <= 0)
-				put_device(&unit->sysfs_device);
+				put_device(&unit->dev);
 		}
-		put_device(&unit->sysfs_device);
+		put_device(&unit->dev);
 		break;
 
 	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
 	case ZFCP_ERP_ACTION_REOPEN_PORT:
 		if (result == ZFCP_ERP_SUCCEEDED)
 			zfcp_scsi_schedule_rport_register(port);
-		put_device(&port->sysfs_device);
+		put_device(&port->dev);
 		break;
 
 	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 66bdb34..8786a79 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -21,7 +21,6 @@
 extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, u64, u32,
 					   u32);
 extern struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *, u64);
-extern int zfcp_reqlist_isempty(struct zfcp_adapter *);
 extern void zfcp_sg_free_table(struct scatterlist *, int);
 extern int zfcp_sg_setup_table(struct scatterlist *, int);
 extern void zfcp_device_unregister(struct device *,
@@ -144,13 +143,9 @@
 /* zfcp_qdio.c */
 extern int zfcp_qdio_setup(struct zfcp_adapter *);
 extern void zfcp_qdio_destroy(struct zfcp_qdio *);
-extern int zfcp_qdio_send(struct zfcp_qdio *, struct zfcp_queue_req *);
-extern struct qdio_buffer_element
-	*zfcp_qdio_sbale_req(struct zfcp_qdio *, struct zfcp_queue_req *);
-extern struct qdio_buffer_element
-	*zfcp_qdio_sbale_curr(struct zfcp_qdio *, struct zfcp_queue_req *);
+extern int zfcp_qdio_send(struct zfcp_qdio *, struct zfcp_qdio_req *);
 extern int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *,
-				   struct zfcp_queue_req *, unsigned long,
+				   struct zfcp_qdio_req *, unsigned long,
 				   struct scatterlist *, int);
 extern int zfcp_qdio_open(struct zfcp_qdio *);
 extern void zfcp_qdio_close(struct zfcp_qdio *);
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 271399f..5219670 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -3,7 +3,7 @@
  *
  * Fibre Channel related functions for the zfcp device driver.
  *
- * Copyright IBM Corporation 2008, 2009
+ * Copyright IBM Corporation 2008, 2010
  */
 
 #define KMSG_COMPONENT "zfcp"
@@ -316,7 +316,7 @@
 
 	zfcp_erp_port_reopen(port, 0, "fcgpn_3", NULL);
 out:
-	put_device(&port->sysfs_device);
+	put_device(&port->dev);
 }
 
 /**
@@ -325,9 +325,9 @@
  */
 void zfcp_fc_trigger_did_lookup(struct zfcp_port *port)
 {
-	get_device(&port->sysfs_device);
+	get_device(&port->dev);
 	if (!queue_work(port->adapter->work_queue, &port->gid_pn_work))
-		put_device(&port->sysfs_device);
+		put_device(&port->dev);
 }
 
 /**
@@ -389,7 +389,7 @@
 	zfcp_scsi_schedule_rport_register(port);
  out:
 	atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
-	put_device(&port->sysfs_device);
+	put_device(&port->dev);
 	kmem_cache_free(zfcp_data.adisc_cache, adisc);
 }
 
@@ -436,7 +436,7 @@
 		container_of(work, struct zfcp_port, test_link_work);
 	int retval;
 
-	get_device(&port->sysfs_device);
+	get_device(&port->dev);
 	port->rport_task = RPORT_DEL;
 	zfcp_scsi_rport_work(&port->rport_work);
 
@@ -455,7 +455,7 @@
 	zfcp_erp_port_forced_reopen(port, 0, "fcltwk1", NULL);
 
 out:
-	put_device(&port->sysfs_device);
+	put_device(&port->dev);
 }
 
 /**
@@ -468,9 +468,9 @@
  */
 void zfcp_fc_test_link(struct zfcp_port *port)
 {
-	get_device(&port->sysfs_device);
+	get_device(&port->dev);
 	if (!queue_work(port->adapter->work_queue, &port->test_link_work))
-		put_device(&port->sysfs_device);
+		put_device(&port->dev);
 }
 
 static void zfcp_free_sg_env(struct zfcp_fc_gpn_ft *gpn_ft, int buf_num)
@@ -617,8 +617,7 @@
 
 	list_for_each_entry_safe(port, tmp, &remove_lh, list) {
 		zfcp_erp_port_shutdown(port, 0, "fcegpf2", NULL);
-		zfcp_device_unregister(&port->sysfs_device,
-				       &zfcp_sysfs_port_attrs);
+		zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs);
 	}
 
 	return ret;
@@ -731,7 +730,7 @@
 			return -EINVAL;
 
 		d_id = port->d_id;
-		put_device(&port->sysfs_device);
+		put_device(&port->dev);
 	} else
 		d_id = ntoh24(job->request->rqst_data.h_els.port_id);
 
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index e8fb4d9..6538742 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -3,7 +3,7 @@
  *
  * Implementation of FSF commands.
  *
- * Copyright IBM Corporation 2002, 2009
+ * Copyright IBM Corporation 2002, 2010
  */
 
 #define KMSG_COMPONENT "zfcp"
@@ -14,6 +14,8 @@
 #include "zfcp_ext.h"
 #include "zfcp_fc.h"
 #include "zfcp_dbf.h"
+#include "zfcp_qdio.h"
+#include "zfcp_reqlist.h"
 
 static void zfcp_fsf_request_timeout_handler(unsigned long data)
 {
@@ -393,7 +395,7 @@
 	case FSF_PROT_LINK_DOWN:
 		zfcp_fsf_link_down_info_eval(req, "fspse_5",
 					     &psq->link_down_info);
-		/* FIXME: reopening adapter now? better wait for link up */
+		/* go through reopen to flush pending requests */
 		zfcp_erp_adapter_reopen(adapter, 0, "fspse_6", req);
 		break;
 	case FSF_PROT_REEST_QUEUE:
@@ -457,15 +459,10 @@
 void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
 {
 	struct zfcp_fsf_req *req, *tmp;
-	unsigned long flags;
 	LIST_HEAD(remove_queue);
-	unsigned int i;
 
 	BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP);
-	spin_lock_irqsave(&adapter->req_list_lock, flags);
-	for (i = 0; i < REQUEST_LIST_SIZE; i++)
-		list_splice_init(&adapter->req_list[i], &remove_queue);
-	spin_unlock_irqrestore(&adapter->req_list_lock, flags);
+	zfcp_reqlist_move(adapter->req_list, &remove_queue);
 
 	list_for_each_entry_safe(req, tmp, &remove_queue, list) {
 		list_del(&req->list);
@@ -495,8 +492,6 @@
 	fc_host_port_id(shost) = ntoh24(bottom->s_id);
 	fc_host_speed(shost) = bottom->fc_link_speed;
 	fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
-	fc_host_supported_fc4s(shost)[2] = 1; /* FCP */
-	fc_host_active_fc4s(shost)[2] = 1; /* FCP */
 
 	adapter->hydra_version = bottom->adapter_type;
 	adapter->timer_ticks = bottom->timer_interval;
@@ -619,6 +614,10 @@
 		fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
 	fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
 	fc_host_supported_speeds(shost) = bottom->supported_speed;
+	memcpy(fc_host_supported_fc4s(shost), bottom->supported_fc4_types,
+	       FC_FC4_LIST_SIZE);
+	memcpy(fc_host_active_fc4s(shost), bottom->active_fc4_types,
+	       FC_FC4_LIST_SIZE);
 }
 
 static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
@@ -725,12 +724,12 @@
 	req->adapter = adapter;
 	req->fsf_command = fsf_cmd;
 	req->req_id = adapter->req_no;
-	req->queue_req.sbal_number = 1;
-	req->queue_req.sbal_first = req_q->first;
-	req->queue_req.sbal_last = req_q->first;
-	req->queue_req.sbale_curr = 1;
+	req->qdio_req.sbal_number = 1;
+	req->qdio_req.sbal_first = req_q->first;
+	req->qdio_req.sbal_last = req_q->first;
+	req->qdio_req.sbale_curr = 1;
 
-	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+	sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
 	sbale[0].addr = (void *) req->req_id;
 	sbale[0].flags |= SBAL_FLAGS0_COMMAND;
 
@@ -745,6 +744,7 @@
 			return ERR_PTR(-ENOMEM);
 		}
 
+		req->seq_no = adapter->fsf_req_seq_no;
 		req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
 		req->qtcb->prefix.req_id = req->req_id;
 		req->qtcb->prefix.ulp_info = 26;
@@ -752,8 +752,6 @@
 		req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION;
 		req->qtcb->header.req_handle = req->req_id;
 		req->qtcb->header.fsf_command = req->fsf_command;
-		req->seq_no = adapter->fsf_req_seq_no;
-		req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
 		sbale[1].addr = (void *) req->qtcb;
 		sbale[1].length = sizeof(struct fsf_qtcb);
 	}
@@ -770,25 +768,17 @@
 {
 	struct zfcp_adapter *adapter = req->adapter;
 	struct zfcp_qdio *qdio = adapter->qdio;
-	unsigned long	     flags;
-	int		     idx;
-	int		     with_qtcb = (req->qtcb != NULL);
+	int with_qtcb = (req->qtcb != NULL);
+	int req_id = req->req_id;
 
-	/* put allocated FSF request into hash table */
-	spin_lock_irqsave(&adapter->req_list_lock, flags);
-	idx = zfcp_reqlist_hash(req->req_id);
-	list_add_tail(&req->list, &adapter->req_list[idx]);
-	spin_unlock_irqrestore(&adapter->req_list_lock, flags);
+	zfcp_reqlist_add(adapter->req_list, req);
 
-	req->queue_req.qdio_outb_usage = atomic_read(&qdio->req_q.count);
+	req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q.count);
 	req->issued = get_clock();
-	if (zfcp_qdio_send(qdio, &req->queue_req)) {
+	if (zfcp_qdio_send(qdio, &req->qdio_req)) {
 		del_timer(&req->timer);
-		spin_lock_irqsave(&adapter->req_list_lock, flags);
 		/* lookup request again, list might have changed */
-		if (zfcp_reqlist_find_safe(adapter, req))
-			zfcp_reqlist_remove(adapter, req);
-		spin_unlock_irqrestore(&adapter->req_list_lock, flags);
+		zfcp_reqlist_find_rm(adapter->req_list, req_id);
 		zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1", req);
 		return -EIO;
 	}
@@ -826,9 +816,9 @@
 		goto out;
 	}
 
-	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+	sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
 	sbale[2].flags |= SBAL_FLAGS_LAST_ENTRY;
-	req->queue_req.sbale_curr = 2;
+	req->qdio_req.sbale_curr = 2;
 
 	sr_buf = mempool_alloc(adapter->pool.status_read_data, GFP_ATOMIC);
 	if (!sr_buf) {
@@ -837,7 +827,7 @@
 	}
 	memset(sr_buf, 0, sizeof(*sr_buf));
 	req->data = sr_buf;
-	sbale = zfcp_qdio_sbale_curr(qdio, &req->queue_req);
+	sbale = zfcp_qdio_sbale_curr(qdio, &req->qdio_req);
 	sbale->addr = (void *) sr_buf;
 	sbale->length = sizeof(*sr_buf);
 
@@ -934,7 +924,7 @@
 		       ZFCP_STATUS_COMMON_UNBLOCKED)))
 		goto out_error_free;
 
-	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+	sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
@@ -1029,7 +1019,7 @@
 {
 	struct zfcp_adapter *adapter = req->adapter;
 	struct qdio_buffer_element *sbale = zfcp_qdio_sbale_req(adapter->qdio,
-							       &req->queue_req);
+							       &req->qdio_req);
 	u32 feat = adapter->adapter_features;
 	int bytes;
 
@@ -1047,15 +1037,15 @@
 		return 0;
 	}
 
-	bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->queue_req,
+	bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req,
 					SBAL_FLAGS0_TYPE_WRITE_READ,
 					sg_req, max_sbals);
 	if (bytes <= 0)
 		return -EIO;
 	req->qtcb->bottom.support.req_buf_length = bytes;
-	req->queue_req.sbale_curr = ZFCP_LAST_SBALE_PER_SBAL;
+	req->qdio_req.sbale_curr = ZFCP_LAST_SBALE_PER_SBAL;
 
-	bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->queue_req,
+	bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req,
 					SBAL_FLAGS0_TYPE_WRITE_READ,
 					sg_resp, max_sbals);
 	req->qtcb->bottom.support.resp_buf_length = bytes;
@@ -1251,7 +1241,7 @@
 	}
 
 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
-	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+	sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
@@ -1262,13 +1252,13 @@
 			FSF_FEATURE_UPDATE_ALERT;
 	req->erp_action = erp_action;
 	req->handler = zfcp_fsf_exchange_config_data_handler;
-	erp_action->fsf_req = req;
+	erp_action->fsf_req_id = req->req_id;
 
 	zfcp_fsf_start_erp_timer(req);
 	retval = zfcp_fsf_req_send(req);
 	if (retval) {
 		zfcp_fsf_req_free(req);
-		erp_action->fsf_req = NULL;
+		erp_action->fsf_req_id = 0;
 	}
 out:
 	spin_unlock_bh(&qdio->req_q_lock);
@@ -1293,7 +1283,7 @@
 		goto out_unlock;
 	}
 
-	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+	sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 	req->handler = zfcp_fsf_exchange_config_data_handler;
@@ -1349,19 +1339,19 @@
 	}
 
 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
-	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+	sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
 	req->handler = zfcp_fsf_exchange_port_data_handler;
 	req->erp_action = erp_action;
-	erp_action->fsf_req = req;
+	erp_action->fsf_req_id = req->req_id;
 
 	zfcp_fsf_start_erp_timer(req);
 	retval = zfcp_fsf_req_send(req);
 	if (retval) {
 		zfcp_fsf_req_free(req);
-		erp_action->fsf_req = NULL;
+		erp_action->fsf_req_id = 0;
 	}
 out:
 	spin_unlock_bh(&qdio->req_q_lock);
@@ -1398,7 +1388,7 @@
 	if (data)
 		req->data = data;
 
-	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+	sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
@@ -1484,7 +1474,7 @@
 	}
 
 out:
-	put_device(&port->sysfs_device);
+	put_device(&port->dev);
 }
 
 /**
@@ -1513,7 +1503,7 @@
 	}
 
 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
-	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+	sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
         sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
         sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
@@ -1521,15 +1511,15 @@
 	hton24(req->qtcb->bottom.support.d_id, port->d_id);
 	req->data = port;
 	req->erp_action = erp_action;
-	erp_action->fsf_req = req;
-	get_device(&port->sysfs_device);
+	erp_action->fsf_req_id = req->req_id;
+	get_device(&port->dev);
 
 	zfcp_fsf_start_erp_timer(req);
 	retval = zfcp_fsf_req_send(req);
 	if (retval) {
 		zfcp_fsf_req_free(req);
-		erp_action->fsf_req = NULL;
-		put_device(&port->sysfs_device);
+		erp_action->fsf_req_id = 0;
+		put_device(&port->dev);
 	}
 out:
 	spin_unlock_bh(&qdio->req_q_lock);
@@ -1583,7 +1573,7 @@
 	}
 
 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
-	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+	sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
@@ -1591,13 +1581,13 @@
 	req->data = erp_action->port;
 	req->erp_action = erp_action;
 	req->qtcb->header.port_handle = erp_action->port->handle;
-	erp_action->fsf_req = req;
+	erp_action->fsf_req_id = req->req_id;
 
 	zfcp_fsf_start_erp_timer(req);
 	retval = zfcp_fsf_req_send(req);
 	if (retval) {
 		zfcp_fsf_req_free(req);
-		erp_action->fsf_req = NULL;
+		erp_action->fsf_req_id = 0;
 	}
 out:
 	spin_unlock_bh(&qdio->req_q_lock);
@@ -1660,7 +1650,7 @@
 	}
 
 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
-	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+	sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
@@ -1715,7 +1705,7 @@
 	}
 
 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
-	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+	sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
@@ -1809,7 +1799,7 @@
 	}
 
 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
-	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+	sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
@@ -1817,13 +1807,13 @@
 	req->qtcb->header.port_handle = erp_action->port->handle;
 	req->erp_action = erp_action;
 	req->handler = zfcp_fsf_close_physical_port_handler;
-	erp_action->fsf_req = req;
+	erp_action->fsf_req_id = req->req_id;
 
 	zfcp_fsf_start_erp_timer(req);
 	retval = zfcp_fsf_req_send(req);
 	if (retval) {
 		zfcp_fsf_req_free(req);
-		erp_action->fsf_req = NULL;
+		erp_action->fsf_req_id = 0;
 	}
 out:
 	spin_unlock_bh(&qdio->req_q_lock);
@@ -1982,7 +1972,7 @@
 	}
 
 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
-	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+	sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
         sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
         sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
@@ -1991,7 +1981,7 @@
 	req->handler = zfcp_fsf_open_unit_handler;
 	req->data = erp_action->unit;
 	req->erp_action = erp_action;
-	erp_action->fsf_req = req;
+	erp_action->fsf_req_id = req->req_id;
 
 	if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE))
 		req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING;
@@ -2000,7 +1990,7 @@
 	retval = zfcp_fsf_req_send(req);
 	if (retval) {
 		zfcp_fsf_req_free(req);
-		erp_action->fsf_req = NULL;
+		erp_action->fsf_req_id = 0;
 	}
 out:
 	spin_unlock_bh(&qdio->req_q_lock);
@@ -2068,7 +2058,7 @@
 	}
 
 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
-	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+	sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
@@ -2077,13 +2067,13 @@
 	req->handler = zfcp_fsf_close_unit_handler;
 	req->data = erp_action->unit;
 	req->erp_action = erp_action;
-	erp_action->fsf_req = req;
+	erp_action->fsf_req_id = req->req_id;
 
 	zfcp_fsf_start_erp_timer(req);
 	retval = zfcp_fsf_req_send(req);
 	if (retval) {
 		zfcp_fsf_req_free(req);
-		erp_action->fsf_req = NULL;
+		erp_action->fsf_req_id = 0;
 	}
 out:
 	spin_unlock_bh(&qdio->req_q_lock);
@@ -2111,8 +2101,8 @@
 	blktrc.magic = ZFCP_BLK_DRV_DATA_MAGIC;
 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
 		blktrc.flags |= ZFCP_BLK_REQ_ERROR;
-	blktrc.inb_usage = req->queue_req.qdio_inb_usage;
-	blktrc.outb_usage = req->queue_req.qdio_outb_usage;
+	blktrc.inb_usage = req->qdio_req.qdio_inb_usage;
+	blktrc.outb_usage = req->qdio_req.qdio_outb_usage;
 
 	if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA) {
 		blktrc.flags |= ZFCP_BLK_LAT_VALID;
@@ -2169,12 +2159,7 @@
 	zfcp_fsf_req_trace(req, scpnt);
 
 skip_fsfstatus:
-	if (scpnt->result != 0)
-		zfcp_dbf_scsi_result("erro", 3, req->adapter->dbf, scpnt, req);
-	else if (scpnt->retries > 0)
-		zfcp_dbf_scsi_result("retr", 4, req->adapter->dbf, scpnt, req);
-	else
-		zfcp_dbf_scsi_result("norm", 6, req->adapter->dbf, scpnt, req);
+	zfcp_dbf_scsi_result(req->adapter->dbf, scpnt, req);
 
 	scpnt->host_scribble = NULL;
 	(scpnt->scsi_done) (scpnt);
@@ -2274,7 +2259,7 @@
 	else {
 		zfcp_fsf_send_fcp_command_task_handler(req);
 		req->unit = NULL;
-		put_device(&unit->sysfs_device);
+		put_device(&unit->dev);
 	}
 }
 
@@ -2312,7 +2297,7 @@
 	}
 
 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
-	get_device(&unit->sysfs_device);
+	get_device(&unit->dev);
 	req->unit = unit;
 	req->data = scsi_cmnd;
 	req->handler = zfcp_fsf_send_fcp_command_handler;
@@ -2346,11 +2331,11 @@
 	fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
 	zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd);
 
-	real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->queue_req, sbtype,
+	real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sbtype,
 					     scsi_sglist(scsi_cmnd),
 					     FSF_MAX_SBALS_PER_REQ);
 	if (unlikely(real_bytes < 0)) {
-		if (req->queue_req.sbal_number >= FSF_MAX_SBALS_PER_REQ) {
+		if (req->qdio_req.sbal_number >= FSF_MAX_SBALS_PER_REQ) {
 			dev_err(&adapter->ccw_device->dev,
 				"Oversize data package, unit 0x%016Lx "
 				"on port 0x%016Lx closed\n",
@@ -2369,7 +2354,7 @@
 	goto out;
 
 failed_scsi_cmnd:
-	put_device(&unit->sysfs_device);
+	put_device(&unit->dev);
 	zfcp_fsf_req_free(req);
 	scsi_cmnd->host_scribble = NULL;
 out:
@@ -2415,7 +2400,7 @@
 	req->qtcb->bottom.io.service_class = FSF_CLASS_3;
 	req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN;
 
-	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+	sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
@@ -2478,14 +2463,14 @@
 
 	req->handler = zfcp_fsf_control_file_handler;
 
-	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+	sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
 	sbale[0].flags |= direction;
 
 	bottom = &req->qtcb->bottom.support;
 	bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE;
 	bottom->option = fsf_cfdc->option;
 
-	bytes = zfcp_qdio_sbals_from_sg(qdio, &req->queue_req,
+	bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
 					direction, fsf_cfdc->sg,
 					FSF_MAX_SBALS_PER_REQ);
 	if (bytes != ZFCP_CFDC_MAX_SIZE) {
@@ -2516,15 +2501,14 @@
 	struct qdio_buffer *sbal = qdio->resp_q.sbal[sbal_idx];
 	struct qdio_buffer_element *sbale;
 	struct zfcp_fsf_req *fsf_req;
-	unsigned long flags, req_id;
+	unsigned long req_id;
 	int idx;
 
 	for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) {
 
 		sbale = &sbal->element[idx];
 		req_id = (unsigned long) sbale->addr;
-		spin_lock_irqsave(&adapter->req_list_lock, flags);
-		fsf_req = zfcp_reqlist_find(adapter, req_id);
+		fsf_req = zfcp_reqlist_find_rm(adapter->req_list, req_id);
 
 		if (!fsf_req)
 			/*
@@ -2534,11 +2518,8 @@
 			panic("error: unknown req_id (%lx) on adapter %s.\n",
 			      req_id, dev_name(&adapter->ccw_device->dev));
 
-		list_del(&fsf_req->list);
-		spin_unlock_irqrestore(&adapter->req_list_lock, flags);
-
-		fsf_req->queue_req.sbal_response = sbal_idx;
-		fsf_req->queue_req.qdio_inb_usage =
+		fsf_req->qdio_req.sbal_response = sbal_idx;
+		fsf_req->qdio_req.qdio_inb_usage =
 			atomic_read(&qdio->resp_q.count);
 		zfcp_fsf_req_complete(fsf_req);
 
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 6c5228b..71b97ff 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -10,6 +10,7 @@
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
 #include "zfcp_ext.h"
+#include "zfcp_qdio.h"
 
 #define QBUFF_PER_PAGE		(PAGE_SIZE / sizeof(struct qdio_buffer))
 
@@ -28,12 +29,6 @@
 	return 0;
 }
 
-static struct qdio_buffer_element *
-zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx)
-{
-	return &q->sbal[sbal_idx]->element[sbale_idx];
-}
-
 static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id)
 {
 	struct zfcp_adapter *adapter = qdio->adapter;
@@ -106,7 +101,7 @@
 
 	if (unlikely(retval)) {
 		atomic_set(&queue->count, count);
-		/* FIXME: Recover this with an adapter reopen? */
+		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdrpb_1", NULL);
 	} else {
 		queue->first += count;
 		queue->first %= QDIO_MAX_BUFFERS_PER_Q;
@@ -145,32 +140,8 @@
 	zfcp_qdio_resp_put_back(qdio, count);
 }
 
-/**
- * zfcp_qdio_sbale_req - return ptr to SBALE of req_q for a struct zfcp_fsf_req
- * @qdio: pointer to struct zfcp_qdio
- * @q_rec: pointer to struct zfcp_queue_rec
- * Returns: pointer to qdio_buffer_element (SBALE) structure
- */
-struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_qdio *qdio,
-						struct zfcp_queue_req *q_req)
-{
-	return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last, 0);
-}
-
-/**
- * zfcp_qdio_sbale_curr - return curr SBALE on req_q for a struct zfcp_fsf_req
- * @fsf_req: pointer to struct fsf_req
- * Returns: pointer to qdio_buffer_element (SBALE) structure
- */
-struct qdio_buffer_element *zfcp_qdio_sbale_curr(struct zfcp_qdio *qdio,
-						 struct zfcp_queue_req *q_req)
-{
-	return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last,
-			       q_req->sbale_curr);
-}
-
 static void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio,
-				 struct zfcp_queue_req *q_req, int max_sbals)
+				 struct zfcp_qdio_req *q_req, int max_sbals)
 {
 	int count = atomic_read(&qdio->req_q.count);
 	count = min(count, max_sbals);
@@ -179,7 +150,7 @@
 }
 
 static struct qdio_buffer_element *
-zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req,
+zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
 		     unsigned long sbtype)
 {
 	struct qdio_buffer_element *sbale;
@@ -214,7 +185,7 @@
 }
 
 static struct qdio_buffer_element *
-zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req,
+zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
 		     unsigned int sbtype)
 {
 	if (q_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL)
@@ -224,7 +195,7 @@
 }
 
 static void zfcp_qdio_undo_sbals(struct zfcp_qdio *qdio,
-				 struct zfcp_queue_req *q_req)
+				 struct zfcp_qdio_req *q_req)
 {
 	struct qdio_buffer **sbal = qdio->req_q.sbal;
 	int first = q_req->sbal_first;
@@ -235,7 +206,7 @@
 }
 
 static int zfcp_qdio_fill_sbals(struct zfcp_qdio *qdio,
-				struct zfcp_queue_req *q_req,
+				struct zfcp_qdio_req *q_req,
 				unsigned int sbtype, void *start_addr,
 				unsigned int total_length)
 {
@@ -271,8 +242,7 @@
  * @max_sbals: upper bound for number of SBALs to be used
  * Returns: number of bytes, or error (negative)
  */
-int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio,
-			    struct zfcp_queue_req *q_req,
+int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
 			    unsigned long sbtype, struct scatterlist *sg,
 			    int max_sbals)
 {
@@ -304,10 +274,10 @@
 /**
  * zfcp_qdio_send - set PCI flag in first SBALE and send req to QDIO
  * @qdio: pointer to struct zfcp_qdio
- * @q_req: pointer to struct zfcp_queue_req
+ * @q_req: pointer to struct zfcp_qdio_req
  * Returns: 0 on success, error otherwise
  */
-int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req)
+int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
 {
 	struct zfcp_qdio_queue *req_q = &qdio->req_q;
 	int first = q_req->sbal_first;
diff --git a/drivers/s390/scsi/zfcp_qdio.h b/drivers/s390/scsi/zfcp_qdio.h
new file mode 100644
index 0000000..8cca546
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_qdio.h
@@ -0,0 +1,109 @@
+/*
+ * zfcp device driver
+ *
+ * Header file for zfcp qdio interface
+ *
+ * Copyright IBM Corporation 2010
+ */
+
+#ifndef ZFCP_QDIO_H
+#define ZFCP_QDIO_H
+
+#include <asm/qdio.h>
+
+/**
+ * struct zfcp_qdio_queue - qdio queue buffer, zfcp index and free count
+ * @sbal: qdio buffers
+ * @first: index of next free buffer in queue
+ * @count: number of free buffers in queue
+ */
+struct zfcp_qdio_queue {
+	struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q];
+	u8		   first;
+	atomic_t           count;
+};
+
+/**
+ * struct zfcp_qdio - basic qdio data structure
+ * @resp_q: response queue
+ * @req_q: request queue
+ * @stat_lock: lock to protect req_q_util and req_q_time
+ * @req_q_lock: lock to serialize access to request queue
+ * @req_q_time: time of last fill level change
+ * @req_q_util: used for accounting
+ * @req_q_full: queue full incidents
+ * @req_q_wq: used to wait for SBAL availability
+ * @adapter: adapter used in conjunction with this qdio structure
+ */
+struct zfcp_qdio {
+	struct zfcp_qdio_queue	resp_q;
+	struct zfcp_qdio_queue	req_q;
+	spinlock_t		stat_lock;
+	spinlock_t		req_q_lock;
+	unsigned long long	req_q_time;
+	u64			req_q_util;
+	atomic_t		req_q_full;
+	wait_queue_head_t	req_q_wq;
+	struct zfcp_adapter	*adapter;
+};
+
+/**
+ * struct zfcp_qdio_req - qdio queue related values for a request
+ * @sbal_number: number of free sbals
+ * @sbal_first: first sbal for this request
+ * @sbal_last: last sbal for this request
+ * @sbal_limit: last possible sbal for this request
+ * @sbale_curr: current sbale at creation of this request
+ * @sbal_response: sbal used in interrupt
+ * @qdio_outb_usage: usage of outbound queue
+ * @qdio_inb_usage: usage of inbound queue
+ */
+struct zfcp_qdio_req {
+	u8	sbal_number;
+	u8	sbal_first;
+	u8	sbal_last;
+	u8	sbal_limit;
+	u8	sbale_curr;
+	u8	sbal_response;
+	u16	qdio_outb_usage;
+	u16	qdio_inb_usage;
+};
+
+/**
+ * zfcp_qdio_sbale - return pointer to sbale in qdio queue
+ * @q: queue where to find sbal
+ * @sbal_idx: sbal index in queue
+ * @sbale_idx: sbale index in sbal
+ */
+static inline struct qdio_buffer_element *
+zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx)
+{
+	return &q->sbal[sbal_idx]->element[sbale_idx];
+}
+
+/**
+ * zfcp_qdio_sbale_req - return pointer to sbale on req_q for a request
+ * @qdio: pointer to struct zfcp_qdio
+ * @q_req: pointer to struct zfcp_qdio_req
+ * Returns: pointer to qdio_buffer_element (sbale) structure
+ */
+static inline struct qdio_buffer_element *
+zfcp_qdio_sbale_req(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
+{
+	return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last, 0);
+}
+
+/**
+ * zfcp_qdio_sbale_curr - return current sbale on req_q for a request
+ * @qdio: pointer to struct zfcp_qdio
+ * @q_req: pointer to struct zfcp_qdio_req
+ * Returns: pointer to qdio_buffer_element (sbale) structure
+ */
+static inline struct qdio_buffer_element *
+zfcp_qdio_sbale_curr(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
+{
+	return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last,
+			       q_req->sbale_curr);
+}
+
+#endif /* ZFCP_QDIO_H */
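
A minimal sketch of how a caller combines the inline helpers above with a
struct zfcp_qdio_req; the function name below is made up, but the pattern
mirrors the zfcp_fsf.c hunks earlier in this diff:

	/* Hypothetical caller; zfcp_fsf.c builds its FSF requests this way. */
	static void example_build_request(struct zfcp_qdio *qdio,
					  struct zfcp_qdio_req *q_req)
	{
		struct qdio_buffer_element *sbale;

		/* first SBALE of the request's last (current) SBAL */
		sbale = zfcp_qdio_sbale_req(qdio, q_req);
		sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
		sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;

		/* SBALE the request is currently filling; zfcp_qdio_fill_sbals()
		 * stores the data address and length in it */
		sbale = zfcp_qdio_sbale_curr(qdio, q_req);
		sbale->addr = NULL;		/* placeholder data address */
		sbale->length = 0;		/* placeholder data length */
	}
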
diff --git a/drivers/s390/scsi/zfcp_reqlist.h b/drivers/s390/scsi/zfcp_reqlist.h
new file mode 100644
index 0000000..a72d1b7
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_reqlist.h
@@ -0,0 +1,183 @@
+/*
+ * zfcp device driver
+ *
+ * Data structure and helper functions for tracking pending FSF
+ * requests.
+ *
+ * Copyright IBM Corporation 2009
+ */
+
+#ifndef ZFCP_REQLIST_H
+#define ZFCP_REQLIST_H
+
+/* number of hash buckets */
+#define ZFCP_REQ_LIST_BUCKETS 128
+
+/**
+ * struct zfcp_reqlist - Container for request list (reqlist)
+ * @lock: Spinlock for protecting the hash list
+ * @buckets: Array of hash buckets, each one is a list of requests in this bucket
+ */
+struct zfcp_reqlist {
+	spinlock_t lock;
+	struct list_head buckets[ZFCP_REQ_LIST_BUCKETS];
+};
+
+static inline int zfcp_reqlist_hash(unsigned long req_id)
+{
+	return req_id % ZFCP_REQ_LIST_BUCKETS;
+}
+
+/**
+ * zfcp_reqlist_alloc - Allocate and initialize reqlist
+ *
+ * Returns pointer to allocated reqlist on success, or NULL on
+ * allocation failure.
+ */
+static inline struct zfcp_reqlist *zfcp_reqlist_alloc(void)
+{
+	unsigned int i;
+	struct zfcp_reqlist *rl;
+
+	rl = kzalloc(sizeof(struct zfcp_reqlist), GFP_KERNEL);
+	if (!rl)
+		return NULL;
+
+	spin_lock_init(&rl->lock);
+
+	for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++)
+		INIT_LIST_HEAD(&rl->buckets[i]);
+
+	return rl;
+}
+
+/**
+ * zfcp_reqlist_isempty - Check whether the request list is empty
+ * @rl: pointer to reqlist
+ *
+ * Returns: 1 if list is empty, 0 if not
+ */
+static inline int zfcp_reqlist_isempty(struct zfcp_reqlist *rl)
+{
+	unsigned int i;
+
+	for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++)
+		if (!list_empty(&rl->buckets[i]))
+			return 0;
+	return 1;
+}
+
+/**
+ * zfcp_reqlist_free - Free allocated memory for reqlist
+ * @rl: The reqlist where to free memory
+ */
+static inline void zfcp_reqlist_free(struct zfcp_reqlist *rl)
+{
+	/* sanity check */
+	BUG_ON(!zfcp_reqlist_isempty(rl));
+
+	kfree(rl);
+}
+
+static inline struct zfcp_fsf_req *
+_zfcp_reqlist_find(struct zfcp_reqlist *rl, unsigned long req_id)
+{
+	struct zfcp_fsf_req *req;
+	unsigned int i;
+
+	i = zfcp_reqlist_hash(req_id);
+	list_for_each_entry(req, &rl->buckets[i], list)
+		if (req->req_id == req_id)
+			return req;
+	return NULL;
+}
+
+/**
+ * zfcp_reqlist_find - Lookup FSF request by its request id
+ * @rl: The reqlist where to lookup the FSF request
+ * @req_id: The request id to look for
+ *
+ * Returns a pointer to the FSF request with the specified request id
+ * or NULL if there is no known FSF request with this id.
+ */
+static inline struct zfcp_fsf_req *
+zfcp_reqlist_find(struct zfcp_reqlist *rl, unsigned long req_id)
+{
+	unsigned long flags;
+	struct zfcp_fsf_req *req;
+
+	spin_lock_irqsave(&rl->lock, flags);
+	req = _zfcp_reqlist_find(rl, req_id);
+	spin_unlock_irqrestore(&rl->lock, flags);
+
+	return req;
+}
+
+/**
+ * zfcp_reqlist_find_rm - Lookup request by id and remove it from reqlist
+ * @rl: reqlist where to search and remove entry
+ * @req_id: The request id of the request to look for
+ *
+ * This function tries to find the FSF request with the specified
+ * id and then removes it from the reqlist. The reqlist lock is held
+ * during both steps of the operation.
+ *
+ * Returns: Pointer to the FSF request if the request has been found,
+ * NULL if it has not been found.
+ */
+static inline struct zfcp_fsf_req *
+zfcp_reqlist_find_rm(struct zfcp_reqlist *rl, unsigned long req_id)
+{
+	unsigned long flags;
+	struct zfcp_fsf_req *req;
+
+	spin_lock_irqsave(&rl->lock, flags);
+	req = _zfcp_reqlist_find(rl, req_id);
+	if (req)
+		list_del(&req->list);
+	spin_unlock_irqrestore(&rl->lock, flags);
+
+	return req;
+}
+
+/**
+ * zfcp_reqlist_add - Add entry to reqlist
+ * @rl: reqlist where to add the entry
+ * @req: The entry to add
+ *
+ * The request id always increases. As an optimization new requests
+ * are added here with list_add_tail at the end of the bucket lists
+ * while old requests are looked up starting at the beginning of the
+ * lists.
+ */
+static inline void zfcp_reqlist_add(struct zfcp_reqlist *rl,
+				    struct zfcp_fsf_req *req)
+{
+	unsigned int i;
+	unsigned long flags;
+
+	i = zfcp_reqlist_hash(req->req_id);
+
+	spin_lock_irqsave(&rl->lock, flags);
+	list_add_tail(&req->list, &rl->buckets[i]);
+	spin_unlock_irqrestore(&rl->lock, flags);
+}
+
+/**
+ * zfcp_reqlist_move - Move all entries from reqlist to simple list
+ * @rl: The zfcp_reqlist where to remove all entries
+ * @list: The list where to move all entries
+ */
+static inline void zfcp_reqlist_move(struct zfcp_reqlist *rl,
+				     struct list_head *list)
+{
+	unsigned int i;
+	unsigned long flags;
+
+	spin_lock_irqsave(&rl->lock, flags);
+	for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++)
+		list_splice_init(&rl->buckets[i], list);
+	spin_unlock_irqrestore(&rl->lock, flags);
+}
+
+#endif /* ZFCP_REQLIST_H */
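
A minimal sketch of the intended life cycle of the API above, assuming the
struct zfcp_fsf_req members (req_id, list) the helpers rely on; the function
names here are illustrative, the real callers are the zfcp_fsf.c and
zfcp_scsi.c hunks elsewhere in this diff plus the adapter setup and teardown
code:

	/* Hypothetical callers of the reqlist API. */
	static int example_adapter_setup(struct zfcp_adapter *adapter)
	{
		adapter->req_list = zfcp_reqlist_alloc();
		return adapter->req_list ? 0 : -ENOMEM;
	}

	static void example_send(struct zfcp_adapter *adapter,
				 struct zfcp_fsf_req *req)
	{
		/* hash the pending request by its request id */
		zfcp_reqlist_add(adapter->req_list, req);
	}

	static void example_response(struct zfcp_adapter *adapter,
				     unsigned long req_id)
	{
		struct zfcp_fsf_req *req;

		/* look up by request id and unhash in one locked step */
		req = zfcp_reqlist_find_rm(adapter->req_list, req_id);
		if (req)
			zfcp_fsf_req_complete(req);
	}
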
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 8e6fc68..c3c4178 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -3,7 +3,7 @@
  *
  * Interface to Linux SCSI midlayer.
  *
- * Copyright IBM Corporation 2002, 2009
+ * Copyright IBM Corporation 2002, 2010
  */
 
 #define KMSG_COMPONENT "zfcp"
@@ -15,6 +15,7 @@
 #include "zfcp_ext.h"
 #include "zfcp_dbf.h"
 #include "zfcp_fc.h"
+#include "zfcp_reqlist.h"
 
 static unsigned int default_depth = 32;
 module_param_named(queue_depth, default_depth, uint, 0600);
@@ -43,7 +44,7 @@
 {
 	struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata;
 	unit->device = NULL;
-	put_device(&unit->sysfs_device);
+	put_device(&unit->dev);
 }
 
 static int zfcp_scsi_slave_configure(struct scsi_device *sdp)
@@ -59,10 +60,9 @@
 {
 	struct zfcp_adapter *adapter =
 		(struct zfcp_adapter *) scpnt->device->host->hostdata[0];
+
 	set_host_byte(scpnt, result);
-	if ((scpnt->device != NULL) && (scpnt->device->host != NULL))
-		zfcp_dbf_scsi_result("fail", 4, adapter->dbf, scpnt, NULL);
-	/* return directly */
+	zfcp_dbf_scsi_fail_send(adapter->dbf, scpnt);
 	scpnt->scsi_done(scpnt);
 }
 
@@ -86,18 +86,10 @@
 	adapter = (struct zfcp_adapter *) scpnt->device->host->hostdata[0];
 	unit = scpnt->device->hostdata;
 
-	BUG_ON(!adapter || (adapter != unit->port->adapter));
-	BUG_ON(!scpnt->scsi_done);
-
-	if (unlikely(!unit)) {
-		zfcp_scsi_command_fail(scpnt, DID_NO_CONNECT);
-		return 0;
-	}
-
 	scsi_result = fc_remote_port_chkready(rport);
 	if (unlikely(scsi_result)) {
 		scpnt->result = scsi_result;
-		zfcp_dbf_scsi_result("fail", 4, adapter->dbf, scpnt, NULL);
+		zfcp_dbf_scsi_fail_send(adapter->dbf, scpnt);
 		scpnt->scsi_done(scpnt);
 		return 0;
 	}
@@ -189,9 +181,7 @@
 	/* avoid race condition between late normal completion and abort */
 	write_lock_irqsave(&adapter->abort_lock, flags);
 
-	spin_lock(&adapter->req_list_lock);
-	old_req = zfcp_reqlist_find(adapter, old_reqid);
-	spin_unlock(&adapter->req_list_lock);
+	old_req = zfcp_reqlist_find(adapter->req_list, old_reqid);
 	if (!old_req) {
 		write_unlock_irqrestore(&adapter->abort_lock, flags);
 		zfcp_dbf_scsi_abort("lte1", adapter->dbf, scpnt, NULL,
@@ -521,7 +511,7 @@
 
 	if (port) {
 		zfcp_erp_port_reopen(port, 0, "sctrpi1", NULL);
-		put_device(&port->sysfs_device);
+		put_device(&port->dev);
 	}
 }
 
@@ -563,23 +553,23 @@
 
 void zfcp_scsi_schedule_rport_register(struct zfcp_port *port)
 {
-	get_device(&port->sysfs_device);
+	get_device(&port->dev);
 	port->rport_task = RPORT_ADD;
 
 	if (!queue_work(port->adapter->work_queue, &port->rport_work))
-		put_device(&port->sysfs_device);
+		put_device(&port->dev);
 }
 
 void zfcp_scsi_schedule_rport_block(struct zfcp_port *port)
 {
-	get_device(&port->sysfs_device);
+	get_device(&port->dev);
 	port->rport_task = RPORT_DEL;
 
 	if (port->rport && queue_work(port->adapter->work_queue,
 				      &port->rport_work))
 		return;
 
-	put_device(&port->sysfs_device);
+	put_device(&port->dev);
 }
 
 void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *adapter)
@@ -608,7 +598,7 @@
 		}
 	}
 
-	put_device(&port->sysfs_device);
+	put_device(&port->dev);
 }
 
 
@@ -626,7 +616,7 @@
 				 scsilun_to_int((struct scsi_lun *)
 						&unit->fcp_lun), 0);
 
-	put_device(&unit->sysfs_device);
+	put_device(&unit->dev);
 }
 
 struct fc_function_template zfcp_transport_functions = {
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index f539e00..a43035d 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -3,7 +3,7 @@
  *
  * sysfs attributes.
  *
- * Copyright IBM Corporation 2008, 2009
+ * Copyright IBM Corporation 2008, 2010
  */
 
 #define KMSG_COMPONENT "zfcp"
@@ -19,8 +19,7 @@
 						   struct device_attribute *at,\
 						   char *buf)		       \
 {									       \
-	struct _feat_def *_feat = container_of(dev, struct _feat_def,	       \
-					       sysfs_device);		       \
+	struct _feat_def *_feat = container_of(dev, struct _feat_def, dev);    \
 									       \
 	return sprintf(buf, _format, _value);				       \
 }									       \
@@ -87,8 +86,7 @@
 						struct device_attribute *attr, \
 						char *buf)		       \
 {									       \
-	struct _feat_def *_feat = container_of(dev, struct _feat_def,	       \
-					       sysfs_device);		       \
+	struct _feat_def *_feat = container_of(dev, struct _feat_def, dev);    \
 									       \
 	if (atomic_read(&_feat->status) & ZFCP_STATUS_COMMON_ERP_FAILED)       \
 		return sprintf(buf, "1\n");				       \
@@ -99,12 +97,11 @@
 						 struct device_attribute *attr,\
 						 const char *buf, size_t count)\
 {									       \
-	struct _feat_def *_feat = container_of(dev, struct _feat_def,	       \
-					       sysfs_device);		       \
+	struct _feat_def *_feat = container_of(dev, struct _feat_def, dev);    \
 	unsigned long val;						       \
 	int retval = 0;							       \
 									       \
-	if (!(_feat && get_device(&_feat->sysfs_device)))		       \
+	if (!(_feat && get_device(&_feat->dev)))			       \
 		return -EBUSY;						       \
 									       \
 	if (strict_strtoul(buf, 0, &val) || val != 0) {			       \
@@ -118,7 +115,7 @@
 				  _reopen_id, NULL);			       \
 	zfcp_erp_wait(_adapter);					       \
 out:									       \
-	put_device(&_feat->sysfs_device);				       \
+	put_device(&_feat->dev);					       \
 	return retval ? retval : (ssize_t) count;			       \
 }									       \
 static ZFCP_DEV_ATTR(_feat, failed, S_IWUSR | S_IRUGO,			       \
@@ -224,10 +221,10 @@
 	list_del(&port->list);
 	write_unlock_irq(&adapter->port_list_lock);
 
-	put_device(&port->sysfs_device);
+	put_device(&port->dev);
 
 	zfcp_erp_port_shutdown(port, 0, "syprs_1", NULL);
-	zfcp_device_unregister(&port->sysfs_device, &zfcp_sysfs_port_attrs);
+	zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs);
  out:
 	zfcp_ccw_adapter_put(adapter);
 	return retval ? retval : (ssize_t) count;
@@ -258,13 +255,12 @@
 					 struct device_attribute *attr,
 					 const char *buf, size_t count)
 {
-	struct zfcp_port *port = container_of(dev, struct zfcp_port,
-					      sysfs_device);
+	struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
 	struct zfcp_unit *unit;
 	u64 fcp_lun;
 	int retval = -EINVAL;
 
-	if (!(port && get_device(&port->sysfs_device)))
+	if (!(port && get_device(&port->dev)))
 		return -EBUSY;
 
 	if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun))
@@ -280,7 +276,7 @@
 	zfcp_erp_wait(unit->port->adapter);
 	flush_work(&unit->scsi_work);
 out:
-	put_device(&port->sysfs_device);
+	put_device(&port->dev);
 	return retval ? retval : (ssize_t) count;
 }
 static DEVICE_ATTR(unit_add, S_IWUSR, NULL, zfcp_sysfs_unit_add_store);
@@ -289,13 +285,12 @@
 					    struct device_attribute *attr,
 					    const char *buf, size_t count)
 {
-	struct zfcp_port *port = container_of(dev, struct zfcp_port,
-					      sysfs_device);
+	struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
 	struct zfcp_unit *unit;
 	u64 fcp_lun;
 	int retval = -EINVAL;
 
-	if (!(port && get_device(&port->sysfs_device)))
+	if (!(port && get_device(&port->dev)))
 		return -EBUSY;
 
 	if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun))
@@ -314,12 +309,12 @@
 	list_del(&unit->list);
 	write_unlock_irq(&port->unit_list_lock);
 
-	put_device(&unit->sysfs_device);
+	put_device(&unit->dev);
 
 	zfcp_erp_unit_shutdown(unit, 0, "syurs_1", NULL);
-	zfcp_device_unregister(&unit->sysfs_device, &zfcp_sysfs_unit_attrs);
+	zfcp_device_unregister(&unit->dev, &zfcp_sysfs_unit_attrs);
 out:
-	put_device(&port->sysfs_device);
+	put_device(&port->dev);
 	return retval ? retval : (ssize_t) count;
 }
 static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store);
diff --git a/drivers/sbus/char/openprom.c b/drivers/sbus/char/openprom.c
index 75ac19b..fc2f676 100644
--- a/drivers/sbus/char/openprom.c
+++ b/drivers/sbus/char/openprom.c
@@ -233,7 +233,7 @@
 
 	ph = 0;
 	if (dp)
-		ph = dp->node;
+		ph = dp->phandle;
 
 	data->current_node = dp;
 	*((int *) op->oprom_array) = ph;
@@ -256,7 +256,7 @@
 
 		dp = pci_device_to_OF_node(pdev);
 		data->current_node = dp;
-		*((int *)op->oprom_array) = dp->node;
+		*((int *)op->oprom_array) = dp->phandle;
 		op->oprom_size = sizeof(int);
 		err = copyout(argp, op, bufsize + sizeof(int));
 
@@ -273,7 +273,7 @@
 
 	dp = of_find_node_by_path(op->oprom_array);
 	if (dp)
-		ph = dp->node;
+		ph = dp->phandle;
 	data->current_node = dp;
 	*((int *)op->oprom_array) = ph;
 	op->oprom_size = sizeof(int);
@@ -540,7 +540,7 @@
 		}
 	}
 	if (dp)
-		nd = dp->node;
+		nd = dp->phandle;
 	if (copy_to_user(argp, &nd, sizeof(phandle)))
 		return -EFAULT;
 
@@ -570,7 +570,7 @@
 	case OPIOCGETOPTNODE:
 		BUILD_BUG_ON(sizeof(phandle) != sizeof(int));
 
-		if (copy_to_user(argp, &options_node->node, sizeof(phandle)))
+		if (copy_to_user(argp, &options_node->phandle, sizeof(phandle)))
 			return -EFAULT;
 
 		return 0;
diff --git a/drivers/scsi/FlashPoint.c b/drivers/scsi/FlashPoint.c
index b898d38..e40cdfb 100644
--- a/drivers/scsi/FlashPoint.c
+++ b/drivers/scsi/FlashPoint.c
@@ -3924,7 +3924,7 @@
 {
 	struct sccb_mgr_tar_info *currTar_Info;
 
-	if ((p_sccb->TargID > MAX_SCSI_TAR) || (p_sccb->Lun > MAX_LUN)) {
+	if ((p_sccb->TargID >= MAX_SCSI_TAR) || (p_sccb->Lun >= MAX_LUN)) {
 		return;
 	}
 	currTar_Info = &FPT_sccbMgrTbl[p_card][p_sccb->TargID];
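
The hunk above tightens a bounds check: with tables of MAX_SCSI_TAR and
MAX_LUN entries, the largest valid index is one less than the count, so an
index equal to the count must be rejected as well.  A generic illustration
of the corrected pattern (the names and size below are made up):

	#define N_ENTRIES 16
	static int table[N_ENTRIES];

	static int example_lookup(unsigned int idx)
	{
		if (idx >= N_ENTRIES)	/* idx == N_ENTRIES is already out of range */
			return -1;
		return table[idx];
	}
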
diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h
index a93a504..136b49c 100644
--- a/drivers/scsi/be2iscsi/be.h
+++ b/drivers/scsi/be2iscsi/be.h
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2009 ServerEngines
+ * Copyright (C) 2005 - 2010 ServerEngines
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -24,6 +24,10 @@
 #define FW_VER_LEN	32
 #define MCC_Q_LEN	128
 #define MCC_CQ_LEN	256
+#define MAX_MCC_CMD	16
+/* BladeEngine Generation numbers */
+#define BE_GEN2 2
+#define BE_GEN3 3
 
 struct be_dma_mem {
 	void *va;
@@ -57,6 +61,11 @@
 	return q->dma_mem.va + q->head * q->entry_size;
 }
 
+static inline void *queue_get_wrb(struct be_queue_info *q, unsigned int wrb_num)
+{
+	return q->dma_mem.va + wrb_num * q->entry_size;
+}
+
 static inline void *queue_tail_node(struct be_queue_info *q)
 {
 	return q->dma_mem.va + q->tail * q->entry_size;
@@ -104,15 +113,19 @@
 	spinlock_t mcc_lock;	/* For serializing mcc cmds to BE card */
 	spinlock_t mcc_cq_lock;
 
-	/* MCC Async callback */
-	void (*async_cb) (void *adapter, bool link_up);
-	void *adapter_ctxt;
+	wait_queue_head_t mcc_wait[MAX_MCC_CMD + 1];
+	unsigned int mcc_tag[MAX_MCC_CMD];
+	unsigned int mcc_numtag[MAX_MCC_CMD + 1];
+	unsigned short mcc_alloc_index;
+	unsigned short mcc_free_index;
+	unsigned int mcc_tag_available;
 };
 
 #include "be_cmds.h"
 
 #define PAGE_SHIFT_4K 12
 #define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
+#define mcc_timeout		120000 /* 5s timeout */
 
 /* Returns number of pages spanned by the data starting at the given addr */
 #define PAGES_4K_SPANNED(_address, size) 				\
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index f008708..6709857 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2009 ServerEngines
+ * Copyright (C) 2005 - 2010 ServerEngines
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -19,7 +19,7 @@
 #include "be_mgmt.h"
 #include "be_main.h"
 
-static void be_mcc_notify(struct beiscsi_hba *phba)
+void be_mcc_notify(struct beiscsi_hba *phba)
 {
 	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
 	u32 val = 0;
@@ -29,6 +29,52 @@
 	iowrite32(val, phba->db_va + DB_MCCQ_OFFSET);
 }
 
+unsigned int alloc_mcc_tag(struct beiscsi_hba *phba)
+{
+	unsigned int tag = 0;
+	unsigned int num = 0;
+
+mcc_tag_rdy:
+	if (phba->ctrl.mcc_tag_available) {
+		tag = phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index];
+		phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index] = 0;
+		phba->ctrl.mcc_numtag[tag] = 0;
+	} else {
+		udelay(100);
+		num++;
+		if (num < mcc_timeout)
+			goto mcc_tag_rdy;
+	}
+	if (tag) {
+		phba->ctrl.mcc_tag_available--;
+		if (phba->ctrl.mcc_alloc_index == (MAX_MCC_CMD - 1))
+			phba->ctrl.mcc_alloc_index = 0;
+		else
+			phba->ctrl.mcc_alloc_index++;
+	}
+	return tag;
+}
+
+void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag)
+{
+	spin_lock(&ctrl->mbox_lock);
+	tag = tag & 0x000000FF;
+	ctrl->mcc_tag[ctrl->mcc_free_index] = tag;
+	if (ctrl->mcc_free_index == (MAX_MCC_CMD - 1))
+		ctrl->mcc_free_index = 0;
+	else
+		ctrl->mcc_free_index++;
+	ctrl->mcc_tag_available++;
+	spin_unlock(&ctrl->mbox_lock);
+}
+
+bool is_link_state_evt(u32 trailer)
+{
+	return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
+		  ASYNC_TRAILER_EVENT_CODE_MASK) ==
+		  ASYNC_EVENT_CODE_LINK_STATE);
+}
+
 static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
 {
 	if (compl->flags != 0) {
@@ -64,12 +110,30 @@
 	return 0;
 }
 
-
-static inline bool is_link_state_evt(u32 trailer)
+int be_mcc_compl_process_isr(struct be_ctrl_info *ctrl,
+				    struct be_mcc_compl *compl)
 {
-	return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
-		  ASYNC_TRAILER_EVENT_CODE_MASK) ==
-		  ASYNC_EVENT_CODE_LINK_STATE);
+	u16 compl_status, extd_status;
+	unsigned short tag;
+
+	be_dws_le_to_cpu(compl, 4);
+
+	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
+					CQE_STATUS_COMPL_MASK;
+	/* The ctrl.mcc_numtag[tag] is filled with
+	 * [31] = valid, [30:24] = Rsvd, [23:16] = wrb, [15:8] = extd_status,
+	 * [7:0] = compl_status
+	 */
+	tag = (compl->tag0 & 0x000000FF);
+	extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
+					CQE_STATUS_EXTD_MASK;
+
+	ctrl->mcc_numtag[tag]  = 0x80000000;
+	ctrl->mcc_numtag[tag] |= (compl->tag0 & 0x00FF0000);
+	ctrl->mcc_numtag[tag] |= (extd_status & 0x000000FF) << 8;
+	ctrl->mcc_numtag[tag] |= (compl_status & 0x000000FF);
+	wake_up_interruptible(&ctrl->mcc_wait[tag]);
+	return 0;
 }
 
 static struct be_mcc_compl *be_mcc_compl_get(struct beiscsi_hba *phba)
@@ -89,7 +153,7 @@
 	iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
 }
 
-static void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
+void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
 		struct be_async_event_link_state *evt)
 {
 	switch (evt->port_link_status) {
@@ -97,13 +161,13 @@
 		SE_DEBUG(DBG_LVL_1, "Link Down on Physical Port %d \n",
 						evt->physical_port);
 		phba->state |= BE_ADAPTER_LINK_DOWN;
+		iscsi_host_for_each_session(phba->shost,
+					    be2iscsi_fail_session);
 		break;
 	case ASYNC_EVENT_LINK_UP:
 		phba->state = BE_ADAPTER_UP;
 		SE_DEBUG(DBG_LVL_1, "Link UP on Physical Port %d \n",
 						evt->physical_port);
-		iscsi_host_for_each_session(phba->shost,
-					    be2iscsi_fail_session);
 		break;
 	default:
 		SE_DEBUG(DBG_LVL_1, "Unexpected Async Notification %d on"
@@ -162,7 +226,6 @@
 /* Wait till no more pending mcc requests are present */
 static int be_mcc_wait_compl(struct beiscsi_hba *phba)
 {
-#define mcc_timeout		120000 /* 5s timeout */
 	int i, status;
 	for (i = 0; i < mcc_timeout; i++) {
 		status = beiscsi_process_mcc(phba);
@@ -372,9 +435,10 @@
 
 	BUG_ON(atomic_read(&mccq->used) >= mccq->len);
 	wrb = queue_head_node(mccq);
+	memset(wrb, 0, sizeof(*wrb));
+	wrb->tag0 = (mccq->head & 0x000000FF) << 16;
 	queue_head_inc(mccq);
 	atomic_inc(&mccq->used);
-	memset(wrb, 0, sizeof(*wrb));
 	return wrb;
 }
 
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
index 5de8acb..49fcc78 100644
--- a/drivers/scsi/be2iscsi/be_cmds.h
+++ b/drivers/scsi/be2iscsi/be_cmds.h
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2009 ServerEngines
+ * Copyright (C) 2005 - 2010 ServerEngines
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -425,14 +425,20 @@
 int be_poll_mcc(struct be_ctrl_info *ctrl);
 unsigned char mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
 				      struct beiscsi_hba *phba);
-int be_cmd_get_mac_addr(struct beiscsi_hba *phba, u8 *mac_addr);
-
+unsigned int be_cmd_get_mac_addr(struct beiscsi_hba *phba);
+void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag);
 /*ISCSI Functuions */
 int be_cmd_fw_initialize(struct be_ctrl_info *ctrl);
 
 struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem);
 struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba);
 int be_mcc_notify_wait(struct beiscsi_hba *phba);
+void be_mcc_notify(struct beiscsi_hba *phba);
+unsigned int alloc_mcc_tag(struct beiscsi_hba *phba);
+void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
+		struct be_async_event_link_state *evt);
+int be_mcc_compl_process_isr(struct be_ctrl_info *ctrl,
+				    struct be_mcc_compl *compl);
 
 int be_mbox_notify(struct be_ctrl_info *ctrl);
 
@@ -448,6 +454,8 @@
 int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
 		       struct be_queue_info *wrbq);
 
+bool is_link_state_evt(u32 trailer);
+
 struct be_default_pdu_context {
 	u32 dw[4];
 } __packed;
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index d587b03..29a3aaf 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2009 ServerEngines
+ * Copyright (C) 2005 - 2010 ServerEngines
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -101,6 +101,7 @@
 	struct iscsi_session *sess = cls_session->dd_data;
 	struct beiscsi_session *beiscsi_sess = sess->dd_data;
 
+	SE_DEBUG(DBG_LVL_8, "In beiscsi_session_destroy\n");
 	pci_pool_destroy(beiscsi_sess->bhs_pool);
 	iscsi_session_teardown(cls_session);
 }
@@ -224,6 +225,7 @@
 	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
 	int len = 0;
 
+	SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_get_param, param= %d\n", param);
 	beiscsi_ep = beiscsi_conn->ep;
 	if (!beiscsi_ep) {
 		SE_DEBUG(DBG_LVL_1,
@@ -254,6 +256,7 @@
 	struct iscsi_session *session = conn->session;
 	int ret;
 
+	SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_set_param, param= %d\n", param);
 	ret = iscsi_set_param(cls_conn, param, buf, buflen);
 	if (ret)
 		return ret;
@@ -271,8 +274,8 @@
 			conn->max_recv_dlength = 65536;
 		break;
 	case ISCSI_PARAM_MAX_BURST:
-		if (session->first_burst > 262144)
-			session->first_burst = 262144;
+		if (session->max_burst > 262144)
+			session->max_burst = 262144;
 		break;
 	default:
 		return 0;
@@ -293,12 +296,41 @@
 			   enum iscsi_host_param param, char *buf)
 {
 	struct beiscsi_hba *phba = (struct beiscsi_hba *)iscsi_host_priv(shost);
+	struct be_cmd_resp_get_mac_addr *resp;
+	struct be_mcc_wrb *wrb;
+	unsigned int tag, wrb_num;
 	int len = 0;
+	unsigned short status, extd_status;
+	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
 
+	SE_DEBUG(DBG_LVL_8, "In beiscsi_get_host_param, param= %d\n", param);
 	switch (param) {
 	case ISCSI_HOST_PARAM_HWADDRESS:
-		be_cmd_get_mac_addr(phba, phba->mac_address);
-		len = sysfs_format_mac(buf, phba->mac_address, ETH_ALEN);
+		tag = be_cmd_get_mac_addr(phba);
+		if (!tag) {
+			SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed \n");
+			return -1;
+		} else
+			wait_event_interruptible(phba->ctrl.mcc_wait[tag],
+						 phba->ctrl.mcc_numtag[tag]);
+
+		wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
+		extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
+		status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
+		if (status || extd_status) {
+			SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed"
+					    " status = %d extd_status = %d \n",
+					    status, extd_status);
+			free_mcc_tag(&phba->ctrl, tag);
+			return -1;
+		} else {
+			wrb = queue_get_wrb(mccq, wrb_num);
+			free_mcc_tag(&phba->ctrl, tag);
+			resp = embedded_payload(wrb);
+			memcpy(phba->mac_address, resp->mac_address, ETH_ALEN);
+			len = sysfs_format_mac(buf, phba->mac_address,
+					       ETH_ALEN);
+		}
 		break;
 	default:
 		return iscsi_host_get_param(shost, param, buf);
@@ -378,6 +410,7 @@
 	struct beiscsi_endpoint *beiscsi_ep;
 	struct beiscsi_offload_params params;
 
+	SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_start\n");
 	memset(&params, 0, sizeof(struct beiscsi_offload_params));
 	beiscsi_ep = beiscsi_conn->ep;
 	if (!beiscsi_ep)
@@ -422,8 +455,14 @@
 {
 	struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
 	struct beiscsi_hba *phba = beiscsi_ep->phba;
+	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
+	struct be_mcc_wrb *wrb;
+	struct tcp_connect_and_offload_out *ptcpcnct_out;
+	unsigned short status, extd_status;
+	unsigned int tag, wrb_num;
 	int ret = -1;
 
+	SE_DEBUG(DBG_LVL_8, "In beiscsi_open_conn\n");
 	beiscsi_ep->ep_cid = beiscsi_get_cid(phba);
 	if (beiscsi_ep->ep_cid == 0xFFFF) {
 		SE_DEBUG(DBG_LVL_1, "No free cid available\n");
@@ -431,15 +470,44 @@
 	}
 	SE_DEBUG(DBG_LVL_8, "In beiscsi_open_conn, ep_cid=%d ",
 		 beiscsi_ep->ep_cid);
-	phba->ep_array[beiscsi_ep->ep_cid] = ep;
-	if (beiscsi_ep->ep_cid >
-	    (phba->fw_config.iscsi_cid_start + phba->params.cxns_per_ctrl)) {
+	phba->ep_array[beiscsi_ep->ep_cid -
+		       phba->fw_config.iscsi_cid_start] = ep;
+	if (beiscsi_ep->ep_cid > (phba->fw_config.iscsi_cid_start +
+				  phba->params.cxns_per_ctrl * 2)) {
 		SE_DEBUG(DBG_LVL_1, "Failed in allocate iscsi cid\n");
 		return ret;
 	}
 
 	beiscsi_ep->cid_vld = 0;
-	return mgmt_open_connection(phba, dst_addr, beiscsi_ep);
+	tag = mgmt_open_connection(phba, dst_addr, beiscsi_ep);
+	if (!tag) {
+		SE_DEBUG(DBG_LVL_1,
+			 "mgmt_invalidate_connection Failed for cid=%d \n",
+			 beiscsi_ep->ep_cid);
+	} else {
+		wait_event_interruptible(phba->ctrl.mcc_wait[tag],
+					 phba->ctrl.mcc_numtag[tag]);
+	}
+	wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
+	extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
+	status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
+	if (status || extd_status) {
+		SE_DEBUG(DBG_LVL_1, "mgmt_open_connection Failed"
+				    " status = %d extd_status = %d \n",
+				    status, extd_status);
+		free_mcc_tag(&phba->ctrl, tag);
+		return -1;
+	} else {
+		wrb = queue_get_wrb(mccq, wrb_num);
+		free_mcc_tag(&phba->ctrl, tag);
+
+		ptcpcnct_out = embedded_payload(wrb);
+		beiscsi_ep = ep->dd_data;
+		beiscsi_ep->fw_handle = ptcpcnct_out->connection_handle;
+		beiscsi_ep->cid_vld = 1;
+		SE_DEBUG(DBG_LVL_8, "mgmt_open_connection Success\n");
+	}
+	return 0;
 }
 
 /**
@@ -459,14 +527,12 @@
  * beiscsi_free_ep - free endpoint
  * @ep:	pointer to iscsi endpoint structure
  */
-static void beiscsi_free_ep(struct iscsi_endpoint *ep)
+static void beiscsi_free_ep(struct beiscsi_endpoint *beiscsi_ep)
 {
-	struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
 	struct beiscsi_hba *phba = beiscsi_ep->phba;
 
 	beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
 	beiscsi_ep->phba = NULL;
-	iscsi_destroy_endpoint(ep);
 }
 
 /**
@@ -495,9 +561,9 @@
 		return ERR_PTR(ret);
 	}
 
-	if (phba->state) {
+	if (phba->state != BE_ADAPTER_UP) {
 		ret = -EBUSY;
-		SE_DEBUG(DBG_LVL_1, "The Adapet state is Not UP \n");
+		SE_DEBUG(DBG_LVL_1, "The Adapter state is Not UP \n");
 		return ERR_PTR(ret);
 	}
 
@@ -509,9 +575,9 @@
 
 	beiscsi_ep = ep->dd_data;
 	beiscsi_ep->phba = phba;
-
+	beiscsi_ep->openiscsi_ep = ep;
 	if (beiscsi_open_conn(ep, NULL, dst_addr, non_blocking)) {
-		SE_DEBUG(DBG_LVL_1, "Failed in allocate iscsi cid\n");
+		SE_DEBUG(DBG_LVL_1, "Failed in beiscsi_open_conn \n");
 		ret = -ENOMEM;
 		goto free_ep;
 	}
@@ -519,7 +585,7 @@
 	return ep;
 
 free_ep:
-	beiscsi_free_ep(ep);
+	beiscsi_free_ep(beiscsi_ep);
 	return ERR_PTR(ret);
 }
 
@@ -546,20 +612,22 @@
  * @ep: The iscsi endpoint
  * @flag: The type of connection closure
  */
-static int beiscsi_close_conn(struct iscsi_endpoint *ep, int flag)
+static int beiscsi_close_conn(struct  beiscsi_endpoint *beiscsi_ep, int flag)
 {
 	int ret = 0;
-	struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
+	unsigned int tag;
 	struct beiscsi_hba *phba = beiscsi_ep->phba;
 
-	if (MGMT_STATUS_SUCCESS !=
-	    mgmt_upload_connection(phba, beiscsi_ep->ep_cid,
-		CONNECTION_UPLOAD_GRACEFUL)) {
+	tag = mgmt_upload_connection(phba, beiscsi_ep->ep_cid, flag);
+	if (!tag) {
 		SE_DEBUG(DBG_LVL_8, "upload failed for cid 0x%x",
 			 beiscsi_ep->ep_cid);
 		ret = -1;
+	} else {
+		wait_event_interruptible(phba->ctrl.mcc_wait[tag],
+					 phba->ctrl.mcc_numtag[tag]);
+		free_mcc_tag(&phba->ctrl, tag);
 	}
-
 	return ret;
 }
 
@@ -574,19 +642,17 @@
 	struct beiscsi_conn *beiscsi_conn;
 	struct beiscsi_endpoint *beiscsi_ep;
 	struct beiscsi_hba *phba;
-	int flag = 0;
 
 	beiscsi_ep = ep->dd_data;
 	phba = beiscsi_ep->phba;
-	SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_disconnect\n");
+	SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_disconnect for ep_cid = %d\n",
+			     beiscsi_ep->ep_cid);
 
 	if (beiscsi_ep->conn) {
 		beiscsi_conn = beiscsi_ep->conn;
 		iscsi_suspend_queue(beiscsi_conn->conn);
-		beiscsi_close_conn(ep, flag);
 	}
 
-	beiscsi_free_ep(ep);
 }
 
 /**
@@ -619,23 +685,31 @@
 	struct iscsi_session *session = conn->session;
 	struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session);
 	struct beiscsi_hba *phba = iscsi_host_priv(shost);
-	unsigned int status;
+	unsigned int tag;
 	unsigned short savecfg_flag = CMD_ISCSI_SESSION_SAVE_CFG_ON_FLASH;
 
-	SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_stop\n");
 	beiscsi_ep = beiscsi_conn->ep;
 	if (!beiscsi_ep) {
 		SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_stop , no beiscsi_ep\n");
 		return;
 	}
-	status = mgmt_invalidate_connection(phba, beiscsi_ep,
+	SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_stop  ep_cid = %d\n",
+			     beiscsi_ep->ep_cid);
+	tag = mgmt_invalidate_connection(phba, beiscsi_ep,
 					    beiscsi_ep->ep_cid, 1,
 					    savecfg_flag);
-	if (status != MGMT_STATUS_SUCCESS) {
+	if (!tag) {
 		SE_DEBUG(DBG_LVL_1,
 			 "mgmt_invalidate_connection Failed for cid=%d \n",
 			 beiscsi_ep->ep_cid);
+	} else {
+		wait_event_interruptible(phba->ctrl.mcc_wait[tag],
+					 phba->ctrl.mcc_numtag[tag]);
+		free_mcc_tag(&phba->ctrl, tag);
 	}
+	beiscsi_close_conn(beiscsi_ep, CONNECTION_UPLOAD_GRACEFUL);
+	beiscsi_free_ep(beiscsi_ep);
+	iscsi_destroy_endpoint(beiscsi_ep->openiscsi_ep);
 	beiscsi_unbind_conn_to_cid(phba, beiscsi_ep->ep_cid);
 	iscsi_conn_stop(cls_conn, flag);
 }
diff --git a/drivers/scsi/be2iscsi/be_iscsi.h b/drivers/scsi/be2iscsi/be_iscsi.h
index f92ffc5..1f512c2 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.h
+++ b/drivers/scsi/be2iscsi/be_iscsi.h
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2009 ServerEngines
+ * Copyright (C) 2005 - 2010 ServerEngines
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 1a557fa..7c22616 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2009 ServerEngines
+ * Copyright (C) 2005 - 2010 ServerEngines
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -40,7 +40,6 @@
 static unsigned int be_iopoll_budget = 10;
 static unsigned int be_max_phys_size = 64;
 static unsigned int enable_msix = 1;
-static unsigned int ring_mode;
 
 MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
 MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
@@ -62,10 +61,10 @@
 /*------------------- PCI Driver operations and data ----------------- */
 static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
 	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
+	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
 	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
 	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
 	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
-	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID4) },
 	{ 0 }
 };
 MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
@@ -112,6 +111,7 @@
 	memset(phba, 0, sizeof(*phba));
 	phba->shost = shost;
 	phba->pcidev = pci_dev_get(pcidev);
+	pci_set_drvdata(pcidev, phba);
 
 	if (iscsi_host_add(shost, &phba->pcidev->dev))
 		goto free_devices;
@@ -143,6 +143,7 @@
 				struct pci_dev *pcidev)
 {
 	u8 __iomem *addr;
+	int pcicfg_reg;
 
 	addr = ioremap_nocache(pci_resource_start(pcidev, 2),
 			       pci_resource_len(pcidev, 2));
@@ -159,13 +160,19 @@
 	phba->db_va = addr;
 	phba->db_pa.u.a64.address =  pci_resource_start(pcidev, 4);
 
-	addr = ioremap_nocache(pci_resource_start(pcidev, 1),
-			       pci_resource_len(pcidev, 1));
+	if (phba->generation == BE_GEN2)
+		pcicfg_reg = 1;
+	else
+		pcicfg_reg = 0;
+
+	addr = ioremap_nocache(pci_resource_start(pcidev, pcicfg_reg),
+			       pci_resource_len(pcidev, pcicfg_reg));
+
 	if (addr == NULL)
 		goto pci_map_err;
 	phba->ctrl.pcicfg = addr;
 	phba->pci_va = addr;
-	phba->pci_pa.u.a64.address = pci_resource_start(pcidev, 1);
+	phba->pci_pa.u.a64.address = pci_resource_start(pcidev, pcicfg_reg);
 	return 0;
 
 pci_map_err:
@@ -230,29 +237,27 @@
 
 static void beiscsi_get_params(struct beiscsi_hba *phba)
 {
-	phba->params.ios_per_ctrl = BE2_IO_DEPTH;
-	phba->params.cxns_per_ctrl = BE2_MAX_SESSIONS;
-	phba->params.asyncpdus_per_ctrl = BE2_ASYNCPDUS;
-	phba->params.icds_per_ctrl = BE2_MAX_ICDS / 2;
+	phba->params.ios_per_ctrl = (phba->fw_config.iscsi_icd_count
+				    - (phba->fw_config.iscsi_cid_count
+				    + BE2_TMFS
+				    + BE2_NOPOUT_REQ));
+	phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count;
+	phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count;
+	phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count;
 	phba->params.num_sge_per_io = BE2_SGE;
 	phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
 	phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
 	phba->params.eq_timer = 64;
 	phba->params.num_eq_entries =
-	    (((BE2_CMDS_PER_CXN * 2 + BE2_LOGOUTS + BE2_TMFS + BE2_ASYNCPDUS) /
-								512) + 1) * 512;
+	    (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
+				    + BE2_TMFS) / 512) + 1) * 512;
 	phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024)
 				? 1024 : phba->params.num_eq_entries;
 	SE_DEBUG(DBG_LVL_8, "phba->params.num_eq_entries=%d \n",
-		 phba->params.num_eq_entries);
+			     phba->params.num_eq_entries);
 	phba->params.num_cq_entries =
-	    (((BE2_CMDS_PER_CXN * 2 + BE2_LOGOUTS + BE2_TMFS + BE2_ASYNCPDUS) /
-								512) + 1) * 512;
-	SE_DEBUG(DBG_LVL_8,
-		"phba->params.num_cq_entries=%d BE2_CMDS_PER_CXN=%d"
-		"BE2_LOGOUTS=%d BE2_TMFS=%d BE2_ASYNCPDUS=%d \n",
-		phba->params.num_cq_entries, BE2_CMDS_PER_CXN,
-		BE2_LOGOUTS, BE2_TMFS, BE2_ASYNCPDUS);
+	    (((BE2_CMDS_PER_CXN * 2 +  phba->fw_config.iscsi_cid_count * 2
+				    + BE2_TMFS) / 512) + 1) * 512;
 	phba->params.wrbs_per_cxn = 256;
 }
 
@@ -443,7 +448,7 @@
 			if (phba->todo_mcc_cq)
 				queue_work(phba->wq, &phba->work_cqs);
 
-		if ((num_mcceq_processed) && (!num_ioeq_processed))
+			if ((num_mcceq_processed) && (!num_ioeq_processed))
 				hwi_ring_eq_db(phba, eq->id, 0,
 					      (num_ioeq_processed +
 					       num_mcceq_processed) , 1, 1);
@@ -561,6 +566,7 @@
 		SE_DEBUG(DBG_LVL_1, "In ISCSI_OP_REJECT\n");
 		break;
 	case ISCSI_OP_LOGIN_RSP:
+	case ISCSI_OP_TEXT_RSP:
 		task = conn->login_task;
 		io_task = task->dd_data;
 		login_hdr = (struct iscsi_hdr *)ppdu;
@@ -631,29 +637,29 @@
  * alloc_wrb_handle - To allocate a wrb handle
  * @phba: The hba pointer
  * @cid: The cid to use for allocation
- * @index: index allocation and wrb index
  *
  * This happens under session_lock until submission to chip
  */
-struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid,
-				    int index)
+struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid)
 {
 	struct hwi_wrb_context *pwrb_context;
 	struct hwi_controller *phwi_ctrlr;
-	struct wrb_handle *pwrb_handle;
+	struct wrb_handle *pwrb_handle, *pwrb_handle_tmp;
 
 	phwi_ctrlr = phba->phwi_ctrlr;
 	pwrb_context = &phwi_ctrlr->wrb_context[cid];
-	if (pwrb_context->wrb_handles_available) {
+	if (pwrb_context->wrb_handles_available >= 2) {
 		pwrb_handle = pwrb_context->pwrb_handle_base[
 					    pwrb_context->alloc_index];
 		pwrb_context->wrb_handles_available--;
-		pwrb_handle->nxt_wrb_index = pwrb_handle->wrb_index;
 		if (pwrb_context->alloc_index ==
 						(phba->params.wrbs_per_cxn - 1))
 			pwrb_context->alloc_index = 0;
 		else
 			pwrb_context->alloc_index++;
+		pwrb_handle_tmp = pwrb_context->pwrb_handle_base[
+						pwrb_context->alloc_index];
+		pwrb_handle->nxt_wrb_index = pwrb_handle_tmp->wrb_index;
 	} else
 		pwrb_handle = NULL;
 	return pwrb_handle;
@@ -671,9 +677,7 @@
 free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
 		struct wrb_handle *pwrb_handle)
 {
-	if (!ring_mode)
-		pwrb_context->pwrb_handle_base[pwrb_context->free_index] =
-					       pwrb_handle;
+	pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle;
 	pwrb_context->wrb_handles_available++;
 	if (pwrb_context->free_index == (phba->params.wrbs_per_cxn - 1))
 		pwrb_context->free_index = 0;
@@ -790,6 +794,7 @@
 		memcpy(task->sc->sense_buffer, sense,
 		       min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
 	}
+
 	if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ) {
 		if (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
 							& SOL_RES_CNT_MASK)
@@ -811,6 +816,7 @@
 	struct iscsi_conn *conn = beiscsi_conn->conn;
 
 	hdr = (struct iscsi_logout_rsp *)task->hdr;
+	hdr->opcode = ISCSI_OP_LOGOUT_RSP;
 	hdr->t2wait = 5;
 	hdr->t2retain = 0;
 	hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
@@ -825,6 +831,9 @@
 					& SOL_EXP_CMD_SN_MASK) +
 			((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
 					/ 32] & SOL_CMD_WND_MASK) >> 24) - 1);
+	hdr->dlength[0] = 0;
+	hdr->dlength[1] = 0;
+	hdr->dlength[2] = 0;
 	hdr->hlength = 0;
 	hdr->itt = io_task->libiscsi_itt;
 	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
@@ -839,6 +848,7 @@
 	struct beiscsi_io_task *io_task = task->dd_data;
 
 	hdr = (struct iscsi_tm_rsp *)task->hdr;
+	hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
 	hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
 					& SOL_FLAGS_MASK) >> 24) | 0x80;
 	hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
@@ -859,7 +869,6 @@
 {
 	struct hwi_wrb_context *pwrb_context;
 	struct wrb_handle *pwrb_handle = NULL;
-	struct sgl_handle *psgl_handle = NULL;
 	struct hwi_controller *phwi_ctrlr;
 	struct iscsi_task *task;
 	struct beiscsi_io_task *io_task;
@@ -867,22 +876,14 @@
 	struct iscsi_session *session = conn->session;
 
 	phwi_ctrlr = phba->phwi_ctrlr;
-	if (ring_mode) {
-		psgl_handle = phba->sgl_hndl_array[((psol->
-			      dw[offsetof(struct amap_sol_cqe_ring, icd_index) /
-				32] & SOL_ICD_INDEX_MASK) >> 6)];
-		pwrb_context = &phwi_ctrlr->wrb_context[psgl_handle->cid];
-		task = psgl_handle->task;
-		pwrb_handle = NULL;
-	} else {
-		pwrb_context = &phwi_ctrlr->wrb_context[((psol->
+	pwrb_context = &phwi_ctrlr->wrb_context[((psol->
 				dw[offsetof(struct amap_sol_cqe, cid) / 32] &
-				SOL_CID_MASK) >> 6)];
-		pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
+				SOL_CID_MASK) >> 6) -
+				phba->fw_config.iscsi_cid_start];
+	pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
 				dw[offsetof(struct amap_sol_cqe, wrb_index) /
 				32] & SOL_WRB_INDEX_MASK) >> 16)];
-		task = pwrb_handle->pio_handle;
-	}
+	task = pwrb_handle->pio_handle;
 
 	io_task = task->dd_data;
 	spin_lock(&phba->mgmt_sgl_lock);
@@ -923,31 +924,23 @@
 	struct iscsi_wrb *pwrb = NULL;
 	struct hwi_controller *phwi_ctrlr;
 	struct iscsi_task *task;
-	struct sgl_handle *psgl_handle = NULL;
 	unsigned int type;
 	struct iscsi_conn *conn = beiscsi_conn->conn;
 	struct iscsi_session *session = conn->session;
 
 	phwi_ctrlr = phba->phwi_ctrlr;
-	if (ring_mode) {
-		psgl_handle = phba->sgl_hndl_array[((psol->
-			      dw[offsetof(struct amap_sol_cqe_ring, icd_index) /
-			      32] & SOL_ICD_INDEX_MASK) >> 6)];
-		task = psgl_handle->task;
-		type = psgl_handle->type;
-	} else {
-		pwrb_context = &phwi_ctrlr->
-				wrb_context[((psol->dw[offsetof
+	pwrb_context = &phwi_ctrlr->wrb_context[((psol->dw[offsetof
 				(struct amap_sol_cqe, cid) / 32]
-				& SOL_CID_MASK) >> 6)];
-		pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
+				& SOL_CID_MASK) >> 6) -
+				phba->fw_config.iscsi_cid_start];
+	pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
 				dw[offsetof(struct amap_sol_cqe, wrb_index) /
 				32] & SOL_WRB_INDEX_MASK) >> 16)];
-		task = pwrb_handle->pio_handle;
-		pwrb = pwrb_handle->pwrb;
-		type = (pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] &
-			 WRB_TYPE_MASK) >> 28;
-	}
+	task = pwrb_handle->pio_handle;
+	pwrb = pwrb_handle->pwrb;
+	type = (pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] &
+				 WRB_TYPE_MASK) >> 28;
+
 	spin_lock_bh(&session->lock);
 	switch (type) {
 	case HWH_TYPE_IO:
@@ -978,15 +971,7 @@
 		break;
 
 	default:
-		if (ring_mode)
-			shost_printk(KERN_WARNING, phba->shost,
-				"In hwi_complete_cmd, unknown type = %d"
-				"icd_index 0x%x CID 0x%x\n", type,
-				((psol->dw[offsetof(struct amap_sol_cqe_ring,
-				icd_index) / 32] & SOL_ICD_INDEX_MASK) >> 6),
-				psgl_handle->cid);
-		else
-			shost_printk(KERN_WARNING, phba->shost,
+		shost_printk(KERN_WARNING, phba->shost,
 				"In hwi_complete_cmd, unknown type = %d"
 				"wrb_index 0x%x CID 0x%x\n", type,
 				((psol->dw[offsetof(struct amap_iscsi_wrb,
@@ -1077,7 +1062,8 @@
 
 	WARN_ON(!pasync_handle);
 
-	pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid;
+	pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid -
+					     phba->fw_config.iscsi_cid_start;
 	pasync_handle->is_header = is_header;
 	pasync_handle->buffer_len = ((pdpdu_cqe->
 			dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
@@ -1327,9 +1313,10 @@
 	}
 
 	status = beiscsi_process_async_pdu(beiscsi_conn, phba,
-					   beiscsi_conn->beiscsi_conn_cid,
-					   phdr, hdr_len, pfirst_buffer,
-					   buf_len);
+					   (beiscsi_conn->beiscsi_conn_cid -
+					    phba->fw_config.iscsi_cid_start),
+					    phdr, hdr_len, pfirst_buffer,
+					    buf_len);
 
 	if (status == 0)
 		hwi_free_async_msg(phba, cri);
@@ -1422,6 +1409,48 @@
 	hwi_post_async_buffers(phba, pasync_handle->is_header);
 }
 
+static void  beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
+{
+	struct be_queue_info *mcc_cq;
+	struct  be_mcc_compl *mcc_compl;
+	unsigned int num_processed = 0;
+
+	mcc_cq = &phba->ctrl.mcc_obj.cq;
+	mcc_compl = queue_tail_node(mcc_cq);
+	mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
+	while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) {
+
+		if (num_processed >= 32) {
+			hwi_ring_cq_db(phba, mcc_cq->id,
+					num_processed, 0, 0);
+			num_processed = 0;
+		}
+		if (mcc_compl->flags & CQE_FLAGS_ASYNC_MASK) {
+			/* Interpret flags as an async trailer */
+			if (is_link_state_evt(mcc_compl->flags))
+				/* Interpret compl as a async link evt */
+				beiscsi_async_link_state_process(phba,
+				(struct be_async_event_link_state *) mcc_compl);
+			else
+				SE_DEBUG(DBG_LVL_1,
+					" Unsupported Async Event, flags"
+					" = 0x%08x \n", mcc_compl->flags);
+		} else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) {
+			be_mcc_compl_process_isr(&phba->ctrl, mcc_compl);
+			atomic_dec(&phba->ctrl.mcc_obj.q.used);
+		}
+
+		mcc_compl->flags = 0;
+		queue_tail_inc(mcc_cq);
+		mcc_compl = queue_tail_node(mcc_cq);
+		mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
+		num_processed++;
+	}
+
+	if (num_processed > 0)
+		hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1, 0);
+
+}
 
 static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
 {
@@ -1431,7 +1460,8 @@
 	unsigned int num_processed = 0;
 	unsigned int tot_nump = 0;
 	struct beiscsi_conn *beiscsi_conn;
-	struct sgl_handle *psgl_handle = NULL;
+	struct beiscsi_endpoint *beiscsi_ep;
+	struct iscsi_endpoint *ep;
 	struct beiscsi_hba *phba;
 
 	cq = pbe_eq->cq;
@@ -1442,32 +1472,13 @@
 	       CQE_VALID_MASK) {
 		be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));
 
-		if (ring_mode) {
-			psgl_handle = phba->sgl_hndl_array[((sol->
-				      dw[offsetof(struct amap_sol_cqe_ring,
-				      icd_index) / 32] & SOL_ICD_INDEX_MASK)
-				      >> 6)];
-			beiscsi_conn = phba->conn_table[psgl_handle->cid];
-			if (!beiscsi_conn || !beiscsi_conn->ep) {
-				shost_printk(KERN_WARNING, phba->shost,
-				     "Connection table empty for cid = %d\n",
-				      psgl_handle->cid);
-				return 0;
-			}
+		ep = phba->ep_array[(u32) ((sol->
+				   dw[offsetof(struct amap_sol_cqe, cid) / 32] &
+				   SOL_CID_MASK) >> 6) -
+				   phba->fw_config.iscsi_cid_start];
 
-		} else {
-			beiscsi_conn = phba->conn_table[(u32) (sol->
-				 dw[offsetof(struct amap_sol_cqe, cid) / 32] &
-				 SOL_CID_MASK) >> 6];
-
-			if (!beiscsi_conn || !beiscsi_conn->ep) {
-				shost_printk(KERN_WARNING, phba->shost,
-				     "Connection table empty for cid = %d\n",
-				     (u32)(sol->dw[offsetof(struct amap_sol_cqe,
-				     cid) / 32] & SOL_CID_MASK) >> 6);
-				return 0;
-			}
-		}
+		beiscsi_ep = ep->dd_data;
+		beiscsi_conn = beiscsi_ep->conn;
 
 		if (num_processed >= 32) {
 			hwi_ring_cq_db(phba, cq->id,
@@ -1511,21 +1522,13 @@
 		case CMD_CXN_KILLED_ITT_INVALID:
 		case CMD_CXN_KILLED_SEQ_OUTOFORDER:
 		case CMD_CXN_KILLED_INVALID_DATASN_RCVD:
-			if (ring_mode) {
-				SE_DEBUG(DBG_LVL_1,
-				 "CQ Error notification for cmd.. "
-				 "code %d cid 0x%x\n",
-				 sol->dw[offsetof(struct amap_sol_cqe, code) /
-				 32] & CQE_CODE_MASK, psgl_handle->cid);
-			} else {
-				SE_DEBUG(DBG_LVL_1,
+			SE_DEBUG(DBG_LVL_1,
 				 "CQ Error notification for cmd.. "
 				 "code %d cid 0x%x\n",
 				 sol->dw[offsetof(struct amap_sol_cqe, code) /
 				 32] & CQE_CODE_MASK,
 				 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
 				 32] & SOL_CID_MASK));
-			}
 			break;
 		case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
 			SE_DEBUG(DBG_LVL_1,
@@ -1547,37 +1550,23 @@
 		case CXN_KILLED_OVER_RUN_RESIDUAL:
 		case CXN_KILLED_UNDER_RUN_RESIDUAL:
 		case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
-			if (ring_mode) {
-				SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset CID "
-				 "0x%x...\n",
-				 sol->dw[offsetof(struct amap_sol_cqe, code) /
-				 32] & CQE_CODE_MASK, psgl_handle->cid);
-			} else {
-				SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset CID "
+			SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset CID "
 				 "0x%x...\n",
 				 sol->dw[offsetof(struct amap_sol_cqe, code) /
 				 32] & CQE_CODE_MASK,
-				 sol->dw[offsetof(struct amap_sol_cqe, cid) /
-				 32] & CQE_CID_MASK);
-			}
+				 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
+				 32] & CQE_CID_MASK));
 			iscsi_conn_failure(beiscsi_conn->conn,
 					   ISCSI_ERR_CONN_FAILED);
 			break;
 		case CXN_KILLED_RST_SENT:
 		case CXN_KILLED_RST_RCVD:
-			if (ring_mode) {
-				SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset"
-				"received/sent on CID 0x%x...\n",
-				 sol->dw[offsetof(struct amap_sol_cqe, code) /
-				 32] & CQE_CODE_MASK, psgl_handle->cid);
-			} else {
-				SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset"
+			SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset "
 				"received/sent on CID 0x%x...\n",
 				 sol->dw[offsetof(struct amap_sol_cqe, code) /
 				 32] & CQE_CODE_MASK,
-				 sol->dw[offsetof(struct amap_sol_cqe, cid) /
-				 32] & CQE_CID_MASK);
-			}
+				 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
+				 32] & CQE_CID_MASK));
 			iscsi_conn_failure(beiscsi_conn->conn,
 					   ISCSI_ERR_CONN_FAILED);
 			break;
@@ -1586,8 +1575,8 @@
 				 "received on CID 0x%x...\n",
 				 sol->dw[offsetof(struct amap_sol_cqe, code) /
 				 32] & CQE_CODE_MASK,
-				 sol->dw[offsetof(struct amap_sol_cqe, cid) /
-				 32] & CQE_CID_MASK);
+				 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
+				 32] & CQE_CID_MASK));
 			break;
 		}
 
@@ -1604,7 +1593,7 @@
 	return tot_nump;
 }
 
-static void beiscsi_process_all_cqs(struct work_struct *work)
+void beiscsi_process_all_cqs(struct work_struct *work)
 {
 	unsigned long flags;
 	struct hwi_controller *phwi_ctrlr;
@@ -1624,6 +1613,7 @@
 		spin_lock_irqsave(&phba->isr_lock, flags);
 		phba->todo_mcc_cq = 0;
 		spin_unlock_irqrestore(&phba->isr_lock, flags);
+		beiscsi_process_mcc_isr(phba);
 	}
 
 	if (phba->todo_cq) {
@@ -1668,7 +1658,8 @@
 				      io_task->bhs_pa.u.a32.address_hi);
 
 	l_sg = sg;
-	for (index = 0; (index < num_sg) && (index < 2); index++, sg_next(sg)) {
+	for (index = 0; (index < num_sg) && (index < 2); index++,
+							 sg = sg_next(sg)) {
 		if (index == 0) {
 			sg_len = sg_dma_len(sg);
 			addr = (u64) sg_dma_address(sg);
@@ -1679,11 +1670,7 @@
 			AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
 							sg_len);
 			sge_len = sg_len;
-			AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
-							1);
 		} else {
-			AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
-							0);
 			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset,
 							pwrb, sge_len);
 			sg_len = sg_dma_len(sg);
@@ -1706,13 +1693,27 @@
 	AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
 			io_task->bhs_pa.u.a32.address_lo);
 
-	if (num_sg == 2)
-		AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 1);
+	if (num_sg == 1) {
+		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
+								1);
+		AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
+								0);
+	} else if (num_sg == 2) {
+		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
+								0);
+		AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
+								1);
+	} else {
+		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
+								0);
+		AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
+								0);
+	}
 	sg = l_sg;
 	psgl++;
 	psgl++;
 	offset = 0;
-	for (index = 0; index < num_sg; index++, sg_next(sg), psgl++) {
+	for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
 		sg_len = sg_dma_len(sg);
 		addr = (u64) sg_dma_address(sg);
 		AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
@@ -2048,10 +2049,9 @@
 	}
 	idx = 0;
 	pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
-	num_cxn_wrb =
-	    ((mem_descr_wrb->mem_array[idx].size) / (sizeof(struct iscsi_wrb)) *
-	     phba->params.wrbs_per_cxn);
-
+	num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
+		      ((sizeof(struct iscsi_wrb) *
+			phba->params.wrbs_per_cxn));
 	for (index = 0; index < phba->params.cxns_per_ctrl; index += 2) {
 		pwrb_context = &phwi_ctrlr->wrb_context[index];
 		if (num_cxn_wrb) {
@@ -2064,9 +2064,9 @@
 		} else {
 			idx++;
 			pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
-			num_cxn_wrb = ((mem_descr_wrb->mem_array[idx].size) /
-					(sizeof(struct iscsi_wrb)) *
-					phba->params.wrbs_per_cxn);
+			num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
+				      ((sizeof(struct iscsi_wrb) *
+					phba->params.wrbs_per_cxn));
 			for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
 				pwrb_handle = pwrb_context->pwrb_handle_base[j];
 				pwrb_handle->pwrb = pwrb;
@@ -2383,7 +2383,7 @@
 						     &paddr);
 		if (!cq_vaddress)
 			goto create_cq_error;
-		ret = be_fill_queue(cq, phba->params.icds_per_ctrl / 2,
+		ret = be_fill_queue(cq, phba->params.num_cq_entries,
 				    sizeof(struct sol_cqe), cq_vaddress);
 		if (ret) {
 			shost_printk(KERN_ERR, phba->shost,
@@ -2634,7 +2634,8 @@
 				     "wrbq create failed.");
 			return status;
 		}
-		phwi_ctrlr->wrb_context[i].cid = phwi_context->be_wrbq[i].id;
+		phwi_ctrlr->wrb_context[i * 2].cid = phwi_context->be_wrbq[i].
+								   id;
 	}
 	kfree(pwrb_arr);
 	return 0;
@@ -2803,17 +2804,6 @@
 		goto error;
 	}
 
-	if (phba->fw_config.iscsi_features == 0x1)
-		ring_mode = 1;
-	else
-		ring_mode = 0;
-	status = mgmt_get_fw_config(ctrl, phba);
-	if (status != 0) {
-		shost_printk(KERN_ERR, phba->shost,
-			     "Error getting fw config\n");
-		goto error;
-	}
-
 	status = beiscsi_create_cqs(phba, phwi_context);
 	if (status != 0) {
 		shost_printk(KERN_ERR, phba->shost, "CQ not created\n");
@@ -2941,17 +2931,6 @@
 	phba->io_sgl_hndl_avbl = 0;
 	phba->eh_sgl_hndl_avbl = 0;
 
-	if (ring_mode) {
-		phba->sgl_hndl_array = kzalloc(sizeof(struct sgl_handle *) *
-					      phba->params.icds_per_ctrl,
-						 GFP_KERNEL);
-		if (!phba->sgl_hndl_array) {
-			shost_printk(KERN_ERR, phba->shost,
-			     "Mem Alloc Failed. Failing to load\n");
-			return -ENOMEM;
-		}
-	}
-
 	mem_descr_sglh = phba->init_mem;
 	mem_descr_sglh += HWI_MEM_SGLH;
 	if (1 == mem_descr_sglh->num_elements) {
@@ -2959,8 +2938,6 @@
 						 phba->params.ios_per_ctrl,
 						 GFP_KERNEL);
 		if (!phba->io_sgl_hndl_base) {
-			if (ring_mode)
-				kfree(phba->sgl_hndl_array);
 			shost_printk(KERN_ERR, phba->shost,
 				     "Mem Alloc Failed. Failing to load\n");
 			return -ENOMEM;
@@ -3032,7 +3009,7 @@
 			AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0);
 			pfrag += phba->params.num_sge_per_io;
 			psgl_handle->sgl_index =
-				phba->fw_config.iscsi_cid_start + arr_index++;
+				phba->fw_config.iscsi_icd_start + arr_index++;
 		}
 		idx++;
 	}
@@ -3047,7 +3024,7 @@
 {
 	int i, new_cid;
 
-	phba->cid_array = kmalloc(sizeof(void *) * phba->params.cxns_per_ctrl,
+	phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl,
 				  GFP_KERNEL);
 	if (!phba->cid_array) {
 		shost_printk(KERN_ERR, phba->shost,
@@ -3055,7 +3032,7 @@
 			     "hba_setup_cid_tbls\n");
 		return -ENOMEM;
 	}
-	phba->ep_array = kmalloc(sizeof(struct iscsi_endpoint *) *
+	phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) *
 				 phba->params.cxns_per_ctrl * 2, GFP_KERNEL);
 	if (!phba->ep_array) {
 		shost_printk(KERN_ERR, phba->shost,
@@ -3064,7 +3041,7 @@
 		kfree(phba->cid_array);
 		return -ENOMEM;
 	}
-	new_cid = phba->fw_config.iscsi_icd_start;
+	new_cid = phba->fw_config.iscsi_cid_start;
 	for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
 		phba->cid_array[i] = new_cid;
 		new_cid += 2;
@@ -3145,8 +3122,6 @@
 	if (hba_setup_cid_tbls(phba)) {
 		shost_printk(KERN_ERR, phba->shost,
 			     "Failed in hba_setup_cid_tbls\n");
-		if (ring_mode)
-			kfree(phba->sgl_hndl_array);
 		kfree(phba->io_sgl_hndl_base);
 		kfree(phba->eh_sgl_hndl_base);
 		goto do_cleanup_ctrlr;
@@ -3166,6 +3141,7 @@
 	struct be_queue_info *eq;
 	struct be_eq_entry *eqe = NULL;
 	int i, eq_msix;
+	unsigned int num_processed;
 
 	phwi_ctrlr = phba->phwi_ctrlr;
 	phwi_context = phwi_ctrlr->phwi_ctxt;
@@ -3177,13 +3153,17 @@
 	for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
 		eq = &phwi_context->be_eq[i].q;
 		eqe = queue_tail_node(eq);
-
+		num_processed = 0;
 		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
 					& EQE_VALID_MASK) {
 			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
 			queue_tail_inc(eq);
 			eqe = queue_tail_node(eq);
+			num_processed++;
 		}
+
+		if (num_processed)
+			hwi_ring_eq_db(phba, eq->id, 1,	num_processed, 1, 1);
 	}
 }
 
@@ -3195,10 +3175,9 @@
 	if (mgmt_status)
 		shost_printk(KERN_WARNING, phba->shost,
 			     "mgmt_epfw_cleanup FAILED \n");
-	hwi_cleanup(phba);
+
 	hwi_purge_eq(phba);
-	if (ring_mode)
-		kfree(phba->sgl_hndl_array);
+	hwi_cleanup(phba);
 	kfree(phba->io_sgl_hndl_base);
 	kfree(phba->eh_sgl_hndl_base);
 	kfree(phba->cid_array);
@@ -3219,7 +3198,8 @@
 	 * We can always use 0 here because it is reserved by libiscsi for
 	 * login/startup related tasks.
 	 */
-	pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid, 0);
+	pwrb_handle = alloc_wrb_handle(phba, (beiscsi_conn->beiscsi_conn_cid -
+				       phba->fw_config.iscsi_cid_start));
 	pwrb = (struct iscsi_target_context_update_wrb *)pwrb_handle->pwrb;
 	memset(pwrb, 0, sizeof(*pwrb));
 	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
@@ -3283,8 +3263,7 @@
 	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_target_context_update_wrb));
 
 	doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
-	if (!ring_mode)
-		doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
+	doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
 			     << DB_DEF_PDU_WRB_INDEX_SHIFT;
 	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
 
@@ -3328,8 +3307,9 @@
 	io_task->bhs_pa.u.a64.address = paddr;
 	io_task->libiscsi_itt = (itt_t)task->itt;
 	io_task->pwrb_handle = alloc_wrb_handle(phba,
-						beiscsi_conn->beiscsi_conn_cid,
-						task->itt);
+						beiscsi_conn->beiscsi_conn_cid -
+						phba->fw_config.iscsi_cid_start
+						);
 	io_task->conn = beiscsi_conn;
 
 	task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr;
@@ -3343,7 +3323,7 @@
 			goto free_hndls;
 	} else {
 		io_task->scsi_cmnd = NULL;
-		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
+		if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
 			if (!beiscsi_conn->login_in_progress) {
 				spin_lock(&phba->mgmt_sgl_lock);
 				io_task->psgl_handle = (struct sgl_handle *)
@@ -3370,21 +3350,16 @@
 	itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle->
 				 wrb_index << 16) | (unsigned int)
 				(io_task->psgl_handle->sgl_index));
-	if (ring_mode) {
-		phba->sgl_hndl_array[io_task->psgl_handle->sgl_index -
-				     phba->fw_config.iscsi_cid_start] =
-				     io_task->psgl_handle;
-		io_task->psgl_handle->task = task;
-		io_task->psgl_handle->cid = beiscsi_conn->beiscsi_conn_cid;
-	} else
-		io_task->pwrb_handle->pio_handle = task;
+	io_task->pwrb_handle->pio_handle = task;
 
 	io_task->cmd_bhs->iscsi_hdr.itt = itt;
 	return 0;
 
 free_hndls:
 	phwi_ctrlr = phba->phwi_ctrlr;
-	pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid];
+	pwrb_context = &phwi_ctrlr->wrb_context[
+			beiscsi_conn->beiscsi_conn_cid -
+			phba->fw_config.iscsi_cid_start];
 	free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
 	io_task->pwrb_handle = NULL;
 	pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
@@ -3404,7 +3379,8 @@
 	struct hwi_controller *phwi_ctrlr;
 
 	phwi_ctrlr = phba->phwi_ctrlr;
-	pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid];
+	pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid
+			- phba->fw_config.iscsi_cid_start];
 	if (io_task->pwrb_handle) {
 		free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
 		io_task->pwrb_handle = NULL;
@@ -3460,18 +3436,12 @@
 			      ISCSI_OPCODE_SCSI_DATA_OUT);
 		AMAP_SET_BITS(struct amap_pdu_data_out, final_bit,
 			      &io_task->cmd_bhs->iscsi_data_pdu, 1);
-		if (ring_mode)
-			io_task->psgl_handle->type = INI_WR_CMD;
-		else
-			AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
-				      INI_WR_CMD);
+		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
+			      INI_WR_CMD);
 		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
 	} else {
-		if (ring_mode)
-			io_task->psgl_handle->type = INI_RD_CMD;
-		else
-			AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
-				      INI_RD_CMD);
+		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
+			      INI_RD_CMD);
 		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
 	}
 	memcpy(&io_task->cmd_bhs->iscsi_data_pdu.
@@ -3496,8 +3466,7 @@
 	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
 
 	doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
-	if (!ring_mode)
-		doorbell |= (io_task->pwrb_handle->wrb_index &
+	doorbell |= (io_task->pwrb_handle->wrb_index &
 		     DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
 	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
 
@@ -3519,49 +3488,46 @@
 	unsigned int doorbell = 0;
 	unsigned int i, cid;
 	struct iscsi_task *aborted_task;
+	unsigned int tag;
 
 	cid = beiscsi_conn->beiscsi_conn_cid;
 	pwrb = io_task->pwrb_handle->pwrb;
+	memset(pwrb, 0, sizeof(*pwrb));
 	AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
 		      be32_to_cpu(task->cmdsn));
 	AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
 		      io_task->pwrb_handle->wrb_index);
 	AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
 		      io_task->psgl_handle->sgl_index);
-
 	switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
 	case ISCSI_OP_LOGIN:
-		if (ring_mode)
-			io_task->psgl_handle->type = TGT_DM_CMD;
-		else
-			AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
-				      TGT_DM_CMD);
+		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
+			      TGT_DM_CMD);
 		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
 		AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1);
 		hwi_write_buffer(pwrb, task);
 		break;
 	case ISCSI_OP_NOOP_OUT:
-		if (ring_mode)
-			io_task->psgl_handle->type = INI_RD_CMD;
+		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
+			      INI_RD_CMD);
+		if (task->hdr->ttt == ISCSI_RESERVED_TAG)
+			AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
 		else
-			AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
-				      INI_RD_CMD);
+			AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 1);
 		hwi_write_buffer(pwrb, task);
 		break;
 	case ISCSI_OP_TEXT:
-		if (ring_mode)
-			io_task->psgl_handle->type = INI_WR_CMD;
-		else
-			AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
-				      INI_WR_CMD);
-		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
+		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
+			      TGT_DM_CMD);
+		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
 		hwi_write_buffer(pwrb, task);
 		break;
 	case ISCSI_OP_SCSI_TMFUNC:
 		session = conn->session;
 		i = ((struct iscsi_tm *)task->hdr)->rtt;
 		phwi_ctrlr = phba->phwi_ctrlr;
-		pwrb_context = &phwi_ctrlr->wrb_context[cid];
+		pwrb_context = &phwi_ctrlr->wrb_context[cid -
+					    phba->fw_config.iscsi_cid_start];
 		pwrb_handle = pwrb_context->pwrb_handle_basestd[be32_to_cpu(i)
 								>> 16];
 		aborted_task = pwrb_handle->pio_handle;
@@ -3572,22 +3538,25 @@
 		if (!aborted_io_task->scsi_cmnd)
 			return 0;
 
-		mgmt_invalidate_icds(phba,
+		tag = mgmt_invalidate_icds(phba,
 				     aborted_io_task->psgl_handle->sgl_index,
 				     cid);
-		if (ring_mode)
-			io_task->psgl_handle->type = INI_TMF_CMD;
-		else
-			AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
-				      INI_TMF_CMD);
+		if (!tag) {
+			shost_printk(KERN_WARNING, phba->shost,
+				     "mgmt_invalidate_icds could not be"
+				     " submitted\n");
+		} else {
+			wait_event_interruptible(phba->ctrl.mcc_wait[tag],
+						 phba->ctrl.mcc_numtag[tag]);
+			free_mcc_tag(&phba->ctrl, tag);
+		}
+		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
+			      INI_TMF_CMD);
 		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
 		hwi_write_buffer(pwrb, task);
 		break;
 	case ISCSI_OP_LOGOUT:
 		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
-		if (ring_mode)
-			io_task->psgl_handle->type = HWH_TYPE_LOGOUT;
-		else
 		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
 				HWH_TYPE_LOGOUT);
 		hwi_write_buffer(pwrb, task);
@@ -3600,14 +3569,13 @@
 	}
 
 	AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb,
-		      be32_to_cpu(task->data_count));
+		      task->data_count);
 	AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
 		      io_task->pwrb_handle->nxt_wrb_index);
 	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
 
 	doorbell |= cid & DB_WRB_POST_CID_MASK;
-	if (!ring_mode)
-		doorbell |= (io_task->pwrb_handle->wrb_index &
+	doorbell |= (io_task->pwrb_handle->wrb_index &
 		     DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
 	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
 	iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
@@ -3649,7 +3617,6 @@
 	return beiscsi_iotask(task, sg, num_sg, xferlen, writedir);
 }
 
-
 static void beiscsi_remove(struct pci_dev *pcidev)
 {
 	struct beiscsi_hba *phba = NULL;
@@ -3734,7 +3701,20 @@
 	}
 	SE_DEBUG(DBG_LVL_8, " phba = %p \n", phba);
 
-	pci_set_drvdata(pcidev, phba);
+	switch (pcidev->device) {
+	case BE_DEVICE_ID1:
+	case OC_DEVICE_ID1:
+	case OC_DEVICE_ID2:
+		phba->generation = BE_GEN2;
+		break;
+	case BE_DEVICE_ID2:
+	case OC_DEVICE_ID3:
+		phba->generation = BE_GEN3;
+		break;
+	default:
+		phba->generation = 0;
+	}
+
 	if (enable_msix)
 		num_cpus = find_num_cpus();
 	else
@@ -3754,7 +3734,15 @@
 	spin_lock_init(&phba->io_sgl_lock);
 	spin_lock_init(&phba->mgmt_sgl_lock);
 	spin_lock_init(&phba->isr_lock);
+	ret = mgmt_get_fw_config(&phba->ctrl, phba);
+	if (ret != 0) {
+		shost_printk(KERN_ERR, phba->shost,
+			     "Error getting fw config\n");
+		goto free_port;
+	}
+	phba->shost->max_id = phba->fw_config.iscsi_cid_count;
 	beiscsi_get_params(phba);
+	phba->shost->can_queue = phba->params.ios_per_ctrl;
 	ret = beiscsi_init_port(phba);
 	if (ret < 0) {
 		shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
@@ -3762,6 +3750,15 @@
 		goto free_port;
 	}
 
+	for (i = 0; i < MAX_MCC_CMD ; i++) {
+		init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
+		phba->ctrl.mcc_tag[i] = i + 1;
+		phba->ctrl.mcc_numtag[i + 1] = 0;
+		phba->ctrl.mcc_tag_available++;
+	}
+
+	phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;
+
 	snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_q_irq%u",
 		 phba->shost->host_no);
 	phba->wq = create_workqueue(phba->wq_name);
@@ -3836,7 +3833,7 @@
 struct iscsi_transport beiscsi_iscsi_transport = {
 	.owner = THIS_MODULE,
 	.name = DRV_NAME,
-	.caps = CAP_RECOVERY_L0 | CAP_HDRDGST |
+	.caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_TEXT_NEGO |
 		CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD,
 	.param_mask = ISCSI_MAX_RECV_DLENGTH |
 		ISCSI_MAX_XMIT_DLENGTH |
@@ -3859,7 +3856,7 @@
 		ISCSI_USERNAME | ISCSI_PASSWORD |
 		ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
 		ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
-		ISCSI_LU_RESET_TMO | ISCSI_TGT_RESET_TMO |
+		ISCSI_LU_RESET_TMO |
 		ISCSI_PING_TMO | ISCSI_RECV_TMO |
 		ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
 	.host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
@@ -3905,7 +3902,7 @@
 		SE_DEBUG(DBG_LVL_1,
 			 "beiscsi_module_init - Unable to  register beiscsi"
 			 "transport.\n");
-		ret = -ENOMEM;
+		return -ENOMEM;
 	}
 	SE_DEBUG(DBG_LVL_8, "In beiscsi_module_init, tt=%p \n",
 		 &beiscsi_iscsi_transport);
@@ -3917,7 +3914,6 @@
 			 "beiscsi pci driver.\n");
 		goto unregister_iscsi_transport;
 	}
-	ring_mode = 0;
 	return 0;
 
 unregister_iscsi_transport:
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index 25e6b20..c53a80a 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2009 ServerEngines
+ * Copyright (C) 2005 - 2010 ServerEngines
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -40,31 +40,29 @@
 #define DRV_DESC		BE_NAME " " "Driver"
 
 #define BE_VENDOR_ID 		0x19A2
+/* DEVICE ID's for BE2 */
 #define BE_DEVICE_ID1		0x212
 #define OC_DEVICE_ID1		0x702
 #define OC_DEVICE_ID2		0x703
-#define OC_DEVICE_ID3		0x712
-#define OC_DEVICE_ID4		0x222
 
-#define BE2_MAX_SESSIONS	64
+/* DEVICE ID's for BE3 */
+#define BE_DEVICE_ID2		0x222
+#define OC_DEVICE_ID3		0x712
+
+#define BE2_IO_DEPTH		1024
+#define BE2_MAX_SESSIONS	256
 #define BE2_CMDS_PER_CXN	128
-#define BE2_LOGOUTS		BE2_MAX_SESSIONS
 #define BE2_TMFS		16
 #define BE2_NOPOUT_REQ		16
-#define BE2_ASYNCPDUS		BE2_MAX_SESSIONS
-#define BE2_MAX_ICDS		2048
 #define BE2_SGE			32
 #define BE2_DEFPDU_HDR_SZ	64
 #define BE2_DEFPDU_DATA_SZ	8192
-#define BE2_IO_DEPTH \
-	(BE2_MAX_ICDS / 2 - (BE2_LOGOUTS + BE2_TMFS + BE2_NOPOUT_REQ))
 
 #define MAX_CPUS		31
-#define BEISCSI_SGLIST_ELEMENTS	BE2_SGE
+#define BEISCSI_SGLIST_ELEMENTS	30
 
-#define BEISCSI_MAX_CMNDS	1024	/* Max IO's per Ctrlr sht->can_queue */
 #define BEISCSI_CMD_PER_LUN	128	/* scsi_host->cmd_per_lun */
-#define BEISCSI_MAX_SECTORS	2048	/* scsi_host->max_sectors */
+#define BEISCSI_MAX_SECTORS	256	/* scsi_host->max_sectors */
 
 #define BEISCSI_MAX_CMD_LEN	16	/* scsi_host->max_cmd_len */
 #define BEISCSI_NUM_MAX_LUN	256	/* scsi_host->max_lun */
@@ -330,6 +328,7 @@
 	struct workqueue_struct *wq;	/* The actual work queue */
 	struct work_struct work_cqs;	/* The work being queued */
 	struct be_ctrl_info ctrl;
+	unsigned int generation;
 };
 
 struct beiscsi_session {
@@ -656,11 +655,12 @@
 
 } __packed;
 
-struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid,
-				    int index);
+struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid);
 void
 free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle);
 
+void beiscsi_process_all_cqs(struct work_struct *work);
+
 struct pdu_nop_out {
 	u32 dw[12];
 };
@@ -802,7 +802,6 @@
 	struct be_ring default_pdu_hdr;
 	struct be_ring default_pdu_data;
 	struct hwi_context_memory *phwi_ctxt;
-	unsigned short cq_errors[CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN];
 };
 
 enum hwh_type_enum {
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 79c2bd5..317bcd0 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2009 ServerEngines
+ * Copyright (C) 2005 - 2010 ServerEngines
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -48,6 +48,14 @@
 					pfw_cfg->ulp[0].sq_base;
 		phba->fw_config.iscsi_cid_count =
 					pfw_cfg->ulp[0].sq_count;
+		if (phba->fw_config.iscsi_cid_count > (BE2_MAX_SESSIONS / 2)) {
+			SE_DEBUG(DBG_LVL_8,
+				"FW reported MAX CXNS as %d \t"
+				"Max Supported = %d.\n",
+				phba->fw_config.iscsi_cid_count,
+				BE2_MAX_SESSIONS);
+			phba->fw_config.iscsi_cid_count = BE2_MAX_SESSIONS / 2;
+		}
 	} else {
 		shost_printk(KERN_WARNING, phba->shost,
 			     "Failed in mgmt_get_fw_config \n");
@@ -77,6 +85,7 @@
 	}
 	nonemb_cmd.size = sizeof(struct be_mgmt_controller_attributes);
 	req = nonemb_cmd.va;
+	memset(req, 0, sizeof(*req));
 	spin_lock(&ctrl->mbox_lock);
 	memset(wrb, 0, sizeof(*wrb));
 	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
@@ -140,10 +149,17 @@
 {
 	struct be_dma_mem nonemb_cmd;
 	struct be_ctrl_info *ctrl = &phba->ctrl;
-	struct be_mcc_wrb *wrb = wrb_from_mccq(phba);
-	struct be_sge *sge = nonembedded_sgl(wrb);
+	struct be_mcc_wrb *wrb;
+	struct be_sge *sge;
 	struct invalidate_commands_params_in *req;
-	int status = 0;
+	unsigned int tag = 0;
+
+	spin_lock(&ctrl->mbox_lock);
+	tag = alloc_mcc_tag(phba);
+	if (!tag) {
+		spin_unlock(&ctrl->mbox_lock);
+		return tag;
+	}
 
 	nonemb_cmd.va = pci_alloc_consistent(ctrl->pdev,
 				sizeof(struct invalidate_commands_params_in),
@@ -156,8 +172,10 @@
 	}
 	nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
 	req = nonemb_cmd.va;
-	spin_lock(&ctrl->mbox_lock);
-	memset(wrb, 0, sizeof(*wrb));
+	memset(req, 0, sizeof(*req));
+	wrb = wrb_from_mccq(phba);
+	sge = nonembedded_sgl(wrb);
+	wrb->tag0 |= tag;
 
 	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
@@ -172,14 +190,12 @@
 	sge->pa_lo = cpu_to_le32(nonemb_cmd.dma & 0xFFFFFFFF);
 	sge->len = cpu_to_le32(nonemb_cmd.size);
 
-	status = be_mcc_notify_wait(phba);
-	if (status)
-		SE_DEBUG(DBG_LVL_1, "ICDS Invalidation Failed\n");
+	be_mcc_notify(phba);
 	spin_unlock(&ctrl->mbox_lock);
 	if (nonemb_cmd.va)
 		pci_free_consistent(ctrl->pdev, nonemb_cmd.size,
 				    nonemb_cmd.va, nonemb_cmd.dma);
-	return status;
+	return tag;
 }
 
 unsigned char mgmt_invalidate_connection(struct beiscsi_hba *phba,
@@ -189,13 +205,19 @@
 					 unsigned short savecfg_flag)
 {
 	struct be_ctrl_info *ctrl = &phba->ctrl;
-	struct be_mcc_wrb *wrb = wrb_from_mccq(phba);
-	struct iscsi_invalidate_connection_params_in *req =
-						embedded_payload(wrb);
-	int status = 0;
+	struct be_mcc_wrb *wrb;
+	struct iscsi_invalidate_connection_params_in *req;
+	unsigned int tag = 0;
 
 	spin_lock(&ctrl->mbox_lock);
-	memset(wrb, 0, sizeof(*wrb));
+	tag = alloc_mcc_tag(phba);
+	if (!tag) {
+		spin_unlock(&ctrl->mbox_lock);
+		return tag;
+	}
+	wrb = wrb_from_mccq(phba);
+	wrb->tag0 |= tag;
+	req = embedded_payload(wrb);
 
 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
@@ -208,35 +230,37 @@
 	else
 		req->cleanup_type = CMD_ISCSI_CONNECTION_INVALIDATE;
 	req->save_cfg = savecfg_flag;
-	status =  be_mcc_notify_wait(phba);
-	if (status)
-		SE_DEBUG(DBG_LVL_1, "Invalidation Failed\n");
-
+	be_mcc_notify(phba);
 	spin_unlock(&ctrl->mbox_lock);
-	return status;
+	return tag;
 }
 
 unsigned char mgmt_upload_connection(struct beiscsi_hba *phba,
 				unsigned short cid, unsigned int upload_flag)
 {
 	struct be_ctrl_info *ctrl = &phba->ctrl;
-	struct be_mcc_wrb *wrb = wrb_from_mccq(phba);
-	struct tcp_upload_params_in *req = embedded_payload(wrb);
-	int status = 0;
+	struct be_mcc_wrb *wrb;
+	struct tcp_upload_params_in *req;
+	unsigned int tag = 0;
 
 	spin_lock(&ctrl->mbox_lock);
-	memset(wrb, 0, sizeof(*wrb));
+	tag = alloc_mcc_tag(phba);
+	if (!tag) {
+		spin_unlock(&ctrl->mbox_lock);
+		return tag;
+	}
+	wrb = wrb_from_mccq(phba);
+	req = embedded_payload(wrb);
+	wrb->tag0 |= tag;
 
 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
 	be_cmd_hdr_prepare(&req->hdr, CMD_COMMON_TCP_UPLOAD,
 			   OPCODE_COMMON_TCP_UPLOAD, sizeof(*req));
 	req->id = (unsigned short)cid;
 	req->upload_type = (unsigned char)upload_flag;
-	status = be_mcc_notify_wait(phba);
-	if (status)
-		SE_DEBUG(DBG_LVL_1, "mgmt_upload_connection Failed\n");
+	be_mcc_notify(phba);
 	spin_unlock(&ctrl->mbox_lock);
-	return status;
+	return tag;
 }
 
 int mgmt_open_connection(struct beiscsi_hba *phba,
@@ -248,13 +272,13 @@
 	struct sockaddr_in *daddr_in = (struct sockaddr_in *)dst_addr;
 	struct sockaddr_in6 *daddr_in6 = (struct sockaddr_in6 *)dst_addr;
 	struct be_ctrl_info *ctrl = &phba->ctrl;
-	struct be_mcc_wrb *wrb = wrb_from_mccq(phba);
-	struct tcp_connect_and_offload_in *req = embedded_payload(wrb);
+	struct be_mcc_wrb *wrb;
+	struct tcp_connect_and_offload_in *req;
 	unsigned short def_hdr_id;
 	unsigned short def_data_id;
 	struct phys_addr template_address = { 0, 0 };
 	struct phys_addr *ptemplate_address;
-	int status = 0;
+	unsigned int tag = 0;
 	unsigned int i;
 	unsigned short cid = beiscsi_ep->ep_cid;
 
@@ -266,7 +290,14 @@
 	ptemplate_address = &template_address;
 	ISCSI_GET_PDU_TEMPLATE_ADDRESS(phba, ptemplate_address);
 	spin_lock(&ctrl->mbox_lock);
-	memset(wrb, 0, sizeof(*wrb));
+	tag = alloc_mcc_tag(phba);
+	if (!tag) {
+		spin_unlock(&ctrl->mbox_lock);
+		return tag;
+	}
+	wrb = wrb_from_mccq(phba);
+	req = embedded_payload(wrb);
+	wrb->tag0 |= tag;
 
 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
@@ -311,46 +342,36 @@
 	req->do_offload = 1;
 	req->dataout_template_pa.lo = ptemplate_address->lo;
 	req->dataout_template_pa.hi = ptemplate_address->hi;
-	status = be_mcc_notify_wait(phba);
-	if (!status) {
-		struct iscsi_endpoint *ep;
-		struct tcp_connect_and_offload_out *ptcpcnct_out =
-							embedded_payload(wrb);
-
-		ep = phba->ep_array[ptcpcnct_out->cid];
-		beiscsi_ep = ep->dd_data;
-		beiscsi_ep->fw_handle = ptcpcnct_out->connection_handle;
-		beiscsi_ep->cid_vld = 1;
-		SE_DEBUG(DBG_LVL_8, "mgmt_open_connection Success\n");
-	} else
-		SE_DEBUG(DBG_LVL_1, "mgmt_open_connection Failed\n");
+	be_mcc_notify(phba);
 	spin_unlock(&ctrl->mbox_lock);
-	return status;
+	return tag;
 }
 
-int be_cmd_get_mac_addr(struct beiscsi_hba *phba, u8 *mac_addr)
+unsigned int be_cmd_get_mac_addr(struct beiscsi_hba *phba)
 {
 	struct be_ctrl_info *ctrl = &phba->ctrl;
-	struct be_mcc_wrb *wrb = wrb_from_mccq(phba);
-	struct be_cmd_req_get_mac_addr *req = embedded_payload(wrb);
-	int status;
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_req_get_mac_addr *req;
+	unsigned int tag = 0;
 
 	SE_DEBUG(DBG_LVL_8, "In be_cmd_get_mac_addr\n");
 	spin_lock(&ctrl->mbox_lock);
-	memset(wrb, 0, sizeof(*wrb));
+	tag = alloc_mcc_tag(phba);
+	if (!tag) {
+		spin_unlock(&ctrl->mbox_lock);
+		return tag;
+	}
+
+	wrb = wrb_from_mccq(phba);
+	req = embedded_payload(wrb);
+	wrb->tag0 |= tag;
 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
 			   OPCODE_COMMON_ISCSI_NTWK_GET_NIC_CONFIG,
 			   sizeof(*req));
 
-	status = be_mcc_notify_wait(phba);
-	if (!status) {
-		struct be_cmd_resp_get_mac_addr *resp = embedded_payload(wrb);
-
-		memcpy(mac_addr, resp->mac_address, ETH_ALEN);
-	}
-
+	be_mcc_notify(phba);
 	spin_unlock(&ctrl->mbox_lock);
-	return status;
+	return tag;
 }
 
diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h
index 24eaff9..ecead6a 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.h
+++ b/drivers/scsi/be2iscsi/be_mgmt.h
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2009 ServerEngines
+ * Copyright (C) 2005 - 2010 ServerEngines
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -231,6 +231,7 @@
 	struct beiscsi_hba *phba;
 	struct beiscsi_sess *sess;
 	struct beiscsi_conn *conn;
+	struct iscsi_endpoint *openiscsi_ep;
 	unsigned short ip_type;
 	char dst6_addr[ISCSI_ADDRESS_BUF_LEN];
 	unsigned long dst_addr;
@@ -249,7 +250,4 @@
 					 unsigned short issue_reset,
 					 unsigned short savecfg_flag);
 
-unsigned char mgmt_fw_cmd(struct be_ctrl_info *ctrl,
-			  struct beiscsi_hba *phba,
-			  char *buf, unsigned int len);
 #endif
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 33b2294..1c4d121 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -1426,8 +1426,8 @@
 		break;
 	case ISCSI_PARAM_CONN_ADDRESS:
 		if (bnx2i_conn->ep)
-			len = sprintf(buf, NIPQUAD_FMT "\n",
-				      NIPQUAD(bnx2i_conn->ep->cm_sk->dst_ip));
+			len = sprintf(buf, "%pI4\n",
+				      &bnx2i_conn->ep->cm_sk->dst_ip);
 		break;
 	default:
 		return iscsi_conn_get_param(cls_conn, param, buf);
@@ -1990,6 +1990,7 @@
 	.eh_abort_handler	= iscsi_eh_abort,
 	.eh_device_reset_handler = iscsi_eh_device_reset,
 	.eh_target_reset_handler = iscsi_eh_target_reset,
+	.change_queue_depth	= iscsi_change_queue_depth,
 	.can_queue		= 1024,
 	.max_sectors		= 127,
 	.cmd_per_lun		= 32,
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
index 9129bcf..cd05e04 100644
--- a/drivers/scsi/constants.c
+++ b/drivers/scsi/constants.c
@@ -219,18 +219,15 @@
 			break;
 		}
 		sa = (cdbp[8] << 8) + cdbp[9];
-		name = get_sa_name(maint_in_arr, MAINT_IN_SZ, sa);
-		if (name) {
+		name = get_sa_name(variable_length_arr, VARIABLE_LENGTH_SZ, sa);
+		if (name)
 			printk("%s", name);
-			if ((cdb_len > 0) && (len != cdb_len))
-				printk(", in_cdb_len=%d, ext_len=%d",
-				       len, cdb_len);
-		} else {
+		else
 			printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa);
-			if ((cdb_len > 0) && (len != cdb_len))
-				printk(", in_cdb_len=%d, ext_len=%d",
-				       len, cdb_len);
-		}
+
+		if ((cdb_len > 0) && (len != cdb_len))
+			printk(", in_cdb_len=%d, ext_len=%d", len, cdb_len);
+
 		break;
 	case MAINTENANCE_IN:
 		sa = cdbp[1] & 0x1f;
@@ -349,6 +346,9 @@
 {
 	int k;
 
+	if (cmd->cmnd == NULL)
+		return;
+
 	scmd_printk(KERN_INFO, cmd, "CDB: ");
 	print_opcode_name(cmd->cmnd, cmd->cmd_len);
 
diff --git a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
index 969c831..412853c 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
@@ -591,8 +591,7 @@
 	cxgb3i_conn_max_recv_dlength(conn);
 
 	spin_lock_bh(&conn->session->lock);
-	sprintf(conn->portal_address, NIPQUAD_FMT,
-		NIPQUAD(c3cn->daddr.sin_addr.s_addr));
+	sprintf(conn->portal_address, "%pI4", &c3cn->daddr.sin_addr.s_addr);
 	conn->portal_port = ntohs(c3cn->daddr.sin_port);
 	spin_unlock_bh(&conn->session->lock);
 
@@ -709,6 +708,12 @@
 {
 	struct cxgb3i_hba *hba = iscsi_host_priv(shost);
 
+	if (!hba->ndev) {
+		shost_printk(KERN_ERR, shost, "Could not set host param. "
+			     "Netdev for host not set.\n");
+		return -ENODEV;
+	}
+
 	cxgb3i_api_debug("param %d, buf %s.\n", param, buf);
 
 	switch (param) {
@@ -739,6 +744,12 @@
 	struct cxgb3i_hba *hba = iscsi_host_priv(shost);
 	int len = 0;
 
+	if (!hba->ndev) {
+		shost_printk(KERN_ERR, shost, "Could not get host param. "
+			     "Netdev for host not set.\n");
+		return -ENODEV;
+	}
+
 	cxgb3i_api_debug("hba %s, param %d.\n", hba->ndev->name, param);
 
 	switch (param) {
@@ -753,7 +764,7 @@
 		__be32 addr;
 
 		addr = cxgb3i_get_private_ipv4addr(hba->ndev);
-		len = sprintf(buf, NIPQUAD_FMT, NIPQUAD(addr));
+		len = sprintf(buf, "%pI4", &addr);
 		break;
 	}
 	default:
diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.c b/drivers/scsi/cxgb3i/cxgb3i_offload.c
index 15a00e8..3e08c43 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_offload.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_offload.c
@@ -1675,10 +1675,11 @@
 	} else
 		c3cn->saddr.sin_addr.s_addr = sipv4;
 
-	c3cn_conn_debug("c3cn 0x%p, %u.%u.%u.%u,%u-%u.%u.%u.%u,%u SYN_SENT.\n",
-			c3cn, NIPQUAD(c3cn->saddr.sin_addr.s_addr),
+	c3cn_conn_debug("c3cn 0x%p, %pI4,%u-%pI4,%u SYN_SENT.\n",
+			c3cn,
+			&c3cn->saddr.sin_addr.s_addr,
 			ntohs(c3cn->saddr.sin_port),
-			NIPQUAD(c3cn->daddr.sin_addr.s_addr),
+			&c3cn->daddr.sin_addr.s_addr,
 			ntohs(c3cn->daddr.sin_port));
 
 	c3cn_set_state(c3cn, C3CN_STATE_CONNECTING);
diff --git a/drivers/scsi/cxgb3i/cxgb3i_pdu.c b/drivers/scsi/cxgb3i/cxgb3i_pdu.c
index 1fe3b0f..9c38539 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_pdu.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_pdu.c
@@ -461,10 +461,8 @@
 		skb = skb_peek(&c3cn->receive_queue);
 	}
 	read_unlock(&c3cn->callback_lock);
-	if (c3cn) {
-		c3cn->copied_seq += read;
-		cxgb3i_c3cn_rx_credits(c3cn, read);
-	}
+	c3cn->copied_seq += read;
+	cxgb3i_c3cn_rx_credits(c3cn, read);
 	conn->rxdata_octets += read;
 
 	if (err) {
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 4f0d013..bc9e94f 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -717,6 +717,8 @@
 	{"IBM", "2145" },
 	{"Pillar", "Axiom" },
 	{"Intel", "Multi-Flex"},
+	{"NETAPP", "LUN"},
+	{"AIX", "NVDISK"},
 	{NULL, NULL}
 };
 
diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
index c7076ce..3c5abf7 100644
--- a/drivers/scsi/eata.c
+++ b/drivers/scsi/eata.c
@@ -1509,7 +1509,7 @@
 	char *cur = str;
 	int i = 1;
 
-	while (cur && isdigit(*cur) && i <= MAX_INT_PARAM) {
+	while (cur && isdigit(*cur) && i < MAX_INT_PARAM) {
 		ints[i++] = simple_strtoul(cur, NULL, 0);
 
 		if ((cur = strchr(cur, ',')) != NULL)
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index a680e18..e2bc779 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -1449,9 +1449,6 @@
 	if (offset > 15)
 		goto do_reject;
 
-	if (esp->flags & ESP_FLAG_DISABLE_SYNC)
-		offset = 0;
-
 	if (offset) {
 		int one_clock;
 
@@ -2405,12 +2402,6 @@
 	struct esp_target_data *tp = &esp->target[dev->id];
 	int goal_tags, queue_depth;
 
-	if (esp->flags & ESP_FLAG_DISABLE_SYNC) {
-		/* Bypass async domain validation */
-		dev->ppr  = 0;
-		dev->sdtr = 0;
-	}
-
 	goal_tags = 0;
 
 	if (dev->tagged_supported) {
@@ -2660,7 +2651,10 @@
 	struct esp *esp = shost_priv(host);
 	struct esp_target_data *tp = &esp->target[target->id];
 
-	tp->nego_goal_offset = offset;
+	if (esp->flags & ESP_FLAG_DISABLE_SYNC)
+		tp->nego_goal_offset = 0;
+	else
+		tp->nego_goal_offset = offset;
 	tp->flags |= ESP_TGT_CHECK_NEGO;
 }
 
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index bb208a6..3966c71 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -36,7 +36,7 @@
 
 #define DRV_NAME		"fnic"
 #define DRV_DESCRIPTION		"Cisco FCoE HBA Driver"
-#define DRV_VERSION		"1.0.0.1121"
+#define DRV_VERSION		"1.4.0.98"
 #define PFX			DRV_NAME ": "
 #define DFX                     DRV_NAME "%d: "
 
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index fe1b1031..507e26c 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -620,6 +620,8 @@
 	if (fnic->config.flags & VFCF_FIP_CAPABLE) {
 		shost_printk(KERN_INFO, fnic->lport->host,
 			     "firmware supports FIP\n");
+		/* enable directed and multicast */
+		vnic_dev_packet_filter(fnic->vdev, 1, 1, 0, 0, 0);
 		vnic_dev_add_addr(fnic->vdev, FIP_ALL_ENODE_MACS);
 		vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
 	} else {
@@ -698,6 +700,8 @@
 		goto err_out_remove_scsi_host;
 	}
 
+	fc_lport_init_stats(lp);
+
 	fc_lport_config(lp);
 
 	if (fc_set_mfs(lp, fnic->config.maxdatafieldsize +
diff --git a/drivers/scsi/fnic/vnic_devcmd.h b/drivers/scsi/fnic/vnic_devcmd.h
index d62b906..7c9ccbd 100644
--- a/drivers/scsi/fnic/vnic_devcmd.h
+++ b/drivers/scsi/fnic/vnic_devcmd.h
@@ -94,7 +94,7 @@
 	CMD_STATS_DUMP          = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 4),
 
 	/* set Rx packet filter: (u32)a0=filters (see CMD_PFILTER_*) */
-	CMD_PACKET_FILTER	= _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 7),
+	CMD_PACKET_FILTER       = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 7),
 
 	/* hang detection notification */
 	CMD_HANG_NOTIFY         = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 8),
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 9e8fce0..ba3c94c 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -140,40 +140,40 @@
 #include "gdth.h"
 
 static void gdth_delay(int milliseconds);
-static void gdth_eval_mapping(ulong32 size, ulong32 *cyls, int *heads, int *secs);
+static void gdth_eval_mapping(u32 size, u32 *cyls, int *heads, int *secs);
 static irqreturn_t gdth_interrupt(int irq, void *dev_id);
 static irqreturn_t __gdth_interrupt(gdth_ha_str *ha,
                                     int gdth_from_wait, int* pIndex);
-static int gdth_sync_event(gdth_ha_str *ha, int service, unchar index,
+static int gdth_sync_event(gdth_ha_str *ha, int service, u8 index,
                                                                Scsi_Cmnd *scp);
 static int gdth_async_event(gdth_ha_str *ha);
 static void gdth_log_event(gdth_evt_data *dvr, char *buffer);
 
-static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar priority);
+static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 priority);
 static void gdth_next(gdth_ha_str *ha);
-static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar b);
+static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 b);
 static int gdth_special_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp);
-static gdth_evt_str *gdth_store_event(gdth_ha_str *ha, ushort source,
-                                      ushort idx, gdth_evt_data *evt);
+static gdth_evt_str *gdth_store_event(gdth_ha_str *ha, u16 source,
+                                      u16 idx, gdth_evt_data *evt);
 static int gdth_read_event(gdth_ha_str *ha, int handle, gdth_evt_str *estr);
-static void gdth_readapp_event(gdth_ha_str *ha, unchar application, 
+static void gdth_readapp_event(gdth_ha_str *ha, u8 application, 
                                gdth_evt_str *estr);
 static void gdth_clear_events(void);
 
 static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp,
-                                    char *buffer, ushort count);
+                                    char *buffer, u16 count);
 static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp);
-static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive);
+static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, u16 hdrive);
 
 static void gdth_enable_int(gdth_ha_str *ha);
 static int gdth_test_busy(gdth_ha_str *ha);
 static int gdth_get_cmd_index(gdth_ha_str *ha);
 static void gdth_release_event(gdth_ha_str *ha);
-static int gdth_wait(gdth_ha_str *ha, int index,ulong32 time);
-static int gdth_internal_cmd(gdth_ha_str *ha, unchar service, ushort opcode,
-                                             ulong32 p1, ulong64 p2,ulong64 p3);
+static int gdth_wait(gdth_ha_str *ha, int index,u32 time);
+static int gdth_internal_cmd(gdth_ha_str *ha, u8 service, u16 opcode,
+                                             u32 p1, u64 p2,u64 p3);
 static int gdth_search_drives(gdth_ha_str *ha);
-static int gdth_analyse_hdrive(gdth_ha_str *ha, ushort hdrive);
+static int gdth_analyse_hdrive(gdth_ha_str *ha, u16 hdrive);
 
 static const char *gdth_ctr_name(gdth_ha_str *ha);
 
@@ -189,7 +189,7 @@
 static void gdth_scsi_done(struct scsi_cmnd *scp);
 
 #ifdef DEBUG_GDTH
-static unchar   DebugState = DEBUG_GDTH;
+static u8   DebugState = DEBUG_GDTH;
 
 #ifdef __SERIAL__
 #define MAX_SERBUF 160
@@ -270,30 +270,30 @@
 #endif
 
 #ifdef GDTH_STATISTICS
-static ulong32 max_rq=0, max_index=0, max_sg=0;
+static u32 max_rq=0, max_index=0, max_sg=0;
 #ifdef INT_COAL
-static ulong32 max_int_coal=0;
+static u32 max_int_coal=0;
 #endif
-static ulong32 act_ints=0, act_ios=0, act_stats=0, act_rq=0;
+static u32 act_ints=0, act_ios=0, act_stats=0, act_rq=0;
 static struct timer_list gdth_timer;
 #endif
 
-#define PTR2USHORT(a)   (ushort)(ulong)(a)
+#define PTR2USHORT(a)   (u16)(unsigned long)(a)
 #define GDTOFFSOF(a,b)  (size_t)&(((a*)0)->b)
 #define INDEX_OK(i,t)   ((i)<ARRAY_SIZE(t))
 
 #define BUS_L2P(a,b)    ((b)>(a)->virt_bus ? (b-1):(b))
 
 #ifdef CONFIG_ISA
-static unchar   gdth_drq_tab[4] = {5,6,7,7};            /* DRQ table */
+static u8   gdth_drq_tab[4] = {5,6,7,7};            /* DRQ table */
 #endif
 #if defined(CONFIG_EISA) || defined(CONFIG_ISA)
-static unchar   gdth_irq_tab[6] = {0,10,11,12,14,0};    /* IRQ table */
+static u8   gdth_irq_tab[6] = {0,10,11,12,14,0};    /* IRQ table */
 #endif
-static unchar   gdth_polling;                           /* polling if TRUE */
+static u8   gdth_polling;                           /* polling if TRUE */
 static int      gdth_ctr_count  = 0;                    /* controller count */
 static LIST_HEAD(gdth_instances);                       /* controller list */
-static unchar   gdth_write_through = FALSE;             /* write through */
+static u8   gdth_write_through = FALSE;             /* write through */
 static gdth_evt_str ebuffer[MAX_EVENTS];                /* event buffer */
 static int elastidx;
 static int eoldidx;
@@ -303,7 +303,7 @@
 #define DOU     2                               /* OUT data direction */
 #define DNO     DIN                             /* no data transfer */
 #define DUN     DIN                             /* unknown data direction */
-static unchar gdth_direction_tab[0x100] = {
+static u8 gdth_direction_tab[0x100] = {
     DNO,DNO,DIN,DIN,DOU,DIN,DIN,DOU,DIN,DUN,DOU,DOU,DUN,DUN,DUN,DIN,
     DNO,DIN,DIN,DOU,DIN,DOU,DNO,DNO,DOU,DNO,DIN,DNO,DIN,DOU,DNO,DUN,
     DIN,DUN,DIN,DUN,DOU,DIN,DUN,DUN,DIN,DIN,DOU,DNO,DUN,DIN,DOU,DOU,
@@ -390,7 +390,7 @@
 static struct gdth_cmndinfo *gdth_get_cmndinfo(gdth_ha_str *ha)
 {
 	struct gdth_cmndinfo *priv = NULL;
-	ulong flags;
+	unsigned long flags;
 	int i;
 
 	spin_lock_irqsave(&ha->smp_lock, flags);
@@ -493,7 +493,7 @@
     return rval;
 }
 
-static void gdth_eval_mapping(ulong32 size, ulong32 *cyls, int *heads, int *secs)
+static void gdth_eval_mapping(u32 size, u32 *cyls, int *heads, int *secs)
 {
     *cyls = size /HEADS/SECS;
     if (*cyls <= MAXCYLS) {
@@ -514,9 +514,9 @@
 
 /* controller search and initialization functions */
 #ifdef CONFIG_EISA
-static int __init gdth_search_eisa(ushort eisa_adr)
+static int __init gdth_search_eisa(u16 eisa_adr)
 {
-    ulong32 id;
+    u32 id;
     
     TRACE(("gdth_search_eisa() adr. %x\n",eisa_adr));
     id = inl(eisa_adr+ID0REG);
@@ -533,13 +533,13 @@
 #endif /* CONFIG_EISA */
 
 #ifdef CONFIG_ISA
-static int __init gdth_search_isa(ulong32 bios_adr)
+static int __init gdth_search_isa(u32 bios_adr)
 {
     void __iomem *addr;
-    ulong32 id;
+    u32 id;
 
     TRACE(("gdth_search_isa() bios adr. %x\n",bios_adr));
-    if ((addr = ioremap(bios_adr+BIOS_ID_OFFS, sizeof(ulong32))) != NULL) {
+    if ((addr = ioremap(bios_adr+BIOS_ID_OFFS, sizeof(u32))) != NULL) {
         id = readl(addr);
         iounmap(addr);
         if (id == GDT2_ID)                          /* GDT2000 */
@@ -551,7 +551,7 @@
 
 #ifdef CONFIG_PCI
 
-static bool gdth_search_vortex(ushort device)
+static bool gdth_search_vortex(u16 device)
 {
 	if (device <= PCI_DEVICE_ID_VORTEX_GDT6555)
 		return true;
@@ -603,9 +603,9 @@
 static int __devinit gdth_pci_init_one(struct pci_dev *pdev,
 				       const struct pci_device_id *ent)
 {
-	ushort vendor = pdev->vendor;
-	ushort device = pdev->device;
-	ulong base0, base1, base2;
+	u16 vendor = pdev->vendor;
+	u16 device = pdev->device;
+	unsigned long base0, base1, base2;
 	int rc;
 	gdth_pci_str gdth_pcistr;
 	gdth_ha_str *ha = NULL;
@@ -658,10 +658,10 @@
 #endif /* CONFIG_PCI */
 
 #ifdef CONFIG_EISA
-static int __init gdth_init_eisa(ushort eisa_adr,gdth_ha_str *ha)
+static int __init gdth_init_eisa(u16 eisa_adr,gdth_ha_str *ha)
 {
-    ulong32 retries,id;
-    unchar prot_ver,eisacf,i,irq_found;
+    u32 retries,id;
+    u8 prot_ver,eisacf,i,irq_found;
 
     TRACE(("gdth_init_eisa() adr. %x\n",eisa_adr));
     
@@ -688,7 +688,7 @@
         return 0;
     }
     ha->bmic = eisa_adr;
-    ha->brd_phys = (ulong32)eisa_adr >> 12;
+    ha->brd_phys = (u32)eisa_adr >> 12;
 
     outl(0,eisa_adr+MAILBOXREG);
     outl(0,eisa_adr+MAILBOXREG+4);
@@ -752,12 +752,12 @@
 #endif /* CONFIG_EISA */
 
 #ifdef CONFIG_ISA
-static int __init gdth_init_isa(ulong32 bios_adr,gdth_ha_str *ha)
+static int __init gdth_init_isa(u32 bios_adr,gdth_ha_str *ha)
 {
     register gdt2_dpram_str __iomem *dp2_ptr;
     int i;
-    unchar irq_drq,prot_ver;
-    ulong32 retries;
+    u8 irq_drq,prot_ver;
+    u32 retries;
 
     TRACE(("gdth_init_isa() bios adr. %x\n",bios_adr));
 
@@ -812,7 +812,7 @@
         }
         gdth_delay(1);
     }
-    prot_ver = (unchar)readl(&dp2_ptr->u.ic.S_Info[0]);
+    prot_ver = (u8)readl(&dp2_ptr->u.ic.S_Info[0]);
     writeb(0, &dp2_ptr->u.ic.Status);
     writeb(0xff, &dp2_ptr->io.irqdel);
     if (prot_ver != PROTOCOL_VERSION) {
@@ -859,9 +859,9 @@
     register gdt6_dpram_str __iomem *dp6_ptr;
     register gdt6c_dpram_str __iomem *dp6c_ptr;
     register gdt6m_dpram_str __iomem *dp6m_ptr;
-    ulong32 retries;
-    unchar prot_ver;
-    ushort command;
+    u32 retries;
+    u8 prot_ver;
+    u16 command;
     int i, found = FALSE;
 
     TRACE(("gdth_init_pci()\n"));
@@ -871,7 +871,7 @@
     else
         ha->oem_id = OEM_ID_ICP;
     ha->brd_phys = (pdev->bus->number << 8) | (pdev->devfn & 0xf8);
-    ha->stype = (ulong32)pdev->device;
+    ha->stype = (u32)pdev->device;
     ha->irq = pdev->irq;
     ha->pdev = pdev;
     
@@ -891,7 +891,7 @@
             found = FALSE;
             for (i = 0xC8000; i < 0xE8000; i += 0x4000) {
                 iounmap(ha->brd);
-                ha->brd = ioremap(i, sizeof(ushort)); 
+                ha->brd = ioremap(i, sizeof(u16)); 
                 if (ha->brd == NULL) {
                     printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
                     return 0;
@@ -947,7 +947,7 @@
             }
             gdth_delay(1);
         }
-        prot_ver = (unchar)readl(&dp6_ptr->u.ic.S_Info[0]);
+        prot_ver = (u8)readl(&dp6_ptr->u.ic.S_Info[0]);
         writeb(0, &dp6_ptr->u.ic.S_Status);
         writeb(0xff, &dp6_ptr->io.irqdel);
         if (prot_ver != PROTOCOL_VERSION) {
@@ -1000,7 +1000,7 @@
             found = FALSE;
             for (i = 0xC8000; i < 0xE8000; i += 0x4000) {
                 iounmap(ha->brd);
-                ha->brd = ioremap(i, sizeof(ushort)); 
+                ha->brd = ioremap(i, sizeof(u16)); 
                 if (ha->brd == NULL) {
                     printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
                     return 0;
@@ -1059,7 +1059,7 @@
             }
             gdth_delay(1);
         }
-        prot_ver = (unchar)readl(&dp6c_ptr->u.ic.S_Info[0]);
+        prot_ver = (u8)readl(&dp6c_ptr->u.ic.S_Info[0]);
         writeb(0, &dp6c_ptr->u.ic.Status);
         if (prot_ver != PROTOCOL_VERSION) {
             printk("GDT-PCI: Illegal protocol version\n");
@@ -1128,7 +1128,7 @@
             found = FALSE;
             for (i = 0xC8000; i < 0xE8000; i += 0x4000) {
                 iounmap(ha->brd);
-                ha->brd = ioremap(i, sizeof(ushort)); 
+                ha->brd = ioremap(i, sizeof(u16)); 
                 if (ha->brd == NULL) {
                     printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
                     return 0;
@@ -1180,7 +1180,7 @@
             }
             gdth_delay(1);
         }
-        prot_ver = (unchar)readl(&dp6m_ptr->u.ic.S_Info[0]);
+        prot_ver = (u8)readl(&dp6m_ptr->u.ic.S_Info[0]);
         writeb(0, &dp6m_ptr->u.ic.S_Status);
         if (prot_ver != PROTOCOL_VERSION) {
             printk("GDT-PCI: Illegal protocol version\n");
@@ -1223,7 +1223,7 @@
             }
             gdth_delay(1);
         }
-        prot_ver = (unchar)(readl(&dp6m_ptr->u.ic.S_Info[0]) >> 16);
+        prot_ver = (u8)(readl(&dp6m_ptr->u.ic.S_Info[0]) >> 16);
         writeb(0, &dp6m_ptr->u.ic.S_Status);
         if (prot_ver < 0x2b)      /* FW < x.43: no 64-bit DMA support */
             ha->dma64_support = 0;
@@ -1239,7 +1239,7 @@
 
 static void __devinit gdth_enable_int(gdth_ha_str *ha)
 {
-    ulong flags;
+    unsigned long flags;
     gdt2_dpram_str __iomem *dp2_ptr;
     gdt6_dpram_str __iomem *dp6_ptr;
     gdt6m_dpram_str __iomem *dp6m_ptr;
@@ -1274,14 +1274,14 @@
 }
 
 /* return IStatus if interrupt was from this card else 0 */
-static unchar gdth_get_status(gdth_ha_str *ha)
+static u8 gdth_get_status(gdth_ha_str *ha)
 {
-    unchar IStatus = 0;
+    u8 IStatus = 0;
 
     TRACE(("gdth_get_status() irq %d ctr_count %d\n", ha->irq, gdth_ctr_count));
 
         if (ha->type == GDT_EISA)
-            IStatus = inb((ushort)ha->bmic + EDOORREG);
+            IStatus = inb((u16)ha->bmic + EDOORREG);
         else if (ha->type == GDT_ISA)
             IStatus =
                 readb(&((gdt2_dpram_str __iomem *)ha->brd)->u.ic.Cmd_Index);
@@ -1329,7 +1329,7 @@
         if (ha->cmd_tab[i].cmnd == UNUSED_CMND) {
             ha->cmd_tab[i].cmnd = ha->pccb->RequestBuffer;
             ha->cmd_tab[i].service = ha->pccb->Service;
-            ha->pccb->CommandIndex = (ulong32)i+2;
+            ha->pccb->CommandIndex = (u32)i+2;
             return (i+2);
         }
     }
@@ -1362,7 +1362,7 @@
     register gdt6c_dpram_str __iomem *dp6c_ptr;
     gdt6_dpram_str __iomem *dp6_ptr;
     gdt2_dpram_str __iomem *dp2_ptr;
-    ushort cp_count,dp_offset,cmd_no;
+    u16 cp_count,dp_offset,cmd_no;
     
     TRACE(("gdth_copy_command() hanum %d\n", ha->hanum));
 
@@ -1386,28 +1386,28 @@
         dp2_ptr = ha->brd;
         writew(dp_offset + DPMEM_COMMAND_OFFSET,
                     &dp2_ptr->u.ic.comm_queue[cmd_no].offset);
-        writew((ushort)cmd_ptr->Service,
+        writew((u16)cmd_ptr->Service,
                     &dp2_ptr->u.ic.comm_queue[cmd_no].serv_id);
         memcpy_toio(&dp2_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
     } else if (ha->type == GDT_PCI) {
         dp6_ptr = ha->brd;
         writew(dp_offset + DPMEM_COMMAND_OFFSET,
                     &dp6_ptr->u.ic.comm_queue[cmd_no].offset);
-        writew((ushort)cmd_ptr->Service,
+        writew((u16)cmd_ptr->Service,
                     &dp6_ptr->u.ic.comm_queue[cmd_no].serv_id);
         memcpy_toio(&dp6_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
     } else if (ha->type == GDT_PCINEW) {
         dp6c_ptr = ha->brd;
         writew(dp_offset + DPMEM_COMMAND_OFFSET,
                     &dp6c_ptr->u.ic.comm_queue[cmd_no].offset);
-        writew((ushort)cmd_ptr->Service,
+        writew((u16)cmd_ptr->Service,
                     &dp6c_ptr->u.ic.comm_queue[cmd_no].serv_id);
         memcpy_toio(&dp6c_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
     } else if (ha->type == GDT_PCIMPR) {
         dp6m_ptr = ha->brd;
         writew(dp_offset + DPMEM_COMMAND_OFFSET,
                     &dp6m_ptr->u.ic.comm_queue[cmd_no].offset);
-        writew((ushort)cmd_ptr->Service,
+        writew((u16)cmd_ptr->Service,
                     &dp6m_ptr->u.ic.comm_queue[cmd_no].serv_id);
         memcpy_toio(&dp6m_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
     }
@@ -1420,14 +1420,14 @@
 
 #ifdef GDTH_STATISTICS
     {
-        ulong32 i,j;
+        u32 i,j;
         for (i=0,j=0; j<GDTH_MAXCMDS; ++j) {
             if (ha->cmd_tab[j].cmnd != UNUSED_CMND)
                 ++i;
         }
         if (max_index < i) {
             max_index = i;
-            TRACE3(("GDT: max_index = %d\n",(ushort)i));
+            TRACE3(("GDT: max_index = %d\n",(u16)i));
         }
     }
 #endif
@@ -1450,7 +1450,7 @@
     }
 }
 
-static int gdth_wait(gdth_ha_str *ha, int index, ulong32 time)
+static int gdth_wait(gdth_ha_str *ha, int index, u32 time)
 {
     int answer_found = FALSE;
     int wait_index = 0;
@@ -1476,8 +1476,8 @@
 }
 
 
-static int gdth_internal_cmd(gdth_ha_str *ha, unchar service, ushort opcode,
-                                            ulong32 p1, ulong64 p2, ulong64 p3)
+static int gdth_internal_cmd(gdth_ha_str *ha, u8 service, u16 opcode,
+                                            u32 p1, u64 p2, u64 p3)
 {
     register gdth_cmd_str *cmd_ptr;
     int retries,index;
@@ -1501,35 +1501,35 @@
         if (service == CACHESERVICE) {
             if (opcode == GDT_IOCTL) {
                 cmd_ptr->u.ioctl.subfunc = p1;
-                cmd_ptr->u.ioctl.channel = (ulong32)p2;
-                cmd_ptr->u.ioctl.param_size = (ushort)p3;
+                cmd_ptr->u.ioctl.channel = (u32)p2;
+                cmd_ptr->u.ioctl.param_size = (u16)p3;
                 cmd_ptr->u.ioctl.p_param = ha->scratch_phys;
             } else {
                 if (ha->cache_feat & GDT_64BIT) {
-                    cmd_ptr->u.cache64.DeviceNo = (ushort)p1;
+                    cmd_ptr->u.cache64.DeviceNo = (u16)p1;
                     cmd_ptr->u.cache64.BlockNo  = p2;
                 } else {
-                    cmd_ptr->u.cache.DeviceNo = (ushort)p1;
-                    cmd_ptr->u.cache.BlockNo  = (ulong32)p2;
+                    cmd_ptr->u.cache.DeviceNo = (u16)p1;
+                    cmd_ptr->u.cache.BlockNo  = (u32)p2;
                 }
             }
         } else if (service == SCSIRAWSERVICE) {
             if (ha->raw_feat & GDT_64BIT) {
                 cmd_ptr->u.raw64.direction  = p1;
-                cmd_ptr->u.raw64.bus        = (unchar)p2;
-                cmd_ptr->u.raw64.target     = (unchar)p3;
-                cmd_ptr->u.raw64.lun        = (unchar)(p3 >> 8);
+                cmd_ptr->u.raw64.bus        = (u8)p2;
+                cmd_ptr->u.raw64.target     = (u8)p3;
+                cmd_ptr->u.raw64.lun        = (u8)(p3 >> 8);
             } else {
                 cmd_ptr->u.raw.direction  = p1;
-                cmd_ptr->u.raw.bus        = (unchar)p2;
-                cmd_ptr->u.raw.target     = (unchar)p3;
-                cmd_ptr->u.raw.lun        = (unchar)(p3 >> 8);
+                cmd_ptr->u.raw.bus        = (u8)p2;
+                cmd_ptr->u.raw.target     = (u8)p3;
+                cmd_ptr->u.raw.lun        = (u8)(p3 >> 8);
             }
         } else if (service == SCREENSERVICE) {
             if (opcode == GDT_REALTIME) {
-                *(ulong32 *)&cmd_ptr->u.screen.su.data[0] = p1;
-                *(ulong32 *)&cmd_ptr->u.screen.su.data[4] = (ulong32)p2;
-                *(ulong32 *)&cmd_ptr->u.screen.su.data[8] = (ulong32)p3;
+                *(u32 *)&cmd_ptr->u.screen.su.data[0] = p1;
+                *(u32 *)&cmd_ptr->u.screen.su.data[4] = (u32)p2;
+                *(u32 *)&cmd_ptr->u.screen.su.data[8] = (u32)p3;
             }
         }
         ha->cmd_len          = sizeof(gdth_cmd_str);
@@ -1555,9 +1555,9 @@
 
 static int __devinit gdth_search_drives(gdth_ha_str *ha)
 {
-    ushort cdev_cnt, i;
+    u16 cdev_cnt, i;
     int ok;
-    ulong32 bus_no, drv_cnt, drv_no, j;
+    u32 bus_no, drv_cnt, drv_no, j;
     gdth_getch_str *chn;
     gdth_drlist_str *drl;
     gdth_iochan_str *ioc;
@@ -1570,8 +1570,8 @@
 #endif
 
 #ifdef GDTH_RTC
-    unchar rtc[12];
-    ulong flags;
+    u8 rtc[12];
+    unsigned long flags;
 #endif     
    
     TRACE(("gdth_search_drives() hanum %d\n", ha->hanum));
@@ -1584,7 +1584,7 @@
         if (ok)
             ha->screen_feat = GDT_64BIT;
     }
-    if (force_dma32 || (!ok && ha->status == (ushort)S_NOFUNC))
+    if (force_dma32 || (!ok && ha->status == (u16)S_NOFUNC))
         ok = gdth_internal_cmd(ha, SCREENSERVICE, GDT_INIT, 0, 0, 0);
     if (!ok) {
         printk("GDT-HA %d: Initialization error screen service (code %d)\n",
@@ -1609,11 +1609,11 @@
             rtc[j] = CMOS_READ(j);
     } while (rtc[0] != CMOS_READ(0));
     spin_unlock_irqrestore(&rtc_lock, flags);
-    TRACE2(("gdth_search_drives(): RTC: %x/%x/%x\n",*(ulong32 *)&rtc[0],
-            *(ulong32 *)&rtc[4], *(ulong32 *)&rtc[8]));
+    TRACE2(("gdth_search_drives(): RTC: %x/%x/%x\n",*(u32 *)&rtc[0],
+            *(u32 *)&rtc[4], *(u32 *)&rtc[8]));
     /* 3. send to controller firmware */
-    gdth_internal_cmd(ha, SCREENSERVICE, GDT_REALTIME, *(ulong32 *)&rtc[0],
-                      *(ulong32 *)&rtc[4], *(ulong32 *)&rtc[8]);
+    gdth_internal_cmd(ha, SCREENSERVICE, GDT_REALTIME, *(u32 *)&rtc[0],
+                      *(u32 *)&rtc[4], *(u32 *)&rtc[8]);
 #endif  
  
     /* unfreeze all IOs */
@@ -1627,7 +1627,7 @@
         if (ok)
             ha->cache_feat = GDT_64BIT;
     }
-    if (force_dma32 || (!ok && ha->status == (ushort)S_NOFUNC))
+    if (force_dma32 || (!ok && ha->status == (u16)S_NOFUNC))
         ok = gdth_internal_cmd(ha, CACHESERVICE, GDT_INIT, LINUX_OS, 0, 0);
     if (!ok) {
         printk("GDT-HA %d: Initialization error cache service (code %d)\n",
@@ -1635,7 +1635,7 @@
         return 0;
     }
     TRACE2(("gdth_search_drives(): CACHESERVICE initialized\n"));
-    cdev_cnt = (ushort)ha->info;
+    cdev_cnt = (u16)ha->info;
     ha->fw_vers = ha->service;
 
 #ifdef INT_COAL
@@ -1644,7 +1644,7 @@
         pmod = (gdth_perf_modes *)ha->pscratch;
         pmod->version          = 1;
         pmod->st_mode          = 1;    /* enable one status buffer */
-        *((ulong64 *)&pmod->st_buff_addr1) = ha->coal_stat_phys;
+        *((u64 *)&pmod->st_buff_addr1) = ha->coal_stat_phys;
         pmod->st_buff_indx1    = COALINDEX;
         pmod->st_buff_addr2    = 0;
         pmod->st_buff_u_addr2  = 0;
@@ -1705,7 +1705,7 @@
             else
                 ha->bus_id[bus_no] = 0xff;
         }       
-        ha->bus_cnt = (unchar)bus_no;
+        ha->bus_cnt = (u8)bus_no;
     }
     TRACE2(("gdth_search_drives() %d channels\n",ha->bus_cnt));
 
@@ -1789,12 +1789,12 @@
 
         /* logical drives */
         if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, CACHE_DRV_CNT,
-                              INVALID_CHANNEL,sizeof(ulong32))) {
-            drv_cnt = *(ulong32 *)ha->pscratch;
+                              INVALID_CHANNEL,sizeof(u32))) {
+            drv_cnt = *(u32 *)ha->pscratch;
             if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, CACHE_DRV_LIST,
-                                  INVALID_CHANNEL,drv_cnt * sizeof(ulong32))) {
+                                  INVALID_CHANNEL,drv_cnt * sizeof(u32))) {
                 for (j = 0; j < drv_cnt; ++j) {
-                    drv_no = ((ulong32 *)ha->pscratch)[j];
+                    drv_no = ((u32 *)ha->pscratch)[j];
                     if (drv_no < MAX_LDRIVES) {
                         ha->hdr[drv_no].is_logdrv = TRUE;
                         TRACE2(("Drive %d is log. drive\n",drv_no));
@@ -1838,7 +1838,7 @@
         if (ok)
             ha->raw_feat = GDT_64BIT;
     }
-    if (force_dma32 || (!ok && ha->status == (ushort)S_NOFUNC))
+    if (force_dma32 || (!ok && ha->status == (u16)S_NOFUNC))
         ok = gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_INIT, 0, 0, 0);
     if (!ok) {
         printk("GDT-HA %d: Initialization error raw service (code %d)\n",
@@ -1854,7 +1854,7 @@
         if (gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_GET_FEAT, 0, 0, 0)) {
             TRACE2(("gdth_search_dr(): get feat RAWSERVICE %d\n",
                     ha->info));
-            ha->raw_feat |= (ushort)ha->info;
+            ha->raw_feat |= (u16)ha->info;
         }
     } 
 
@@ -1865,7 +1865,7 @@
         if (gdth_internal_cmd(ha, CACHESERVICE, GDT_GET_FEAT, 0, 0, 0)) {
             TRACE2(("gdth_search_dr(): get feat CACHESERV. %d\n",
                     ha->info));
-            ha->cache_feat |= (ushort)ha->info;
+            ha->cache_feat |= (u16)ha->info;
         }
     }
 
@@ -1923,9 +1923,9 @@
     return 1;
 }
 
-static int gdth_analyse_hdrive(gdth_ha_str *ha, ushort hdrive)
+static int gdth_analyse_hdrive(gdth_ha_str *ha, u16 hdrive)
 {
-    ulong32 drv_cyls;
+    u32 drv_cyls;
     int drv_hds, drv_secs;
 
     TRACE(("gdth_analyse_hdrive() hanum %d drive %d\n", ha->hanum, hdrive));
@@ -1944,17 +1944,17 @@
     } else {
         drv_hds = ha->info2 & 0xff;
         drv_secs = (ha->info2 >> 8) & 0xff;
-        drv_cyls = (ulong32)ha->hdr[hdrive].size / drv_hds / drv_secs;
+        drv_cyls = (u32)ha->hdr[hdrive].size / drv_hds / drv_secs;
     }
-    ha->hdr[hdrive].heads = (unchar)drv_hds;
-    ha->hdr[hdrive].secs  = (unchar)drv_secs;
+    ha->hdr[hdrive].heads = (u8)drv_hds;
+    ha->hdr[hdrive].secs  = (u8)drv_secs;
     /* round size */
     ha->hdr[hdrive].size  = drv_cyls * drv_hds * drv_secs;
     
     if (ha->cache_feat & GDT_64BIT) {
         if (gdth_internal_cmd(ha, CACHESERVICE, GDT_X_INFO, hdrive, 0, 0)
             && ha->info2 != 0) {
-            ha->hdr[hdrive].size = ((ulong64)ha->info2 << 32) | ha->info;
+            ha->hdr[hdrive].size = ((u64)ha->info2 << 32) | ha->info;
         }
     }
     TRACE2(("gdth_search_dr() cdr. %d size %d hds %d scs %d\n",
@@ -1964,7 +1964,7 @@
     if (gdth_internal_cmd(ha, CACHESERVICE, GDT_DEVTYPE, hdrive, 0, 0)) {
         TRACE2(("gdth_search_dr() cache drive %d devtype %d\n",
                 hdrive,ha->info));
-        ha->hdr[hdrive].devtype = (ushort)ha->info;
+        ha->hdr[hdrive].devtype = (u16)ha->info;
     }
 
     /* cluster info */
@@ -1972,14 +1972,14 @@
         TRACE2(("gdth_search_dr() cache drive %d cluster info %d\n",
                 hdrive,ha->info));
         if (!shared_access)
-            ha->hdr[hdrive].cluster_type = (unchar)ha->info;
+            ha->hdr[hdrive].cluster_type = (u8)ha->info;
     }
 
     /* R/W attributes */
     if (gdth_internal_cmd(ha, CACHESERVICE, GDT_RW_ATTRIBS, hdrive, 0, 0)) {
         TRACE2(("gdth_search_dr() cache drive %d r/w attrib. %d\n",
                 hdrive,ha->info));
-        ha->hdr[hdrive].rw_attribs = (unchar)ha->info;
+        ha->hdr[hdrive].rw_attribs = (u8)ha->info;
     }
 
     return 1;
@@ -1988,12 +1988,12 @@
 
 /* command queueing/sending functions */
 
-static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar priority)
+static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 priority)
 {
     struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
     register Scsi_Cmnd *pscp;
     register Scsi_Cmnd *nscp;
-    ulong flags;
+    unsigned long flags;
 
     TRACE(("gdth_putq() priority %d\n",priority));
     spin_lock_irqsave(&ha->smp_lock, flags);
@@ -2023,7 +2023,7 @@
         ++flags;
     if (max_rq < flags) {
         max_rq = flags;
-        TRACE3(("GDT: max_rq = %d\n",(ushort)max_rq));
+        TRACE3(("GDT: max_rq = %d\n",(u16)max_rq));
     }
 #endif
 }
@@ -2032,9 +2032,9 @@
 {
     register Scsi_Cmnd *pscp;
     register Scsi_Cmnd *nscp;
-    unchar b, t, l, firsttime;
-    unchar this_cmd, next_cmd;
-    ulong flags = 0;
+    u8 b, t, l, firsttime;
+    u8 this_cmd, next_cmd;
+    unsigned long flags = 0;
     int cmd_index;
 
     TRACE(("gdth_next() hanum %d\n", ha->hanum));
@@ -2282,20 +2282,20 @@
  * buffers, kmap_atomic() as needed.
  */
 static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp,
-                                    char *buffer, ushort count)
+                                    char *buffer, u16 count)
 {
-    ushort cpcount,i, max_sg = scsi_sg_count(scp);
-    ushort cpsum,cpnow;
+    u16 cpcount,i, max_sg = scsi_sg_count(scp);
+    u16 cpsum,cpnow;
     struct scatterlist *sl;
     char *address;
 
-    cpcount = min_t(ushort, count, scsi_bufflen(scp));
+    cpcount = min_t(u16, count, scsi_bufflen(scp));
 
     if (cpcount) {
         cpsum=0;
         scsi_for_each_sg(scp, sl, max_sg, i) {
             unsigned long flags;
-            cpnow = (ushort)sl->length;
+            cpnow = (u16)sl->length;
             TRACE(("copy_internal() now %d sum %d count %d %d\n",
                           cpnow, cpsum, cpcount, scsi_bufflen(scp)));
             if (cpsum+cpnow > cpcount) 
@@ -2325,7 +2325,7 @@
 
 static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
 {
-    unchar t;
+    u8 t;
     gdth_inq_data inq;
     gdth_rdcap_data rdc;
     gdth_sense_data sd;
@@ -2389,7 +2389,7 @@
 
       case READ_CAPACITY:
         TRACE2(("Read capacity hdrive %d\n",t));
-        if (ha->hdr[t].size > (ulong64)0xffffffff)
+        if (ha->hdr[t].size > (u64)0xffffffff)
             rdc.last_block_no = 0xffffffff;
         else
             rdc.last_block_no = cpu_to_be32(ha->hdr[t].size-1);
@@ -2425,12 +2425,12 @@
     return 0;
 }
 
-static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive)
+static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, u16 hdrive)
 {
     register gdth_cmd_str *cmdp;
     struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
-    ulong32 cnt, blockcnt;
-    ulong64 no, blockno;
+    u32 cnt, blockcnt;
+    u64 no, blockno;
     int i, cmd_index, read_write, sgcnt, mode64;
 
     cmdp = ha->pccb;
@@ -2498,17 +2498,17 @@
 
     if (read_write) {
         if (scp->cmd_len == 16) {
-            memcpy(&no, &scp->cmnd[2], sizeof(ulong64));
+            memcpy(&no, &scp->cmnd[2], sizeof(u64));
             blockno = be64_to_cpu(no);
-            memcpy(&cnt, &scp->cmnd[10], sizeof(ulong32));
+            memcpy(&cnt, &scp->cmnd[10], sizeof(u32));
             blockcnt = be32_to_cpu(cnt);
         } else if (scp->cmd_len == 10) {
-            memcpy(&no, &scp->cmnd[2], sizeof(ulong32));
+            memcpy(&no, &scp->cmnd[2], sizeof(u32));
             blockno = be32_to_cpu(no);
-            memcpy(&cnt, &scp->cmnd[7], sizeof(ushort));
+            memcpy(&cnt, &scp->cmnd[7], sizeof(u16));
             blockcnt = be16_to_cpu(cnt);
         } else {
-            memcpy(&no, &scp->cmnd[0], sizeof(ulong32));
+            memcpy(&no, &scp->cmnd[0], sizeof(u32));
             blockno = be32_to_cpu(no) & 0x001fffffUL;
             blockcnt= scp->cmnd[4]==0 ? 0x100 : scp->cmnd[4];
         }
@@ -2516,7 +2516,7 @@
             cmdp->u.cache64.BlockNo = blockno;
             cmdp->u.cache64.BlockCnt = blockcnt;
         } else {
-            cmdp->u.cache.BlockNo = (ulong32)blockno;
+            cmdp->u.cache.BlockNo = (u32)blockno;
             cmdp->u.cache.BlockCnt = blockcnt;
         }
 
@@ -2528,12 +2528,12 @@
             if (mode64) {
                 struct scatterlist *sl;
 
-                cmdp->u.cache64.DestAddr= (ulong64)-1;
+                cmdp->u.cache64.DestAddr= (u64)-1;
                 cmdp->u.cache64.sg_canz = sgcnt;
                 scsi_for_each_sg(scp, sl, sgcnt, i) {
                     cmdp->u.cache64.sg_lst[i].sg_ptr = sg_dma_address(sl);
 #ifdef GDTH_DMA_STATISTICS
-                    if (cmdp->u.cache64.sg_lst[i].sg_ptr > (ulong64)0xffffffff)
+                    if (cmdp->u.cache64.sg_lst[i].sg_ptr > (u64)0xffffffff)
                         ha->dma64_cnt++;
                     else
                         ha->dma32_cnt++;
@@ -2555,8 +2555,8 @@
             }
 
 #ifdef GDTH_STATISTICS
-            if (max_sg < (ulong32)sgcnt) {
-                max_sg = (ulong32)sgcnt;
+            if (max_sg < (u32)sgcnt) {
+                max_sg = (u32)sgcnt;
                 TRACE3(("GDT: max_sg = %d\n",max_sg));
             }
 #endif
@@ -2572,7 +2572,7 @@
         TRACE(("cache cmd: cmd %d blockno. %d, blockcnt %d\n",
                cmdp->OpCode,cmdp->u.cache64.BlockNo,cmdp->u.cache64.BlockCnt));
         ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.cache64.sg_lst) +
-            (ushort)cmdp->u.cache64.sg_canz * sizeof(gdth_sg64_str);
+            (u16)cmdp->u.cache64.sg_canz * sizeof(gdth_sg64_str);
     } else {
         TRACE(("cache cmd: addr. %x sganz %x sgptr0 %x sglen0 %x\n",
                cmdp->u.cache.DestAddr,cmdp->u.cache.sg_canz,
@@ -2581,7 +2581,7 @@
         TRACE(("cache cmd: cmd %d blockno. %d, blockcnt %d\n",
                cmdp->OpCode,cmdp->u.cache.BlockNo,cmdp->u.cache.BlockCnt));
         ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.cache.sg_lst) +
-            (ushort)cmdp->u.cache.sg_canz * sizeof(gdth_sg_str);
+            (u16)cmdp->u.cache.sg_canz * sizeof(gdth_sg_str);
     }
     if (ha->cmd_len & 3)
         ha->cmd_len += (4 - (ha->cmd_len & 3));
@@ -2600,15 +2600,15 @@
     return cmd_index;
 }
 
-static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar b)
+static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 b)
 {
     register gdth_cmd_str *cmdp;
-    ushort i;
+    u16 i;
     dma_addr_t sense_paddr;
     int cmd_index, sgcnt, mode64;
-    unchar t,l;
+    u8 t,l;
     struct page *page;
-    ulong offset;
+    unsigned long offset;
     struct gdth_cmndinfo *cmndinfo;
 
     t = scp->device->id;
@@ -2654,7 +2654,7 @@
 
     } else {
         page = virt_to_page(scp->sense_buffer);
-        offset = (ulong)scp->sense_buffer & ~PAGE_MASK;
+        offset = (unsigned long)scp->sense_buffer & ~PAGE_MASK;
         sense_paddr = pci_map_page(ha->pdev,page,offset,
                                    16,PCI_DMA_FROMDEVICE);
 
@@ -2703,12 +2703,12 @@
             if (mode64) {
                 struct scatterlist *sl;
 
-                cmdp->u.raw64.sdata = (ulong64)-1;
+                cmdp->u.raw64.sdata = (u64)-1;
                 cmdp->u.raw64.sg_ranz = sgcnt;
                 scsi_for_each_sg(scp, sl, sgcnt, i) {
                     cmdp->u.raw64.sg_lst[i].sg_ptr = sg_dma_address(sl);
 #ifdef GDTH_DMA_STATISTICS
-                    if (cmdp->u.raw64.sg_lst[i].sg_ptr > (ulong64)0xffffffff)
+                    if (cmdp->u.raw64.sg_lst[i].sg_ptr > (u64)0xffffffff)
                         ha->dma64_cnt++;
                     else
                         ha->dma32_cnt++;
@@ -2744,7 +2744,7 @@
                    cmdp->u.raw64.sg_lst[0].sg_len));
             /* evaluate command size */
             ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw64.sg_lst) +
-                (ushort)cmdp->u.raw64.sg_ranz * sizeof(gdth_sg64_str);
+                (u16)cmdp->u.raw64.sg_ranz * sizeof(gdth_sg64_str);
         } else {
             TRACE(("raw cmd: addr. %x sganz %x sgptr0 %x sglen0 %x\n",
                    cmdp->u.raw.sdata,cmdp->u.raw.sg_ranz,
@@ -2752,7 +2752,7 @@
                    cmdp->u.raw.sg_lst[0].sg_len));
             /* evaluate command size */
             ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw.sg_lst) +
-                (ushort)cmdp->u.raw.sg_ranz * sizeof(gdth_sg_str);
+                (u16)cmdp->u.raw.sg_ranz * sizeof(gdth_sg_str);
         }
     }
     /* check space */
@@ -2802,7 +2802,7 @@
     if (cmdp->OpCode == GDT_IOCTL) {
         TRACE2(("IOCTL\n"));
         ha->cmd_len = 
-            GDTOFFSOF(gdth_cmd_str,u.ioctl.p_param) + sizeof(ulong64);
+            GDTOFFSOF(gdth_cmd_str,u.ioctl.p_param) + sizeof(u64);
     } else if (cmdp->Service == CACHESERVICE) {
         TRACE2(("cache command %d\n",cmdp->OpCode));
         if (ha->cache_feat & GDT_64BIT)
@@ -2840,8 +2840,8 @@
 
 
 /* Controller event handling functions */
-static gdth_evt_str *gdth_store_event(gdth_ha_str *ha, ushort source, 
-                                      ushort idx, gdth_evt_data *evt)
+static gdth_evt_str *gdth_store_event(gdth_ha_str *ha, u16 source, 
+                                      u16 idx, gdth_evt_data *evt)
 {
     gdth_evt_str *e;
     struct timeval tv;
@@ -2890,7 +2890,7 @@
 {
     gdth_evt_str *e;
     int eindex;
-    ulong flags;
+    unsigned long flags;
 
     TRACE2(("gdth_read_event() handle %d\n", handle));
     spin_lock_irqsave(&ha->smp_lock, flags);
@@ -2919,12 +2919,12 @@
 }
 
 static void gdth_readapp_event(gdth_ha_str *ha,
-                               unchar application, gdth_evt_str *estr)
+                               u8 application, gdth_evt_str *estr)
 {
     gdth_evt_str *e;
     int eindex;
-    ulong flags;
-    unchar found = FALSE;
+    unsigned long flags;
+    u8 found = FALSE;
 
     TRACE2(("gdth_readapp_event() app. %d\n", application));
     spin_lock_irqsave(&ha->smp_lock, flags);
@@ -2969,9 +2969,9 @@
     gdt2_dpram_str __iomem *dp2_ptr;
     Scsi_Cmnd *scp;
     int rval, i;
-    unchar IStatus;
-    ushort Service;
-    ulong flags = 0;
+    u8 IStatus;
+    u16 Service;
+    unsigned long flags = 0;
 #ifdef INT_COAL
     int coalesced = FALSE;
     int next = FALSE;
@@ -3018,7 +3018,7 @@
         if (coalesced) {
             /* For coalesced requests all status
                information is found in the status buffer */
-            IStatus = (unchar)(pcs->status & 0xff);
+            IStatus = (u8)(pcs->status & 0xff);
         }
 #endif
     
@@ -3197,7 +3197,7 @@
             ++act_int_coal;
             if (act_int_coal > max_int_coal) {
                 max_int_coal = act_int_coal;
-                printk("GDT: max_int_coal = %d\n",(ushort)max_int_coal);
+                printk("GDT: max_int_coal = %d\n",(u16)max_int_coal);
             }
 #endif      
             /* see if there is another status */
@@ -3225,12 +3225,12 @@
 	return __gdth_interrupt(ha, false, NULL);
 }
 
-static int gdth_sync_event(gdth_ha_str *ha, int service, unchar index,
+static int gdth_sync_event(gdth_ha_str *ha, int service, u8 index,
                                                               Scsi_Cmnd *scp)
 {
     gdth_msg_str *msg;
     gdth_cmd_str *cmdp;
-    unchar b, t;
+    u8 b, t;
     struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
 
     cmdp = ha->pccb;
@@ -3263,7 +3263,7 @@
             cmdp->u.screen.su.msg.msg_addr  = ha->msg_phys;
             ha->cmd_offs_dpmem = 0;
             ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.su.msg.msg_addr) 
-                + sizeof(ulong64);
+                + sizeof(u64);
             ha->cmd_cnt = 0;
             gdth_copy_command(ha);
             gdth_release_event(ha);
@@ -3297,7 +3297,7 @@
             cmdp->u.screen.su.msg.msg_addr  = ha->msg_phys;
             ha->cmd_offs_dpmem = 0;
             ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.su.msg.msg_addr) 
-                + sizeof(ulong64);
+                + sizeof(u64);
             ha->cmd_cnt = 0;
             gdth_copy_command(ha);
             gdth_release_event(ha);
@@ -3335,7 +3335,7 @@
                         cmndinfo->OpCode));
                 /* special commands GDT_CLUST_INFO/GDT_MOUNT ? */
                 if (cmndinfo->OpCode == GDT_CLUST_INFO) {
-                    ha->hdr[t].cluster_type = (unchar)ha->info;
+                    ha->hdr[t].cluster_type = (u8)ha->info;
                     if (!(ha->hdr[t].cluster_type & 
                         CLUSTER_MOUNTED)) {
                         /* NOT MOUNTED -> MOUNT */
@@ -3397,7 +3397,7 @@
                     ha->hdr[t].cluster_type &= ~CLUSTER_RESERVED;
                 }
                 memset((char*)scp->sense_buffer,0,16);
-                if (ha->status == (ushort)S_CACHE_RESERV) {
+                if (ha->status == (u16)S_CACHE_RESERV) {
                     scp->result = (DID_OK << 16) | (RESERVATION_CONFLICT << 1);
                 } else {
                     scp->sense_buffer[0] = 0x70;
@@ -3614,16 +3614,16 @@
             cmdp->u.screen.su.msg.msg_addr  = ha->msg_phys;
             ha->cmd_offs_dpmem = 0;
             ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.su.msg.msg_addr) 
-                + sizeof(ulong64);
+                + sizeof(u64);
             ha->cmd_cnt = 0;
             gdth_copy_command(ha);
             if (ha->type == GDT_EISA)
-                printk("[EISA slot %d] ",(ushort)ha->brd_phys);
+                printk("[EISA slot %d] ",(u16)ha->brd_phys);
             else if (ha->type == GDT_ISA)
-                printk("[DPMEM 0x%4X] ",(ushort)ha->brd_phys);
+                printk("[DPMEM 0x%4X] ",(u16)ha->brd_phys);
             else 
-                printk("[PCI %d/%d] ",(ushort)(ha->brd_phys>>8),
-                       (ushort)((ha->brd_phys>>3)&0x1f));
+                printk("[PCI %d/%d] ",(u16)(ha->brd_phys>>8),
+                       (u16)((ha->brd_phys>>3)&0x1f));
             gdth_release_event(ha);
         }
 
@@ -3640,7 +3640,7 @@
             ha->dvr.eu.async.service = ha->service;
             ha->dvr.eu.async.status  = ha->status;
             ha->dvr.eu.async.info    = ha->info;
-            *(ulong32 *)ha->dvr.eu.async.scsi_coord  = ha->info2;
+            *(u32 *)ha->dvr.eu.async.scsi_coord  = ha->info2;
         }
         gdth_store_event( ha, ES_ASYNC, ha->service, &ha->dvr );
         gdth_log_event( &ha->dvr, NULL );
@@ -3648,8 +3648,8 @@
         /* new host drive from expand? */
         if (ha->service == CACHESERVICE && ha->status == 56) {
             TRACE2(("gdth_async_event(): new host drive %d created\n",
-                    (ushort)ha->info));
-            /* gdth_analyse_hdrive(hanum, (ushort)ha->info); */
+                    (u16)ha->info));
+            /* gdth_analyse_hdrive(hanum, (u16)ha->info); */
         }   
     }
     return 1;
@@ -3680,13 +3680,13 @@
         for (j=0,i=1; i < f[0]; i+=2) {
             switch (f[i+1]) {
               case 4:
-                stack.b[j++] = *(ulong32*)&dvr->eu.stream[(int)f[i]];
+                stack.b[j++] = *(u32*)&dvr->eu.stream[(int)f[i]];
                 break;
               case 2:
-                stack.b[j++] = *(ushort*)&dvr->eu.stream[(int)f[i]];
+                stack.b[j++] = *(u16*)&dvr->eu.stream[(int)f[i]];
                 break;
               case 1:
-                stack.b[j++] = *(unchar*)&dvr->eu.stream[(int)f[i]];
+                stack.b[j++] = *(u8*)&dvr->eu.stream[(int)f[i]];
                 break;
               default:
                 break;
@@ -3712,14 +3712,14 @@
 }
 
 #ifdef GDTH_STATISTICS
-static unchar	gdth_timer_running;
+static u8	gdth_timer_running;
 
-static void gdth_timeout(ulong data)
+static void gdth_timeout(unsigned long data)
 {
-    ulong32 i;
+    u32 i;
     Scsi_Cmnd *nscp;
     gdth_ha_str *ha;
-    ulong flags;
+    unsigned long flags;
 
     if(unlikely(list_empty(&gdth_instances))) {
 	    gdth_timer_running = 0;
@@ -3891,8 +3891,8 @@
 {
 	gdth_ha_str *ha = shost_priv(scp->device->host);
 	struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
-	unchar b, t;
-	ulong flags;
+	u8 b, t;
+	unsigned long flags;
 	enum blk_eh_timer_return retval = BLK_EH_NOT_HANDLED;
 
 	TRACE(("%s() cmd 0x%x\n", scp->cmnd[0], __func__));
@@ -3924,9 +3924,9 @@
 {
     gdth_ha_str *ha = shost_priv(scp->device->host);
     int i;
-    ulong flags;
+    unsigned long flags;
     Scsi_Cmnd *cmnd;
-    unchar b;
+    u8 b;
 
     TRACE2(("gdth_eh_bus_reset()\n"));
 
@@ -3974,7 +3974,7 @@
 
 static int gdth_bios_param(struct scsi_device *sdev,struct block_device *bdev,sector_t cap,int *ip)
 {
-    unchar b, t;
+    u8 b, t;
     gdth_ha_str *ha = shost_priv(sdev->host);
     struct scsi_device *sd;
     unsigned capacity;
@@ -4062,7 +4062,7 @@
 {
     gdth_ioctl_event evt;
     gdth_ha_str *ha;
-    ulong flags;
+    unsigned long flags;
 
     if (copy_from_user(&evt, arg, sizeof(gdth_ioctl_event)))
         return -EFAULT;
@@ -4098,8 +4098,8 @@
 static int ioc_lockdrv(void __user *arg)
 {
     gdth_ioctl_lockdrv ldrv;
-    unchar i, j;
-    ulong flags;
+    u8 i, j;
+    unsigned long flags;
     gdth_ha_str *ha;
 
     if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
@@ -4165,7 +4165,7 @@
 {
     gdth_ioctl_general gen;
     char *buf = NULL;
-    ulong64 paddr; 
+    u64 paddr; 
     gdth_ha_str *ha;
     int rval;
 
@@ -4194,7 +4194,7 @@
                 gen.command.u.cache64.DeviceNo = gen.command.u.cache.DeviceNo;
                 /* addresses */
                 if (ha->cache_feat & SCATTER_GATHER) {
-                    gen.command.u.cache64.DestAddr = (ulong64)-1;
+                    gen.command.u.cache64.DestAddr = (u64)-1;
                     gen.command.u.cache64.sg_canz = 1;
                     gen.command.u.cache64.sg_lst[0].sg_ptr = paddr;
                     gen.command.u.cache64.sg_lst[0].sg_len = gen.data_len;
@@ -4207,7 +4207,7 @@
                 if (ha->cache_feat & SCATTER_GATHER) {
                     gen.command.u.cache.DestAddr = 0xffffffff;
                     gen.command.u.cache.sg_canz = 1;
-                    gen.command.u.cache.sg_lst[0].sg_ptr = (ulong32)paddr;
+                    gen.command.u.cache.sg_lst[0].sg_ptr = (u32)paddr;
                     gen.command.u.cache.sg_lst[0].sg_len = gen.data_len;
                     gen.command.u.cache.sg_lst[1].sg_len = 0;
                 } else {
@@ -4230,7 +4230,7 @@
                 gen.command.u.raw64.direction = gen.command.u.raw.direction;
                 /* addresses */
                 if (ha->raw_feat & SCATTER_GATHER) {
-                    gen.command.u.raw64.sdata = (ulong64)-1;
+                    gen.command.u.raw64.sdata = (u64)-1;
                     gen.command.u.raw64.sg_ranz = 1;
                     gen.command.u.raw64.sg_lst[0].sg_ptr = paddr;
                     gen.command.u.raw64.sg_lst[0].sg_len = gen.data_len;
@@ -4244,14 +4244,14 @@
                 if (ha->raw_feat & SCATTER_GATHER) {
                     gen.command.u.raw.sdata = 0xffffffff;
                     gen.command.u.raw.sg_ranz = 1;
-                    gen.command.u.raw.sg_lst[0].sg_ptr = (ulong32)paddr;
+                    gen.command.u.raw.sg_lst[0].sg_ptr = (u32)paddr;
                     gen.command.u.raw.sg_lst[0].sg_len = gen.data_len;
                     gen.command.u.raw.sg_lst[1].sg_len = 0;
                 } else {
                     gen.command.u.raw.sdata = paddr;
                     gen.command.u.raw.sg_ranz = 0;
                 }
-                gen.command.u.raw.sense_data = (ulong32)paddr + gen.data_len;
+                gen.command.u.raw.sense_data = (u32)paddr + gen.data_len;
             }
         } else {
             gdth_ioctl_free(ha, gen.data_len+gen.sense_len, buf, paddr);
@@ -4283,7 +4283,7 @@
     gdth_ioctl_rescan *rsc;
     gdth_cmd_str *cmd;
     gdth_ha_str *ha;
-    unchar i;
+    u8 i;
     int rc = -ENOMEM;
     u32 cluster_type = 0;
 
@@ -4335,11 +4335,11 @@
 {
     gdth_ioctl_rescan *rsc;
     gdth_cmd_str *cmd;
-    ushort i, status, hdr_cnt;
-    ulong32 info;
+    u16 i, status, hdr_cnt;
+    u32 info;
     int cyls, hds, secs;
     int rc = -ENOMEM;
-    ulong flags;
+    unsigned long flags;
     gdth_ha_str *ha; 
 
     rsc = kmalloc(sizeof(*rsc), GFP_KERNEL);
@@ -4367,7 +4367,7 @@
 
         status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info);
         i = 0;
-        hdr_cnt = (status == S_OK ? (ushort)info : 0);
+        hdr_cnt = (status == S_OK ? (u16)info : 0);
     } else {
         i = rsc->hdr_no;
         hdr_cnt = i + 1;
@@ -4418,7 +4418,7 @@
         status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info);
 
         spin_lock_irqsave(&ha->smp_lock, flags);
-        ha->hdr[i].devtype = (status == S_OK ? (ushort)info : 0);
+        ha->hdr[i].devtype = (status == S_OK ? (u16)info : 0);
         spin_unlock_irqrestore(&ha->smp_lock, flags);
 
         cmd->Service = CACHESERVICE;
@@ -4432,7 +4432,7 @@
 
         spin_lock_irqsave(&ha->smp_lock, flags);
         ha->hdr[i].cluster_type = 
-            ((status == S_OK && !shared_access) ? (ushort)info : 0);
+            ((status == S_OK && !shared_access) ? (u16)info : 0);
         spin_unlock_irqrestore(&ha->smp_lock, flags);
         rsc->hdr_list[i].cluster_type = ha->hdr[i].cluster_type;
 
@@ -4446,7 +4446,7 @@
         status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info);
 
         spin_lock_irqsave(&ha->smp_lock, flags);
-        ha->hdr[i].rw_attribs = (status == S_OK ? (ushort)info : 0);
+        ha->hdr[i].rw_attribs = (status == S_OK ? (u16)info : 0);
         spin_unlock_irqrestore(&ha->smp_lock, flags);
     }
  
@@ -4466,7 +4466,7 @@
 {
     gdth_ha_str *ha; 
     Scsi_Cmnd *scp;
-    ulong flags;
+    unsigned long flags;
     char cmnd[MAX_COMMAND_SIZE];   
     void __user *argp = (void __user *)arg;
 
@@ -4495,9 +4495,9 @@
       { 
         gdth_ioctl_osvers osv; 
 
-        osv.version = (unchar)(LINUX_VERSION_CODE >> 16);
-        osv.subversion = (unchar)(LINUX_VERSION_CODE >> 8);
-        osv.revision = (ushort)(LINUX_VERSION_CODE & 0xff);
+        osv.version = (u8)(LINUX_VERSION_CODE >> 16);
+        osv.subversion = (u8)(LINUX_VERSION_CODE >> 8);
+        osv.revision = (u16)(LINUX_VERSION_CODE & 0xff);
         if (copy_to_user(argp, &osv, sizeof(gdth_ioctl_osvers)))
                 return -EFAULT;
         break;
@@ -4512,10 +4512,10 @@
             return -EFAULT;
 
         if (ha->type == GDT_ISA || ha->type == GDT_EISA) {
-            ctrt.type = (unchar)((ha->stype>>20) - 0x10);
+            ctrt.type = (u8)((ha->stype>>20) - 0x10);
         } else {
             if (ha->type != GDT_PCIMPR) {
-                ctrt.type = (unchar)((ha->stype<<4) + 6);
+                ctrt.type = (u8)((ha->stype<<4) + 6);
             } else {
                 ctrt.type = 
                     (ha->oem_id == OEM_ID_INTEL ? 0xfd : 0xfe);
@@ -4546,7 +4546,7 @@
       case GDTIOCTL_LOCKCHN:
       {
         gdth_ioctl_lockchn lchn;
-        unchar i, j;
+        u8 i, j;
 
         if (copy_from_user(&lchn, argp, sizeof(gdth_ioctl_lockchn)) ||
             (NULL == (ha = gdth_find_ha(lchn.ionode))))
@@ -4670,7 +4670,7 @@
 };
 
 #ifdef CONFIG_ISA
-static int __init gdth_isa_probe_one(ulong32 isa_bios)
+static int __init gdth_isa_probe_one(u32 isa_bios)
 {
 	struct Scsi_Host *shp;
 	gdth_ha_str *ha;
@@ -4802,7 +4802,7 @@
 #endif /* CONFIG_ISA */
 
 #ifdef CONFIG_EISA
-static int __init gdth_eisa_probe_one(ushort eisa_slot)
+static int __init gdth_eisa_probe_one(u16 eisa_slot)
 {
 	struct Scsi_Host *shp;
 	gdth_ha_str *ha;
@@ -5120,7 +5120,7 @@
 	scsi_host_put(shp);
 }
 
-static int gdth_halt(struct notifier_block *nb, ulong event, void *buf)
+static int gdth_halt(struct notifier_block *nb, unsigned long event, void *buf)
 {
 	gdth_ha_str *ha;
 
@@ -5158,14 +5158,14 @@
 	if (probe_eisa_isa) {
 		/* scanning for controllers, at first: ISA controller */
 #ifdef CONFIG_ISA
-		ulong32 isa_bios;
+		u32 isa_bios;
 		for (isa_bios = 0xc8000UL; isa_bios <= 0xd8000UL;
 		                isa_bios += 0x8000UL)
 			gdth_isa_probe_one(isa_bios);
 #endif
 #ifdef CONFIG_EISA
 		{
-			ushort eisa_slot;
+			u16 eisa_slot;
 			for (eisa_slot = 0x1000; eisa_slot <= 0x8000;
 			                         eisa_slot += 0x1000)
 				gdth_eisa_probe_one(eisa_slot);
diff --git a/drivers/scsi/gdth.h b/drivers/scsi/gdth.h
index 1646444..120a062 100644
--- a/drivers/scsi/gdth.h
+++ b/drivers/scsi/gdth.h
@@ -321,524 +321,524 @@
 
 /* screenservice message */
 typedef struct {                               
-    ulong32     msg_handle;                     /* message handle */
-    ulong32     msg_len;                        /* size of message */
-    ulong32     msg_alen;                       /* answer length */
-    unchar      msg_answer;                     /* answer flag */
-    unchar      msg_ext;                        /* more messages */
-    unchar      msg_reserved[2];
+    u32     msg_handle;                     /* message handle */
+    u32     msg_len;                        /* size of message */
+    u32     msg_alen;                       /* answer length */
+    u8      msg_answer;                     /* answer flag */
+    u8      msg_ext;                        /* more messages */
+    u8      msg_reserved[2];
     char        msg_text[MSGLEN+2];             /* the message text */
-} PACKED gdth_msg_str;
+} __attribute__((packed)) gdth_msg_str;
 
 
 /* IOCTL data structures */
 
 /* Status coalescing buffer for returning multiple requests per interrupt */
 typedef struct {
-    ulong32     status;
-    ulong32     ext_status;
-    ulong32     info0;
-    ulong32     info1;
-} PACKED gdth_coal_status;
+    u32     status;
+    u32     ext_status;
+    u32     info0;
+    u32     info1;
+} __attribute__((packed)) gdth_coal_status;
 
 /* performance mode data structure */
 typedef struct {
-    ulong32     version;            /* The version of this IOCTL structure. */
-    ulong32     st_mode;            /* 0=dis., 1=st_buf_addr1 valid, 2=both  */
-    ulong32     st_buff_addr1;      /* physical address of status buffer 1 */
-    ulong32     st_buff_u_addr1;    /* reserved for 64 bit addressing */
-    ulong32     st_buff_indx1;      /* reserved command idx. for this buffer */
-    ulong32     st_buff_addr2;      /* physical address of status buffer 1 */
-    ulong32     st_buff_u_addr2;    /* reserved for 64 bit addressing */
-    ulong32     st_buff_indx2;      /* reserved command idx. for this buffer */
-    ulong32     st_buff_size;       /* size of each buffer in bytes */
-    ulong32     cmd_mode;           /* 0 = mode disabled, 1 = cmd_buff_addr1 */ 
-    ulong32     cmd_buff_addr1;     /* physical address of cmd buffer 1 */   
-    ulong32     cmd_buff_u_addr1;   /* reserved for 64 bit addressing */
-    ulong32     cmd_buff_indx1;     /* cmd buf addr1 unique identifier */
-    ulong32     cmd_buff_addr2;     /* physical address of cmd buffer 1 */   
-    ulong32     cmd_buff_u_addr2;   /* reserved for 64 bit addressing */
-    ulong32     cmd_buff_indx2;     /* cmd buf addr1 unique identifier */
-    ulong32     cmd_buff_size;      /* size of each cmd bufer in bytes */
-    ulong32     reserved1;
-    ulong32     reserved2;
-} PACKED gdth_perf_modes;
+    u32     version;            /* The version of this IOCTL structure. */
+    u32     st_mode;            /* 0=dis., 1=st_buf_addr1 valid, 2=both  */
+    u32     st_buff_addr1;      /* physical address of status buffer 1 */
+    u32     st_buff_u_addr1;    /* reserved for 64 bit addressing */
+    u32     st_buff_indx1;      /* reserved command idx. for this buffer */
+    u32     st_buff_addr2;      /* physical address of status buffer 1 */
+    u32     st_buff_u_addr2;    /* reserved for 64 bit addressing */
+    u32     st_buff_indx2;      /* reserved command idx. for this buffer */
+    u32     st_buff_size;       /* size of each buffer in bytes */
+    u32     cmd_mode;           /* 0 = mode disabled, 1 = cmd_buff_addr1 */ 
+    u32     cmd_buff_addr1;     /* physical address of cmd buffer 1 */   
+    u32     cmd_buff_u_addr1;   /* reserved for 64 bit addressing */
+    u32     cmd_buff_indx1;     /* cmd buf addr1 unique identifier */
+    u32     cmd_buff_addr2;     /* physical address of cmd buffer 1 */   
+    u32     cmd_buff_u_addr2;   /* reserved for 64 bit addressing */
+    u32     cmd_buff_indx2;     /* cmd buf addr1 unique identifier */
+    u32     cmd_buff_size;      /* size of each cmd bufer in bytes */
+    u32     reserved1;
+    u32     reserved2;
+} __attribute__((packed)) gdth_perf_modes;
 
 /* SCSI drive info */
 typedef struct {
-    unchar      vendor[8];                      /* vendor string */
-    unchar      product[16];                    /* product string */
-    unchar      revision[4];                    /* revision */
-    ulong32     sy_rate;                        /* current rate for sync. tr. */
-    ulong32     sy_max_rate;                    /* max. rate for sync. tr. */
-    ulong32     no_ldrive;                      /* belongs to this log. drv.*/
-    ulong32     blkcnt;                         /* number of blocks */
-    ushort      blksize;                        /* size of block in bytes */
-    unchar      available;                      /* flag: access is available */
-    unchar      init;                           /* medium is initialized */
-    unchar      devtype;                        /* SCSI devicetype */
-    unchar      rm_medium;                      /* medium is removable */
-    unchar      wp_medium;                      /* medium is write protected */
-    unchar      ansi;                           /* SCSI I/II or III? */
-    unchar      protocol;                       /* same as ansi */
-    unchar      sync;                           /* flag: sync. transfer enab. */
-    unchar      disc;                           /* flag: disconnect enabled */
-    unchar      queueing;                       /* flag: command queing enab. */
-    unchar      cached;                         /* flag: caching enabled */
-    unchar      target_id;                      /* target ID of device */
-    unchar      lun;                            /* LUN id of device */
-    unchar      orphan;                         /* flag: drive fragment */
-    ulong32     last_error;                     /* sense key or drive state */
-    ulong32     last_result;                    /* result of last command */
-    ulong32     check_errors;                   /* err. in last surface check */
-    unchar      percent;                        /* progress for surface check */
-    unchar      last_check;                     /* IOCTRL operation */
-    unchar      res[2];
-    ulong32     flags;                          /* from 1.19/2.19: raw reserv.*/
-    unchar      multi_bus;                      /* multi bus dev? (fibre ch.) */
-    unchar      mb_status;                      /* status: available? */
-    unchar      res2[2];
-    unchar      mb_alt_status;                  /* status on second bus */
-    unchar      mb_alt_bid;                     /* number of second bus */
-    unchar      mb_alt_tid;                     /* target id on second bus */
-    unchar      res3;
-    unchar      fc_flag;                        /* from 1.22/2.22: info valid?*/
-    unchar      res4;
-    ushort      fc_frame_size;                  /* frame size (bytes) */
+    u8      vendor[8];                      /* vendor string */
+    u8      product[16];                    /* product string */
+    u8      revision[4];                    /* revision */
+    u32     sy_rate;                        /* current rate for sync. tr. */
+    u32     sy_max_rate;                    /* max. rate for sync. tr. */
+    u32     no_ldrive;                      /* belongs to this log. drv.*/
+    u32     blkcnt;                         /* number of blocks */
+    u16      blksize;                        /* size of block in bytes */
+    u8      available;                      /* flag: access is available */
+    u8      init;                           /* medium is initialized */
+    u8      devtype;                        /* SCSI devicetype */
+    u8      rm_medium;                      /* medium is removable */
+    u8      wp_medium;                      /* medium is write protected */
+    u8      ansi;                           /* SCSI I/II or III? */
+    u8      protocol;                       /* same as ansi */
+    u8      sync;                           /* flag: sync. transfer enab. */
+    u8      disc;                           /* flag: disconnect enabled */
+    u8      queueing;                       /* flag: command queing enab. */
+    u8      cached;                         /* flag: caching enabled */
+    u8      target_id;                      /* target ID of device */
+    u8      lun;                            /* LUN id of device */
+    u8      orphan;                         /* flag: drive fragment */
+    u32     last_error;                     /* sense key or drive state */
+    u32     last_result;                    /* result of last command */
+    u32     check_errors;                   /* err. in last surface check */
+    u8      percent;                        /* progress for surface check */
+    u8      last_check;                     /* IOCTRL operation */
+    u8      res[2];
+    u32     flags;                          /* from 1.19/2.19: raw reserv.*/
+    u8      multi_bus;                      /* multi bus dev? (fibre ch.) */
+    u8      mb_status;                      /* status: available? */
+    u8      res2[2];
+    u8      mb_alt_status;                  /* status on second bus */
+    u8      mb_alt_bid;                     /* number of second bus */
+    u8      mb_alt_tid;                     /* target id on second bus */
+    u8      res3;
+    u8      fc_flag;                        /* from 1.22/2.22: info valid?*/
+    u8      res4;
+    u16      fc_frame_size;                  /* frame size (bytes) */
     char        wwn[8];                         /* world wide name */
-} PACKED gdth_diskinfo_str;
+} __attribute__((packed)) gdth_diskinfo_str;
 
 /* get SCSI channel count  */
 typedef struct {
-    ulong32     channel_no;                     /* number of channel */
-    ulong32     drive_cnt;                      /* drive count */
-    unchar      siop_id;                        /* SCSI processor ID */
-    unchar      siop_state;                     /* SCSI processor state */ 
-} PACKED gdth_getch_str;
+    u32     channel_no;                     /* number of channel */
+    u32     drive_cnt;                      /* drive count */
+    u8      siop_id;                        /* SCSI processor ID */
+    u8      siop_state;                     /* SCSI processor state */ 
+} __attribute__((packed)) gdth_getch_str;
 
 /* get SCSI drive numbers */
 typedef struct {
-    ulong32     sc_no;                          /* SCSI channel */
-    ulong32     sc_cnt;                         /* sc_list[] elements */
-    ulong32     sc_list[MAXID];                 /* minor device numbers */
-} PACKED gdth_drlist_str;
+    u32     sc_no;                          /* SCSI channel */
+    u32     sc_cnt;                         /* sc_list[] elements */
+    u32     sc_list[MAXID];                 /* minor device numbers */
+} __attribute__((packed)) gdth_drlist_str;
 
 /* get grown/primary defect count */
 typedef struct {
-    unchar      sddc_type;                      /* 0x08: grown, 0x10: prim. */
-    unchar      sddc_format;                    /* list entry format */
-    unchar      sddc_len;                       /* list entry length */
-    unchar      sddc_res;
-    ulong32     sddc_cnt;                       /* entry count */
-} PACKED gdth_defcnt_str;
+    u8      sddc_type;                      /* 0x08: grown, 0x10: prim. */
+    u8      sddc_format;                    /* list entry format */
+    u8      sddc_len;                       /* list entry length */
+    u8      sddc_res;
+    u32     sddc_cnt;                       /* entry count */
+} __attribute__((packed)) gdth_defcnt_str;
 
 /* disk statistics */
 typedef struct {
-    ulong32     bid;                            /* SCSI channel */
-    ulong32     first;                          /* first SCSI disk */
-    ulong32     entries;                        /* number of elements */
-    ulong32     count;                          /* (R) number of init. el. */
-    ulong32     mon_time;                       /* time stamp */
+    u32     bid;                            /* SCSI channel */
+    u32     first;                          /* first SCSI disk */
+    u32     entries;                        /* number of elements */
+    u32     count;                          /* (R) number of init. el. */
+    u32     mon_time;                       /* time stamp */
     struct {
-        unchar  tid;                            /* target ID */
-        unchar  lun;                            /* LUN */
-        unchar  res[2];
-        ulong32 blk_size;                       /* block size in bytes */
-        ulong32 rd_count;                       /* bytes read */
-        ulong32 wr_count;                       /* bytes written */
-        ulong32 rd_blk_count;                   /* blocks read */
-        ulong32 wr_blk_count;                   /* blocks written */
-        ulong32 retries;                        /* retries */
-        ulong32 reassigns;                      /* reassigns */
-    } PACKED list[1];
-} PACKED gdth_dskstat_str;
+        u8  tid;                            /* target ID */
+        u8  lun;                            /* LUN */
+        u8  res[2];
+        u32 blk_size;                       /* block size in bytes */
+        u32 rd_count;                       /* bytes read */
+        u32 wr_count;                       /* bytes written */
+        u32 rd_blk_count;                   /* blocks read */
+        u32 wr_blk_count;                   /* blocks written */
+        u32 retries;                        /* retries */
+        u32 reassigns;                      /* reassigns */
+    } __attribute__((packed)) list[1];
+} __attribute__((packed)) gdth_dskstat_str;
 
 /* IO channel header */
 typedef struct {
-    ulong32     version;                        /* version (-1UL: newest) */
-    unchar      list_entries;                   /* list entry count */
-    unchar      first_chan;                     /* first channel number */
-    unchar      last_chan;                      /* last channel number */
-    unchar      chan_count;                     /* (R) channel count */
-    ulong32     list_offset;                    /* offset of list[0] */
-} PACKED gdth_iochan_header;
+    u32     version;                        /* version (-1UL: newest) */
+    u8      list_entries;                   /* list entry count */
+    u8      first_chan;                     /* first channel number */
+    u8      last_chan;                      /* last channel number */
+    u8      chan_count;                     /* (R) channel count */
+    u32     list_offset;                    /* offset of list[0] */
+} __attribute__((packed)) gdth_iochan_header;
 
 /* get IO channel description */
 typedef struct {
     gdth_iochan_header  hdr;
     struct {
-        ulong32         address;                /* channel address */
-        unchar          type;                   /* type (SCSI, FCAL) */
-        unchar          local_no;               /* local number */
-        ushort          features;               /* channel features */
-    } PACKED list[MAXBUS];
-} PACKED gdth_iochan_str;
+        u32         address;                /* channel address */
+        u8          type;                   /* type (SCSI, FCAL) */
+        u8          local_no;               /* local number */
+        u16          features;               /* channel features */
+    } __attribute__((packed)) list[MAXBUS];
+} __attribute__((packed)) gdth_iochan_str;
 
 /* get raw IO channel description */
 typedef struct {
     gdth_iochan_header  hdr;
     struct {
-        unchar      proc_id;                    /* processor id */
-        unchar      proc_defect;                /* defect ? */
-        unchar      reserved[2];
-    } PACKED list[MAXBUS];
-} PACKED gdth_raw_iochan_str;
+        u8      proc_id;                    /* processor id */
+        u8      proc_defect;                /* defect ? */
+        u8      reserved[2];
+    } __attribute__((packed)) list[MAXBUS];
+} __attribute__((packed)) gdth_raw_iochan_str;
 
 /* array drive component */
 typedef struct {
-    ulong32     al_controller;                  /* controller ID */
-    unchar      al_cache_drive;                 /* cache drive number */
-    unchar      al_status;                      /* cache drive state */
-    unchar      al_res[2];     
-} PACKED gdth_arraycomp_str;
+    u32     al_controller;                  /* controller ID */
+    u8      al_cache_drive;                 /* cache drive number */
+    u8      al_status;                      /* cache drive state */
+    u8      al_res[2];     
+} __attribute__((packed)) gdth_arraycomp_str;
 
 /* array drive information */
 typedef struct {
-    unchar      ai_type;                        /* array type (RAID0,4,5) */
-    unchar      ai_cache_drive_cnt;             /* active cachedrives */
-    unchar      ai_state;                       /* array drive state */
-    unchar      ai_master_cd;                   /* master cachedrive */
-    ulong32     ai_master_controller;           /* ID of master controller */
-    ulong32     ai_size;                        /* user capacity [sectors] */
-    ulong32     ai_striping_size;               /* striping size [sectors] */
-    ulong32     ai_secsize;                     /* sector size [bytes] */
-    ulong32     ai_err_info;                    /* failed cache drive */
-    unchar      ai_name[8];                     /* name of the array drive */
-    unchar      ai_controller_cnt;              /* number of controllers */
-    unchar      ai_removable;                   /* flag: removable */
-    unchar      ai_write_protected;             /* flag: write protected */
-    unchar      ai_devtype;                     /* type: always direct access */
+    u8      ai_type;                        /* array type (RAID0,4,5) */
+    u8      ai_cache_drive_cnt;             /* active cachedrives */
+    u8      ai_state;                       /* array drive state */
+    u8      ai_master_cd;                   /* master cachedrive */
+    u32     ai_master_controller;           /* ID of master controller */
+    u32     ai_size;                        /* user capacity [sectors] */
+    u32     ai_striping_size;               /* striping size [sectors] */
+    u32     ai_secsize;                     /* sector size [bytes] */
+    u32     ai_err_info;                    /* failed cache drive */
+    u8      ai_name[8];                     /* name of the array drive */
+    u8      ai_controller_cnt;              /* number of controllers */
+    u8      ai_removable;                   /* flag: removable */
+    u8      ai_write_protected;             /* flag: write protected */
+    u8      ai_devtype;                     /* type: always direct access */
     gdth_arraycomp_str  ai_drives[35];          /* drive components: */
-    unchar      ai_drive_entries;               /* number of drive components */
-    unchar      ai_protected;                   /* protection flag */
-    unchar      ai_verify_state;                /* state of a parity verify */
-    unchar      ai_ext_state;                   /* extended array drive state */
-    unchar      ai_expand_state;                /* array expand state (>=2.18)*/
-    unchar      ai_reserved[3];
-} PACKED gdth_arrayinf_str;
+    u8      ai_drive_entries;               /* number of drive components */
+    u8      ai_protected;                   /* protection flag */
+    u8      ai_verify_state;                /* state of a parity verify */
+    u8      ai_ext_state;                   /* extended array drive state */
+    u8      ai_expand_state;                /* array expand state (>=2.18)*/
+    u8      ai_reserved[3];
+} __attribute__((packed)) gdth_arrayinf_str;
 
 /* get array drive list */
 typedef struct {
-    ulong32     controller_no;                  /* controller no. */
-    unchar      cd_handle;                      /* master cachedrive */
-    unchar      is_arrayd;                      /* Flag: is array drive? */
-    unchar      is_master;                      /* Flag: is array master? */
-    unchar      is_parity;                      /* Flag: is parity drive? */
-    unchar      is_hotfix;                      /* Flag: is hotfix drive? */
-    unchar      res[3];
-} PACKED gdth_alist_str;
+    u32     controller_no;                  /* controller no. */
+    u8      cd_handle;                      /* master cachedrive */
+    u8      is_arrayd;                      /* Flag: is array drive? */
+    u8      is_master;                      /* Flag: is array master? */
+    u8      is_parity;                      /* Flag: is parity drive? */
+    u8      is_hotfix;                      /* Flag: is hotfix drive? */
+    u8      res[3];
+} __attribute__((packed)) gdth_alist_str;
 
 typedef struct {
-    ulong32     entries_avail;                  /* allocated entries */
-    ulong32     entries_init;                   /* returned entries */
-    ulong32     first_entry;                    /* first entry number */
-    ulong32     list_offset;                    /* offset of following list */
+    u32     entries_avail;                  /* allocated entries */
+    u32     entries_init;                   /* returned entries */
+    u32     first_entry;                    /* first entry number */
+    u32     list_offset;                    /* offset of following list */
     gdth_alist_str list[1];                     /* list */
-} PACKED gdth_arcdl_str;
+} __attribute__((packed)) gdth_arcdl_str;
 
 /* cache info/config IOCTL */
 typedef struct {
-    ulong32     version;                        /* firmware version */
-    ushort      state;                          /* cache state (on/off) */
-    ushort      strategy;                       /* cache strategy */
-    ushort      write_back;                     /* write back state (on/off) */
-    ushort      block_size;                     /* cache block size */
-} PACKED gdth_cpar_str;
+    u32     version;                        /* firmware version */
+    u16      state;                          /* cache state (on/off) */
+    u16      strategy;                       /* cache strategy */
+    u16      write_back;                     /* write back state (on/off) */
+    u16      block_size;                     /* cache block size */
+} __attribute__((packed)) gdth_cpar_str;
 
 typedef struct {
-    ulong32     csize;                          /* cache size */
-    ulong32     read_cnt;                       /* read/write counter */
-    ulong32     write_cnt;
-    ulong32     tr_hits;                        /* hits */
-    ulong32     sec_hits;
-    ulong32     sec_miss;                       /* misses */
-} PACKED gdth_cstat_str;
+    u32     csize;                          /* cache size */
+    u32     read_cnt;                       /* read/write counter */
+    u32     write_cnt;
+    u32     tr_hits;                        /* hits */
+    u32     sec_hits;
+    u32     sec_miss;                       /* misses */
+} __attribute__((packed)) gdth_cstat_str;
 
 typedef struct {
     gdth_cpar_str   cpar;
     gdth_cstat_str  cstat;
-} PACKED gdth_cinfo_str;
+} __attribute__((packed)) gdth_cinfo_str;
 
 /* cache drive info */
 typedef struct {
-    unchar      cd_name[8];                     /* cache drive name */
-    ulong32     cd_devtype;                     /* SCSI devicetype */
-    ulong32     cd_ldcnt;                       /* number of log. drives */
-    ulong32     cd_last_error;                  /* last error */
-    unchar      cd_initialized;                 /* drive is initialized */
-    unchar      cd_removable;                   /* media is removable */
-    unchar      cd_write_protected;             /* write protected */
-    unchar      cd_flags;                       /* Pool Hot Fix? */
-    ulong32     ld_blkcnt;                      /* number of blocks */
-    ulong32     ld_blksize;                     /* blocksize */
-    ulong32     ld_dcnt;                        /* number of disks */
-    ulong32     ld_slave;                       /* log. drive index */
-    ulong32     ld_dtype;                       /* type of logical drive */
-    ulong32     ld_last_error;                  /* last error */
-    unchar      ld_name[8];                     /* log. drive name */
-    unchar      ld_error;                       /* error */
-} PACKED gdth_cdrinfo_str;
+    u8      cd_name[8];                     /* cache drive name */
+    u32     cd_devtype;                     /* SCSI devicetype */
+    u32     cd_ldcnt;                       /* number of log. drives */
+    u32     cd_last_error;                  /* last error */
+    u8      cd_initialized;                 /* drive is initialized */
+    u8      cd_removable;                   /* media is removable */
+    u8      cd_write_protected;             /* write protected */
+    u8      cd_flags;                       /* Pool Hot Fix? */
+    u32     ld_blkcnt;                      /* number of blocks */
+    u32     ld_blksize;                     /* blocksize */
+    u32     ld_dcnt;                        /* number of disks */
+    u32     ld_slave;                       /* log. drive index */
+    u32     ld_dtype;                       /* type of logical drive */
+    u32     ld_last_error;                  /* last error */
+    u8      ld_name[8];                     /* log. drive name */
+    u8      ld_error;                       /* error */
+} __attribute__((packed)) gdth_cdrinfo_str;
 
 /* OEM string */
 typedef struct {
-    ulong32     ctl_version;
-    ulong32     file_major_version;
-    ulong32     file_minor_version;
-    ulong32     buffer_size;
-    ulong32     cpy_count;
-    ulong32     ext_error;
-    ulong32     oem_id;
-    ulong32     board_id;
-} PACKED gdth_oem_str_params;
+    u32     ctl_version;
+    u32     file_major_version;
+    u32     file_minor_version;
+    u32     buffer_size;
+    u32     cpy_count;
+    u32     ext_error;
+    u32     oem_id;
+    u32     board_id;
+} __attribute__((packed)) gdth_oem_str_params;
 
 typedef struct {
-    unchar      product_0_1_name[16];
-    unchar      product_4_5_name[16];
-    unchar      product_cluster_name[16];
-    unchar      product_reserved[16];
-    unchar      scsi_cluster_target_vendor_id[16];
-    unchar      cluster_raid_fw_name[16];
-    unchar      oem_brand_name[16];
-    unchar      oem_raid_type[16];
-    unchar      bios_type[13];
-    unchar      bios_title[50];
-    unchar      oem_company_name[37];
-    ulong32     pci_id_1;
-    ulong32     pci_id_2;
-    unchar      validation_status[80];
-    unchar      reserved_1[4];
-    unchar      scsi_host_drive_inquiry_vendor_id[16];
-    unchar      library_file_template[16];
-    unchar      reserved_2[16];
-    unchar      tool_name_1[32];
-    unchar      tool_name_2[32];
-    unchar      tool_name_3[32];
-    unchar      oem_contact_1[84];
-    unchar      oem_contact_2[84];
-    unchar      oem_contact_3[84];
-} PACKED gdth_oem_str;
+    u8      product_0_1_name[16];
+    u8      product_4_5_name[16];
+    u8      product_cluster_name[16];
+    u8      product_reserved[16];
+    u8      scsi_cluster_target_vendor_id[16];
+    u8      cluster_raid_fw_name[16];
+    u8      oem_brand_name[16];
+    u8      oem_raid_type[16];
+    u8      bios_type[13];
+    u8      bios_title[50];
+    u8      oem_company_name[37];
+    u32     pci_id_1;
+    u32     pci_id_2;
+    u8      validation_status[80];
+    u8      reserved_1[4];
+    u8      scsi_host_drive_inquiry_vendor_id[16];
+    u8      library_file_template[16];
+    u8      reserved_2[16];
+    u8      tool_name_1[32];
+    u8      tool_name_2[32];
+    u8      tool_name_3[32];
+    u8      oem_contact_1[84];
+    u8      oem_contact_2[84];
+    u8      oem_contact_3[84];
+} __attribute__((packed)) gdth_oem_str;
 
 typedef struct {
     gdth_oem_str_params params;
     gdth_oem_str        text;
-} PACKED gdth_oem_str_ioctl;
+} __attribute__((packed)) gdth_oem_str_ioctl;
 
 /* board features */
 typedef struct {
-    unchar      chaining;                       /* Chaining supported */
-    unchar      striping;                       /* Striping (RAID-0) supp. */
-    unchar      mirroring;                      /* Mirroring (RAID-1) supp. */
-    unchar      raid;                           /* RAID-4/5/10 supported */
-} PACKED gdth_bfeat_str;
+    u8      chaining;                       /* Chaining supported */
+    u8      striping;                       /* Striping (RAID-0) supp. */
+    u8      mirroring;                      /* Mirroring (RAID-1) supp. */
+    u8      raid;                           /* RAID-4/5/10 supported */
+} __attribute__((packed)) gdth_bfeat_str;
 
 /* board info IOCTL */
 typedef struct {
-    ulong32     ser_no;                         /* serial no. */
-    unchar      oem_id[2];                      /* OEM ID */
-    ushort      ep_flags;                       /* eprom flags */
-    ulong32     proc_id;                        /* processor ID */
-    ulong32     memsize;                        /* memory size (bytes) */
-    unchar      mem_banks;                      /* memory banks */
-    unchar      chan_type;                      /* channel type */
-    unchar      chan_count;                     /* channel count */
-    unchar      rdongle_pres;                   /* dongle present? */
-    ulong32     epr_fw_ver;                     /* (eprom) firmware version */
-    ulong32     upd_fw_ver;                     /* (update) firmware version */
-    ulong32     upd_revision;                   /* update revision */
+    u32     ser_no;                         /* serial no. */
+    u8      oem_id[2];                      /* OEM ID */
+    u16      ep_flags;                       /* eprom flags */
+    u32     proc_id;                        /* processor ID */
+    u32     memsize;                        /* memory size (bytes) */
+    u8      mem_banks;                      /* memory banks */
+    u8      chan_type;                      /* channel type */
+    u8      chan_count;                     /* channel count */
+    u8      rdongle_pres;                   /* dongle present? */
+    u32     epr_fw_ver;                     /* (eprom) firmware version */
+    u32     upd_fw_ver;                     /* (update) firmware version */
+    u32     upd_revision;                   /* update revision */
     char        type_string[16];                /* controller name */
     char        raid_string[16];                /* RAID firmware name */
-    unchar      update_pres;                    /* update present? */
-    unchar      xor_pres;                       /* XOR engine present? */
-    unchar      prom_type;                      /* ROM type (eprom/flash) */
-    unchar      prom_count;                     /* number of ROM devices */
-    ulong32     dup_pres;                       /* duplexing module present? */
-    ulong32     chan_pres;                      /* number of expansion chn. */
-    ulong32     mem_pres;                       /* memory expansion inst. ? */
-    unchar      ft_bus_system;                  /* fault bus supported? */
-    unchar      subtype_valid;                  /* board_subtype valid? */
-    unchar      board_subtype;                  /* subtype/hardware level */
-    unchar      ramparity_pres;                 /* RAM parity check hardware? */
-} PACKED gdth_binfo_str; 
+    u8      update_pres;                    /* update present? */
+    u8      xor_pres;                       /* XOR engine present? */
+    u8      prom_type;                      /* ROM type (eprom/flash) */
+    u8      prom_count;                     /* number of ROM devices */
+    u32     dup_pres;                       /* duplexing module present? */
+    u32     chan_pres;                      /* number of expansion chn. */
+    u32     mem_pres;                       /* memory expansion inst. ? */
+    u8      ft_bus_system;                  /* fault bus supported? */
+    u8      subtype_valid;                  /* board_subtype valid? */
+    u8      board_subtype;                  /* subtype/hardware level */
+    u8      ramparity_pres;                 /* RAM parity check hardware? */
+} __attribute__((packed)) gdth_binfo_str; 
 
 /* get host drive info */
 typedef struct {
     char        name[8];                        /* host drive name */
-    ulong32     size;                           /* size (sectors) */
-    unchar      host_drive;                     /* host drive number */
-    unchar      log_drive;                      /* log. drive (master) */
-    unchar      reserved;
-    unchar      rw_attribs;                     /* r/w attribs */
-    ulong32     start_sec;                      /* start sector */
-} PACKED gdth_hentry_str;
+    u32     size;                           /* size (sectors) */
+    u8      host_drive;                     /* host drive number */
+    u8      log_drive;                      /* log. drive (master) */
+    u8      reserved;
+    u8      rw_attribs;                     /* r/w attribs */
+    u32     start_sec;                      /* start sector */
+} __attribute__((packed)) gdth_hentry_str;
 
 typedef struct {
-    ulong32     entries;                        /* entry count */
-    ulong32     offset;                         /* offset of entries */
-    unchar      secs_p_head;                    /* sectors/head */
-    unchar      heads_p_cyl;                    /* heads/cylinder */
-    unchar      reserved;
-    unchar      clust_drvtype;                  /* cluster drive type */
-    ulong32     location;                       /* controller number */
+    u32     entries;                        /* entry count */
+    u32     offset;                         /* offset of entries */
+    u8      secs_p_head;                    /* sectors/head */
+    u8      heads_p_cyl;                    /* heads/cylinder */
+    u8      reserved;
+    u8      clust_drvtype;                  /* cluster drive type */
+    u32     location;                       /* controller number */
     gdth_hentry_str entry[MAX_HDRIVES];         /* entries */
-} PACKED gdth_hget_str;    
+} __attribute__((packed)) gdth_hget_str;    
 
 
 /* DPRAM structures */
 
 /* interface area ISA/PCI */
 typedef struct {
-    unchar              S_Cmd_Indx;             /* special command */
-    unchar volatile     S_Status;               /* status special command */
-    ushort              reserved1;
-    ulong32             S_Info[4];              /* add. info special command */
-    unchar volatile     Sema0;                  /* command semaphore */
-    unchar              reserved2[3];
-    unchar              Cmd_Index;              /* command number */
-    unchar              reserved3[3];
-    ushort volatile     Status;                 /* command status */
-    ushort              Service;                /* service(for async.events) */
-    ulong32             Info[2];                /* additional info */
+    u8              S_Cmd_Indx;             /* special command */
+    u8 volatile     S_Status;               /* status special command */
+    u16              reserved1;
+    u32             S_Info[4];              /* add. info special command */
+    u8 volatile     Sema0;                  /* command semaphore */
+    u8              reserved2[3];
+    u8              Cmd_Index;              /* command number */
+    u8              reserved3[3];
+    u16 volatile     Status;                 /* command status */
+    u16              Service;                /* service(for async.events) */
+    u32             Info[2];                /* additional info */
     struct {
-        ushort          offset;                 /* command offs. in the DPRAM*/
-        ushort          serv_id;                /* service */
-    } PACKED comm_queue[MAXOFFSETS];            /* command queue */
-    ulong32             bios_reserved[2];
-    unchar              gdt_dpr_cmd[1];         /* commands */
-} PACKED gdt_dpr_if;
+        u16          offset;                 /* command offs. in the DPRAM*/
+        u16          serv_id;                /* service */
+    } __attribute__((packed)) comm_queue[MAXOFFSETS];  /* command queue */
+    u32             bios_reserved[2];
+    u8              gdt_dpr_cmd[1];         /* commands */
+} __attribute__((packed)) gdt_dpr_if;
 
 /* SRAM structure PCI controllers */
 typedef struct {
-    ulong32     magic;                          /* controller ID from BIOS */
-    ushort      need_deinit;                    /* switch betw. BIOS/driver */
-    unchar      switch_support;                 /* see need_deinit */
-    unchar      padding[9];
-    unchar      os_used[16];                    /* OS code per service */
-    unchar      unused[28];
-    unchar      fw_magic;                       /* contr. ID from firmware */
-} PACKED gdt_pci_sram;
+    u32     magic;                          /* controller ID from BIOS */
+    u16      need_deinit;                    /* switch betw. BIOS/driver */
+    u8      switch_support;                 /* see need_deinit */
+    u8      padding[9];
+    u8      os_used[16];                    /* OS code per service */
+    u8      unused[28];
+    u8      fw_magic;                       /* contr. ID from firmware */
+} __attribute__((packed)) gdt_pci_sram;
 
 /* SRAM structure EISA controllers (but NOT GDT3000/3020) */
 typedef struct {
-    unchar      os_used[16];                    /* OS code per service */
-    ushort      need_deinit;                    /* switch betw. BIOS/driver */
-    unchar      switch_support;                 /* see need_deinit */
-    unchar      padding;
-} PACKED gdt_eisa_sram;
+    u8      os_used[16];                    /* OS code per service */
+    u16      need_deinit;                    /* switch betw. BIOS/driver */
+    u8      switch_support;                 /* see need_deinit */
+    u8      padding;
+} __attribute__((packed)) gdt_eisa_sram;
 
 
 /* DPRAM ISA controllers */
 typedef struct {
     union {
         struct {
-            unchar      bios_used[0x3c00-32];   /* 15KB - 32Bytes BIOS */
-            ulong32     magic;                  /* controller (EISA) ID */
-            ushort      need_deinit;            /* switch betw. BIOS/driver */
-            unchar      switch_support;         /* see need_deinit */
-            unchar      padding[9];
-            unchar      os_used[16];            /* OS code per service */
-        } PACKED dp_sram;
-        unchar          bios_area[0x4000];      /* 16KB reserved for BIOS */
+            u8      bios_used[0x3c00-32];   /* 15KB - 32Bytes BIOS */
+            u32     magic;                  /* controller (EISA) ID */
+            u16      need_deinit;            /* switch betw. BIOS/driver */
+            u8      switch_support;         /* see need_deinit */
+            u8      padding[9];
+            u8      os_used[16];            /* OS code per service */
+        } __attribute__((packed)) dp_sram;
+        u8          bios_area[0x4000];      /* 16KB reserved for BIOS */
     } bu;
     union {
         gdt_dpr_if      ic;                     /* interface area */
-        unchar          if_area[0x3000];        /* 12KB for interface */
+        u8          if_area[0x3000];        /* 12KB for interface */
     } u;
     struct {
-        unchar          memlock;                /* write protection DPRAM */
-        unchar          event;                  /* release event */
-        unchar          irqen;                  /* board interrupts enable */
-        unchar          irqdel;                 /* acknowledge board int. */
-        unchar volatile Sema1;                  /* status semaphore */
-        unchar          rq;                     /* IRQ/DRQ configuration */
-    } PACKED io;
-} PACKED gdt2_dpram_str;
+        u8          memlock;                /* write protection DPRAM */
+        u8          event;                  /* release event */
+        u8          irqen;                  /* board interrupts enable */
+        u8          irqdel;                 /* acknowledge board int. */
+        u8 volatile Sema1;                  /* status semaphore */
+        u8          rq;                     /* IRQ/DRQ configuration */
+    } __attribute__((packed)) io;
+} __attribute__((packed)) gdt2_dpram_str;
 
 /* DPRAM PCI controllers */
 typedef struct {
     union {
         gdt_dpr_if      ic;                     /* interface area */
-        unchar          if_area[0xff0-sizeof(gdt_pci_sram)];
+        u8          if_area[0xff0-sizeof(gdt_pci_sram)];
     } u;
     gdt_pci_sram        gdt6sr;                 /* SRAM structure */
     struct {
-        unchar          unused0[1];
-        unchar volatile Sema1;                  /* command semaphore */
-        unchar          unused1[3];
-        unchar          irqen;                  /* board interrupts enable */
-        unchar          unused2[2];
-        unchar          event;                  /* release event */
-        unchar          unused3[3];
-        unchar          irqdel;                 /* acknowledge board int. */
-        unchar          unused4[3];
-    } PACKED io;
-} PACKED gdt6_dpram_str;
+        u8          unused0[1];
+        u8 volatile Sema1;                  /* command semaphore */
+        u8          unused1[3];
+        u8          irqen;                  /* board interrupts enable */
+        u8          unused2[2];
+        u8          event;                  /* release event */
+        u8          unused3[3];
+        u8          irqdel;                 /* acknowledge board int. */
+        u8          unused4[3];
+    } __attribute__((packed)) io;
+} __attribute__((packed)) gdt6_dpram_str;
 
 /* PLX register structure (new PCI controllers) */
 typedef struct {
-    unchar              cfg_reg;        /* DPRAM cfg.(2:below 1MB,0:anywhere)*/
-    unchar              unused1[0x3f];
-    unchar volatile     sema0_reg;              /* command semaphore */
-    unchar volatile     sema1_reg;              /* status semaphore */
-    unchar              unused2[2];
-    ushort volatile     status;                 /* command status */
-    ushort              service;                /* service */
-    ulong32             info[2];                /* additional info */
-    unchar              unused3[0x10];
-    unchar              ldoor_reg;              /* PCI to local doorbell */
-    unchar              unused4[3];
-    unchar volatile     edoor_reg;              /* local to PCI doorbell */
-    unchar              unused5[3];
-    unchar              control0;               /* control0 register(unused) */
-    unchar              control1;               /* board interrupts enable */
-    unchar              unused6[0x16];
-} PACKED gdt6c_plx_regs;
+    u8              cfg_reg;        /* DPRAM cfg.(2:below 1MB,0:anywhere)*/
+    u8              unused1[0x3f];
+    u8 volatile     sema0_reg;              /* command semaphore */
+    u8 volatile     sema1_reg;              /* status semaphore */
+    u8              unused2[2];
+    u16 volatile     status;                 /* command status */
+    u16              service;                /* service */
+    u32             info[2];                /* additional info */
+    u8              unused3[0x10];
+    u8              ldoor_reg;              /* PCI to local doorbell */
+    u8              unused4[3];
+    u8 volatile     edoor_reg;              /* local to PCI doorbell */
+    u8              unused5[3];
+    u8              control0;               /* control0 register(unused) */
+    u8              control1;               /* board interrupts enable */
+    u8              unused6[0x16];
+} __attribute__((packed)) gdt6c_plx_regs;
 
 /* DPRAM new PCI controllers */
 typedef struct {
     union {
         gdt_dpr_if      ic;                     /* interface area */
-        unchar          if_area[0x4000-sizeof(gdt_pci_sram)];
+        u8          if_area[0x4000-sizeof(gdt_pci_sram)];
     } u;
     gdt_pci_sram        gdt6sr;                 /* SRAM structure */
-} PACKED gdt6c_dpram_str;
+} __attribute__((packed)) gdt6c_dpram_str;
 
 /* i960 register structure (PCI MPR controllers) */
 typedef struct {
-    unchar              unused1[16];
-    unchar volatile     sema0_reg;              /* command semaphore */
-    unchar              unused2;
-    unchar volatile     sema1_reg;              /* status semaphore */
-    unchar              unused3;
-    ushort volatile     status;                 /* command status */
-    ushort              service;                /* service */
-    ulong32             info[2];                /* additional info */
-    unchar              ldoor_reg;              /* PCI to local doorbell */
-    unchar              unused4[11];
-    unchar volatile     edoor_reg;              /* local to PCI doorbell */
-    unchar              unused5[7];
-    unchar              edoor_en_reg;           /* board interrupts enable */
-    unchar              unused6[27];
-    ulong32             unused7[939];         
-    ulong32             severity;       
+    u8              unused1[16];
+    u8 volatile     sema0_reg;              /* command semaphore */
+    u8              unused2;
+    u8 volatile     sema1_reg;              /* status semaphore */
+    u8              unused3;
+    u16 volatile     status;                 /* command status */
+    u16              service;                /* service */
+    u32             info[2];                /* additional info */
+    u8              ldoor_reg;              /* PCI to local doorbell */
+    u8              unused4[11];
+    u8 volatile     edoor_reg;              /* local to PCI doorbell */
+    u8              unused5[7];
+    u8              edoor_en_reg;           /* board interrupts enable */
+    u8              unused6[27];
+    u32             unused7[939];         
+    u32             severity;       
     char                evt_str[256];           /* event string */
-} PACKED gdt6m_i960_regs;
+} __attribute__((packed)) gdt6m_i960_regs;
 
 /* DPRAM PCI MPR controllers */
 typedef struct {
     gdt6m_i960_regs     i960r;                  /* 4KB i960 registers */
     union {
         gdt_dpr_if      ic;                     /* interface area */
-        unchar          if_area[0x3000-sizeof(gdt_pci_sram)];
+        u8          if_area[0x3000-sizeof(gdt_pci_sram)];
     } u;
     gdt_pci_sram        gdt6sr;                 /* SRAM structure */
-} PACKED gdt6m_dpram_str;
+} __attribute__((packed)) gdt6m_dpram_str;
 
 
 /* PCI resources */
 typedef struct {
     struct pci_dev      *pdev;
-    ulong               dpmem;                  /* DPRAM address */
-    ulong               io;                     /* IO address */
+    unsigned long       dpmem;                  /* DPRAM address */
+    unsigned long       io;                     /* IO address */
 } gdth_pci_str;
 
 
@@ -846,93 +846,93 @@
 typedef struct {
     struct Scsi_Host    *shost;
     struct list_head    list;
-    ushort      	hanum;
-    ushort              oem_id;                 /* OEM */
-    ushort              type;                   /* controller class */
-    ulong32             stype;                  /* subtype (PCI: device ID) */
-    ushort              fw_vers;                /* firmware version */
-    ushort              cache_feat;             /* feat. cache serv. (s/g,..)*/
-    ushort              raw_feat;               /* feat. raw service (s/g,..)*/
-    ushort              screen_feat;            /* feat. raw service (s/g,..)*/
-    ushort              bmic;                   /* BMIC address (EISA) */
+    u16      	hanum;
+    u16              oem_id;                 /* OEM */
+    u16              type;                   /* controller class */
+    u32             stype;                  /* subtype (PCI: device ID) */
+    u16              fw_vers;                /* firmware version */
+    u16              cache_feat;             /* feat. cache serv. (s/g,..)*/
+    u16              raw_feat;               /* feat. raw service (s/g,..)*/
+    u16              screen_feat;            /* feat. raw service (s/g,..)*/
+    u16              bmic;                   /* BMIC address (EISA) */
     void __iomem        *brd;                   /* DPRAM address */
-    ulong32             brd_phys;               /* slot number/BIOS address */
+    u32             brd_phys;               /* slot number/BIOS address */
     gdt6c_plx_regs      *plx;                   /* PLX regs (new PCI contr.) */
     gdth_cmd_str        cmdext;
     gdth_cmd_str        *pccb;                  /* address command structure */
-    ulong32             ccb_phys;               /* phys. address */
+    u32             ccb_phys;               /* phys. address */
 #ifdef INT_COAL
     gdth_coal_status    *coal_stat;             /* buffer for coalescing int.*/
-    ulong64             coal_stat_phys;         /* phys. address */
+    u64             coal_stat_phys;         /* phys. address */
 #endif
     char                *pscratch;              /* scratch (DMA) buffer */
-    ulong64             scratch_phys;           /* phys. address */
-    unchar              scratch_busy;           /* in use? */
-    unchar              dma64_support;          /* 64-bit DMA supported? */
+    u64             scratch_phys;           /* phys. address */
+    u8              scratch_busy;           /* in use? */
+    u8              dma64_support;          /* 64-bit DMA supported? */
     gdth_msg_str        *pmsg;                  /* message buffer */
-    ulong64             msg_phys;               /* phys. address */
-    unchar              scan_mode;              /* current scan mode */
-    unchar              irq;                    /* IRQ */
-    unchar              drq;                    /* DRQ (ISA controllers) */
-    ushort              status;                 /* command status */
-    ushort              service;                /* service/firmware ver./.. */
-    ulong32             info;
-    ulong32             info2;                  /* additional info */
+    u64             msg_phys;               /* phys. address */
+    u8              scan_mode;              /* current scan mode */
+    u8              irq;                    /* IRQ */
+    u8              drq;                    /* DRQ (ISA controllers) */
+    u16              status;                 /* command status */
+    u16              service;                /* service/firmware ver./.. */
+    u32             info;
+    u32             info2;                  /* additional info */
     Scsi_Cmnd           *req_first;             /* top of request queue */
     struct {
-        unchar          present;                /* Flag: host drive present? */
-        unchar          is_logdrv;              /* Flag: log. drive (master)? */
-        unchar          is_arraydrv;            /* Flag: array drive? */
-        unchar          is_master;              /* Flag: array drive master? */
-        unchar          is_parity;              /* Flag: parity drive? */
-        unchar          is_hotfix;              /* Flag: hotfix drive? */
-        unchar          master_no;              /* number of master drive */
-        unchar          lock;                   /* drive locked? (hot plug) */
-        unchar          heads;                  /* mapping */
-        unchar          secs;
-        ushort          devtype;                /* further information */
-        ulong64         size;                   /* capacity */
-        unchar          ldr_no;                 /* log. drive no. */
-        unchar          rw_attribs;             /* r/w attributes */
-        unchar          cluster_type;           /* cluster properties */
-        unchar          media_changed;          /* Flag:MOUNT/UNMOUNT occured */
-        ulong32         start_sec;              /* start sector */
+        u8          present;                /* Flag: host drive present? */
+        u8          is_logdrv;              /* Flag: log. drive (master)? */
+        u8          is_arraydrv;            /* Flag: array drive? */
+        u8          is_master;              /* Flag: array drive master? */
+        u8          is_parity;              /* Flag: parity drive? */
+        u8          is_hotfix;              /* Flag: hotfix drive? */
+        u8          master_no;              /* number of master drive */
+        u8          lock;                   /* drive locked? (hot plug) */
+        u8          heads;                  /* mapping */
+        u8          secs;
+        u16          devtype;                /* further information */
+        u64         size;                   /* capacity */
+        u8          ldr_no;                 /* log. drive no. */
+        u8          rw_attribs;             /* r/w attributes */
+        u8          cluster_type;           /* cluster properties */
+        u8          media_changed;          /* Flag: MOUNT/UNMOUNT occurred */
+        u32         start_sec;              /* start sector */
     } hdr[MAX_LDRIVES];                         /* host drives */
     struct {
-        unchar          lock;                   /* channel locked? (hot plug) */
-        unchar          pdev_cnt;               /* physical device count */
-        unchar          local_no;               /* local channel number */
-        unchar          io_cnt[MAXID];          /* current IO count */
-        ulong32         address;                /* channel address */
-        ulong32         id_list[MAXID];         /* IDs of the phys. devices */
+        u8          lock;                   /* channel locked? (hot plug) */
+        u8          pdev_cnt;               /* physical device count */
+        u8          local_no;               /* local channel number */
+        u8          io_cnt[MAXID];          /* current IO count */
+        u32         address;                /* channel address */
+        u32         id_list[MAXID];         /* IDs of the phys. devices */
     } raw[MAXBUS];                              /* SCSI channels */
     struct {
         Scsi_Cmnd       *cmnd;                  /* pending request */
-        ushort          service;                /* service */
+        u16          service;                /* service */
     } cmd_tab[GDTH_MAXCMDS];                    /* table of pend. requests */
     struct gdth_cmndinfo {                      /* per-command private info */
         int index;
         int internal_command;                   /* don't call scsi_done */
         gdth_cmd_str *internal_cmd_str;         /* crier for internal messages*/
         dma_addr_t sense_paddr;                 /* sense dma-addr */
-        unchar priority;
+        u8 priority;
 	int timeout_count;			/* # of timeout calls */
         volatile int wait_for_completion;
-        ushort status;
-        ulong32 info;
+        u16 status;
+        u32 info;
         enum dma_data_direction dma_dir;
         int phase;                              /* ???? */
         int OpCode;
     } cmndinfo[GDTH_MAXCMDS];                   /* index==0 is free */
-    unchar              bus_cnt;                /* SCSI bus count */
-    unchar              tid_cnt;                /* Target ID count */
-    unchar              bus_id[MAXBUS];         /* IOP IDs */
-    unchar              virt_bus;               /* number of virtual bus */
-    unchar              more_proc;              /* more /proc info supported */
-    ushort              cmd_cnt;                /* command count in DPRAM */
-    ushort              cmd_len;                /* length of actual command */
-    ushort              cmd_offs_dpmem;         /* actual offset in DPRAM */
-    ushort              ic_all_size;            /* sizeof DPRAM interf. area */
+    u8              bus_cnt;                /* SCSI bus count */
+    u8              tid_cnt;                /* Target ID count */
+    u8              bus_id[MAXBUS];         /* IOP IDs */
+    u8              virt_bus;               /* number of virtual bus */
+    u8              more_proc;              /* more /proc info supported */
+    u16              cmd_cnt;                /* command count in DPRAM */
+    u16              cmd_len;                /* length of actual command */
+    u16              cmd_offs_dpmem;         /* actual offset in DPRAM */
+    u16              ic_all_size;            /* sizeof DPRAM interf. area */
     gdth_cpar_str       cpar;                   /* controller cache par. */
     gdth_bfeat_str      bfeat;                  /* controller features */
     gdth_binfo_str      binfo;                  /* controller info */
@@ -941,7 +941,7 @@
     struct pci_dev      *pdev;
     char                oem_name[8];
 #ifdef GDTH_DMA_STATISTICS
-    ulong               dma32_cnt, dma64_cnt;   /* statistics: DMA buffer */
+    unsigned long       dma32_cnt, dma64_cnt;   /* statistics: DMA buffer */
 #endif
     struct scsi_device         *sdev;
 } gdth_ha_str;
@@ -953,65 +953,65 @@
 
 /* INQUIRY data format */
 typedef struct {
-    unchar      type_qual;
-    unchar      modif_rmb;
-    unchar      version;
-    unchar      resp_aenc;
-    unchar      add_length;
-    unchar      reserved1;
-    unchar      reserved2;
-    unchar      misc;
-    unchar      vendor[8];
-    unchar      product[16];
-    unchar      revision[4];
-} PACKED gdth_inq_data;
+    u8      type_qual;
+    u8      modif_rmb;
+    u8      version;
+    u8      resp_aenc;
+    u8      add_length;
+    u8      reserved1;
+    u8      reserved2;
+    u8      misc;
+    u8      vendor[8];
+    u8      product[16];
+    u8      revision[4];
+} __attribute__((packed)) gdth_inq_data;
 
 /* READ_CAPACITY data format */
 typedef struct {
-    ulong32     last_block_no;
-    ulong32     block_length;
-} PACKED gdth_rdcap_data;
+    u32     last_block_no;
+    u32     block_length;
+} __attribute__((packed)) gdth_rdcap_data;
 
 /* READ_CAPACITY (16) data format */
 typedef struct {
-    ulong64     last_block_no;
-    ulong32     block_length;
-} PACKED gdth_rdcap16_data;
+    u64     last_block_no;
+    u32     block_length;
+} __attribute__((packed)) gdth_rdcap16_data;
 
 /* REQUEST_SENSE data format */
 typedef struct {
-    unchar      errorcode;
-    unchar      segno;
-    unchar      key;
-    ulong32     info;
-    unchar      add_length;
-    ulong32     cmd_info;
-    unchar      adsc;
-    unchar      adsq;
-    unchar      fruc;
-    unchar      key_spec[3];
-} PACKED gdth_sense_data;
+    u8      errorcode;
+    u8      segno;
+    u8      key;
+    u32     info;
+    u8      add_length;
+    u32     cmd_info;
+    u8      adsc;
+    u8      adsq;
+    u8      fruc;
+    u8      key_spec[3];
+} __attribute__((packed)) gdth_sense_data;
 
 /* MODE_SENSE data format */
 typedef struct {
     struct {
-        unchar  data_length;
-        unchar  med_type;
-        unchar  dev_par;
-        unchar  bd_length;
-    } PACKED hd;
+        u8  data_length;
+        u8  med_type;
+        u8  dev_par;
+        u8  bd_length;
+    } __attribute__((packed)) hd;
     struct {
-        unchar  dens_code;
-        unchar  block_count[3];
-        unchar  reserved;
-        unchar  block_length[3];
-    } PACKED bd;
-} PACKED gdth_modep_data;
+        u8  dens_code;
+        u8  block_count[3];
+        u8  reserved;
+        u8  block_length[3];
+    } __attribute__((packed)) bd;
+} __attribute__((packed)) gdth_modep_data;
 
 /* stack frame */
 typedef struct {
-    ulong       b[10];                          /* 32/64 bit compiler ! */
-} PACKED gdth_stackframe;
+    unsigned long       b[10];                          /* 32/64 bit compiler ! */
+} __attribute__((packed)) gdth_stackframe;
 
 
 /* function prototyping */
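
The hunks above and below are essentially a mechanical type cleanup of the
gdth driver headers: the driver-private ulong32/ulong64 typedefs and the
PACKED macro are removed, and ulong32/ulong64/ushort/unchar/ulong become
u32/u64/u16/u8/unsigned long, with PACKED spelled out as
__attribute__((packed)).  The layouts of these structures, which are shared
with the controller firmware and exposed through the driver's ioctl
interface, must stay byte-for-byte identical.  A minimal sketch of the kind
of compile-time layout check such a conversion could be guarded with (the
struct and helper names below are illustrative, not part of the patch):

	#include <linux/types.h>
	#include <linux/kernel.h>	/* BUILD_BUG_ON() */

	/* mirrors gdth_sg_str after the conversion */
	struct gdth_sg_check {
		u32 sg_ptr;		/* was: ulong32 sg_ptr */
		u32 sg_len;		/* was: ulong32 sg_len */
	} __attribute__((packed));

	static inline void gdth_layout_check(void)
	{
		/* both spellings of "packed" must yield an 8-byte struct */
		BUILD_BUG_ON(sizeof(struct gdth_sg_check) != 8);
	}
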
diff --git a/drivers/scsi/gdth_ioctl.h b/drivers/scsi/gdth_ioctl.h
index 783fae7..b004c61 100644
--- a/drivers/scsi/gdth_ioctl.h
+++ b/drivers/scsi/gdth_ioctl.h
@@ -32,109 +32,101 @@
 #define MAX_HDRIVES     MAX_LDRIVES             /* max. host drive count */
 #endif
 
-/* typedefs */
-#ifdef __KERNEL__
-typedef u32     ulong32;
-typedef u64     ulong64;
-#endif
-
-#define PACKED  __attribute__((packed))
-
 /* scatter/gather element */
 typedef struct {
-    ulong32     sg_ptr;                         /* address */
-    ulong32     sg_len;                         /* length */
-} PACKED gdth_sg_str;
+    u32     sg_ptr;                         /* address */
+    u32     sg_len;                         /* length */
+} __attribute__((packed)) gdth_sg_str;
 
 /* scatter/gather element - 64bit addresses */
 typedef struct {
-    ulong64     sg_ptr;                         /* address */
-    ulong32     sg_len;                         /* length */
-} PACKED gdth_sg64_str;
+    u64     sg_ptr;                         /* address */
+    u32     sg_len;                         /* length */
+} __attribute__((packed)) gdth_sg64_str;
 
 /* command structure */
 typedef struct {
-    ulong32     BoardNode;                      /* board node (always 0) */
-    ulong32     CommandIndex;                   /* command number */
-    ushort      OpCode;                         /* the command (READ,..) */
+    u32     BoardNode;                      /* board node (always 0) */
+    u32     CommandIndex;                   /* command number */
+    u16      OpCode;                         /* the command (READ,..) */
     union {
         struct {
-            ushort      DeviceNo;               /* number of cache drive */
-            ulong32     BlockNo;                /* block number */
-            ulong32     BlockCnt;               /* block count */
-            ulong32     DestAddr;               /* dest. addr. (if s/g: -1) */
-            ulong32     sg_canz;                /* s/g element count */
+            u16      DeviceNo;               /* number of cache drive */
+            u32     BlockNo;                /* block number */
+            u32     BlockCnt;               /* block count */
+            u32     DestAddr;               /* dest. addr. (if s/g: -1) */
+            u32     sg_canz;                /* s/g element count */
             gdth_sg_str sg_lst[GDTH_MAXSG];     /* s/g list */
-        } PACKED cache;                         /* cache service cmd. str. */
+        } __attribute__((packed)) cache;        /* cache service cmd. str. */
         struct {
-            ushort      DeviceNo;               /* number of cache drive */
-            ulong64     BlockNo;                /* block number */
-            ulong32     BlockCnt;               /* block count */
-            ulong64     DestAddr;               /* dest. addr. (if s/g: -1) */
-            ulong32     sg_canz;                /* s/g element count */
+            u16      DeviceNo;               /* number of cache drive */
+            u64     BlockNo;                /* block number */
+            u32     BlockCnt;               /* block count */
+            u64     DestAddr;               /* dest. addr. (if s/g: -1) */
+            u32     sg_canz;                /* s/g element count */
             gdth_sg64_str sg_lst[GDTH_MAXSG];   /* s/g list */
-        } PACKED cache64;                       /* cache service cmd. str. */
+        } __attribute__((packed)) cache64;      /* cache service cmd. str. */
         struct {
-            ushort      param_size;             /* size of p_param buffer */
-            ulong32     subfunc;                /* IOCTL function */
-            ulong32     channel;                /* device */
-            ulong64     p_param;                /* buffer */
-        } PACKED ioctl;                         /* IOCTL command structure */
+            u16      param_size;             /* size of p_param buffer */
+            u32     subfunc;                /* IOCTL function */
+            u32     channel;                /* device */
+            u64     p_param;                /* buffer */
+        } __attribute__((packed)) ioctl;        /* IOCTL command structure */
         struct {
-            ushort      reserved;
+            u16      reserved;
             union {
                 struct {
-                    ulong32  msg_handle;        /* message handle */
-                    ulong64  msg_addr;          /* message buffer address */
-                } PACKED msg;
-                unchar       data[12];          /* buffer for rtc data, ... */
+                    u32  msg_handle;        /* message handle */
+                    u64  msg_addr;          /* message buffer address */
+                } __attribute__((packed)) msg;
+                u8       data[12];          /* buffer for rtc data, ... */
             } su;
-        } PACKED screen;                        /* screen service cmd. str. */
+        } __attribute__((packed)) screen;       /* screen service cmd. str. */
         struct {
-            ushort      reserved;
-            ulong32     direction;              /* data direction */
-            ulong32     mdisc_time;             /* disc. time (0: no timeout)*/
-            ulong32     mcon_time;              /* connect time(0: no to.) */
-            ulong32     sdata;                  /* dest. addr. (if s/g: -1) */
-            ulong32     sdlen;                  /* data length (bytes) */
-            ulong32     clen;                   /* SCSI cmd. length(6,10,12) */
-            unchar      cmd[12];                /* SCSI command */
-            unchar      target;                 /* target ID */
-            unchar      lun;                    /* LUN */
-            unchar      bus;                    /* SCSI bus number */
-            unchar      priority;               /* only 0 used */
-            ulong32     sense_len;              /* sense data length */
-            ulong32     sense_data;             /* sense data addr. */
-            ulong32     link_p;                 /* linked cmds (not supp.) */
-            ulong32     sg_ranz;                /* s/g element count */
+            u16      reserved;
+            u32     direction;              /* data direction */
+            u32     mdisc_time;             /* disc. time (0: no timeout)*/
+            u32     mcon_time;              /* connect time(0: no to.) */
+            u32     sdata;                  /* dest. addr. (if s/g: -1) */
+            u32     sdlen;                  /* data length (bytes) */
+            u32     clen;                   /* SCSI cmd. length(6,10,12) */
+            u8      cmd[12];                /* SCSI command */
+            u8      target;                 /* target ID */
+            u8      lun;                    /* LUN */
+            u8      bus;                    /* SCSI bus number */
+            u8      priority;               /* only 0 used */
+            u32     sense_len;              /* sense data length */
+            u32     sense_data;             /* sense data addr. */
+            u32     link_p;                 /* linked cmds (not supp.) */
+            u32     sg_ranz;                /* s/g element count */
             gdth_sg_str sg_lst[GDTH_MAXSG];     /* s/g list */
-        } PACKED raw;                           /* raw service cmd. struct. */
+        } __attribute__((packed)) raw;          /* raw service cmd. struct. */
         struct {
-            ushort      reserved;
-            ulong32     direction;              /* data direction */
-            ulong32     mdisc_time;             /* disc. time (0: no timeout)*/
-            ulong32     mcon_time;              /* connect time(0: no to.) */
-            ulong64     sdata;                  /* dest. addr. (if s/g: -1) */
-            ulong32     sdlen;                  /* data length (bytes) */
-            ulong32     clen;                   /* SCSI cmd. length(6,..,16) */
-            unchar      cmd[16];                /* SCSI command */
-            unchar      target;                 /* target ID */
-            unchar      lun;                    /* LUN */
-            unchar      bus;                    /* SCSI bus number */
-            unchar      priority;               /* only 0 used */
-            ulong32     sense_len;              /* sense data length */
-            ulong64     sense_data;             /* sense data addr. */
-            ulong32     sg_ranz;                /* s/g element count */
+            u16      reserved;
+            u32     direction;              /* data direction */
+            u32     mdisc_time;             /* disc. time (0: no timeout)*/
+            u32     mcon_time;              /* connect time(0: no to.) */
+            u64     sdata;                  /* dest. addr. (if s/g: -1) */
+            u32     sdlen;                  /* data length (bytes) */
+            u32     clen;                   /* SCSI cmd. length(6,..,16) */
+            u8      cmd[16];                /* SCSI command */
+            u8      target;                 /* target ID */
+            u8      lun;                    /* LUN */
+            u8      bus;                    /* SCSI bus number */
+            u8      priority;               /* only 0 used */
+            u32     sense_len;              /* sense data length */
+            u64     sense_data;             /* sense data addr. */
+            u32     sg_ranz;                /* s/g element count */
             gdth_sg64_str sg_lst[GDTH_MAXSG];   /* s/g list */
-        } PACKED raw64;                         /* raw service cmd. struct. */
+        } __attribute__((packed)) raw64;        /* raw service cmd. struct. */
     } u;
     /* additional variables */
-    unchar      Service;                        /* controller service */
-    unchar      reserved;
-    ushort      Status;                         /* command result */
-    ulong32     Info;                           /* additional information */
+    u8      Service;                        /* controller service */
+    u8      reserved;
+    u16      Status;                         /* command result */
+    u32     Info;                           /* additional information */
     void        *RequestBuffer;                 /* request buffer */
-} PACKED gdth_cmd_str;
+} __attribute__((packed)) gdth_cmd_str;
 
 /* controller event structure */
 #define ES_ASYNC    1
@@ -142,129 +134,129 @@
 #define ES_TEST     3
 #define ES_SYNC     4
 typedef struct {
-    ushort                  size;               /* size of structure */
+    u16                  size;               /* size of structure */
     union {
         char                stream[16];
         struct {
-            ushort          ionode;
-            ushort          service;
-            ulong32         index;
-        } PACKED driver;
+            u16          ionode;
+            u16          service;
+            u32         index;
+        } __attribute__((packed)) driver;
         struct {
-            ushort          ionode;
-            ushort          service;
-            ushort          status;
-            ulong32         info;
-            unchar          scsi_coord[3];
-        } PACKED async;
+            u16          ionode;
+            u16          service;
+            u16          status;
+            u32         info;
+            u8          scsi_coord[3];
+        } __attribute__((packed)) async;
         struct {
-            ushort          ionode;
-            ushort          service;
-            ushort          status;
-            ulong32         info;
-            ushort          hostdrive;
-            unchar          scsi_coord[3];
-            unchar          sense_key;
-        } PACKED sync;
+            u16          ionode;
+            u16          service;
+            u16          status;
+            u32         info;
+            u16          hostdrive;
+            u8          scsi_coord[3];
+            u8          sense_key;
+        } __attribute__((packed)) sync;
         struct {
-            ulong32         l1, l2, l3, l4;
-        } PACKED test;
+            u32         l1, l2, l3, l4;
+        } __attribute__((packed)) test;
     } eu;
-    ulong32                 severity;
-    unchar                  event_string[256];          
-} PACKED gdth_evt_data;
+    u32                 severity;
+    u8                  event_string[256];          
+} __attribute__((packed)) gdth_evt_data;
 
 typedef struct {
-    ulong32         first_stamp;
-    ulong32         last_stamp;
-    ushort          same_count;
-    ushort          event_source;
-    ushort          event_idx;
-    unchar          application;
-    unchar          reserved;
+    u32         first_stamp;
+    u32         last_stamp;
+    u16          same_count;
+    u16          event_source;
+    u16          event_idx;
+    u8          application;
+    u8          reserved;
     gdth_evt_data   event_data;
-} PACKED gdth_evt_str;
+} __attribute__((packed)) gdth_evt_str;
 
 
 #ifdef GDTH_IOCTL_PROC
 /* IOCTL structure (write) */
 typedef struct {
-    ulong32                 magic;              /* IOCTL magic */
-    ushort                  ioctl;              /* IOCTL */
-    ushort                  ionode;             /* controller number */
-    ushort                  service;            /* controller service */
-    ushort                  timeout;            /* timeout */
+    u32                 magic;              /* IOCTL magic */
+    u16                  ioctl;              /* IOCTL */
+    u16                  ionode;             /* controller number */
+    u16                  service;            /* controller service */
+    u16                  timeout;            /* timeout */
     union {
         struct {
-            unchar          command[512];       /* controller command */
-            unchar          data[1];            /* add. data */
+            u8          command[512];       /* controller command */
+            u8          data[1];            /* add. data */
         } general;
         struct {
-            unchar          lock;               /* lock/unlock */
-            unchar          drive_cnt;          /* drive count */
-            ushort          drives[MAX_HDRIVES];/* drives */
+            u8          lock;               /* lock/unlock */
+            u8          drive_cnt;          /* drive count */
+            u16          drives[MAX_HDRIVES];/* drives */
         } lockdrv;
         struct {
-            unchar          lock;               /* lock/unlock */
-            unchar          channel;            /* channel */
+            u8          lock;               /* lock/unlock */
+            u8          channel;            /* channel */
         } lockchn;
         struct {
             int             erase;              /* erase event ? */
             int             handle;
-            unchar          evt[EVENT_SIZE];    /* event structure */
+            u8          evt[EVENT_SIZE];    /* event structure */
         } event;
         struct {
-            unchar          bus;                /* SCSI bus */
-            unchar          target;             /* target ID */
-            unchar          lun;                /* LUN */
-            unchar          cmd_len;            /* command length */
-            unchar          cmd[12];            /* SCSI command */
+            u8          bus;                /* SCSI bus */
+            u8          target;             /* target ID */
+            u8          lun;                /* LUN */
+            u8          cmd_len;            /* command length */
+            u8          cmd[12];            /* SCSI command */
         } scsi;
         struct {
-            ushort          hdr_no;             /* host drive number */
-            unchar          flag;               /* old meth./add/remove */
+            u16          hdr_no;             /* host drive number */
+            u8          flag;               /* old meth./add/remove */
         } rescan;
     } iu;
 } gdth_iowr_str;
 
 /* IOCTL structure (read) */
 typedef struct {
-    ulong32                 size;               /* buffer size */
-    ulong32                 status;             /* IOCTL error code */
+    u32                 size;               /* buffer size */
+    u32                 status;             /* IOCTL error code */
     union {
         struct {
-            unchar          data[1];            /* data */
+            u8          data[1];            /* data */
         } general;
         struct {
-            ushort          version;            /* driver version */
+            u16          version;            /* driver version */
         } drvers;
         struct {
-            unchar          type;               /* controller type */
-            ushort          info;               /* slot etc. */
-            ushort          oem_id;             /* OEM ID */
-            ushort          bios_ver;           /* not used */
-            ushort          access;             /* not used */
-            ushort          ext_type;           /* extended type */
-            ushort          device_id;          /* device ID */
-            ushort          sub_device_id;      /* sub device ID */
+            u8          type;               /* controller type */
+            u16          info;               /* slot etc. */
+            u16          oem_id;             /* OEM ID */
+            u16          bios_ver;           /* not used */
+            u16          access;             /* not used */
+            u16          ext_type;           /* extended type */
+            u16          device_id;          /* device ID */
+            u16          sub_device_id;      /* sub device ID */
         } ctrtype;
         struct {
-            unchar          version;            /* OS version */
-            unchar          subversion;         /* OS subversion */
-            ushort          revision;           /* revision */
+            u8          version;            /* OS version */
+            u8          subversion;         /* OS subversion */
+            u16          revision;           /* revision */
         } osvers;
         struct {
-            ushort          count;              /* controller count */
+            u16          count;              /* controller count */
         } ctrcnt;
         struct {
             int             handle;
-            unchar          evt[EVENT_SIZE];    /* event structure */
+            u8          evt[EVENT_SIZE];    /* event structure */
         } event;
         struct {
-            unchar          bus;                /* SCSI bus, 0xff: invalid */
-            unchar          target;             /* target ID */
-            unchar          lun;                /* LUN */
-            unchar          cluster_type;       /* cluster properties */
+            u8          bus;                /* SCSI bus, 0xff: invalid */
+            u8          target;             /* target ID */
+            u8          lun;                /* LUN */
+            u8          cluster_type;       /* cluster properties */
         } hdr_list[MAX_HDRIVES];                /* index is host drive number */
     } iu;
 } gdth_iord_str;
@@ -272,53 +264,53 @@
 
 /* GDTIOCTL_GENERAL */
 typedef struct {
-    ushort ionode;                              /* controller number */
-    ushort timeout;                             /* timeout */
-    ulong32 info;                               /* error info */ 
-    ushort status;                              /* status */
-    ulong data_len;                             /* data buffer size */
-    ulong sense_len;                            /* sense buffer size */
+    u16 ionode;                              /* controller number */
+    u16 timeout;                             /* timeout */
+    u32 info;                               /* error info */ 
+    u16 status;                              /* status */
+    unsigned long data_len;                             /* data buffer size */
+    unsigned long sense_len;                            /* sense buffer size */
     gdth_cmd_str command;                       /* command */                   
 } gdth_ioctl_general;
 
 /* GDTIOCTL_LOCKDRV */
 typedef struct {
-    ushort ionode;                              /* controller number */
-    unchar lock;                                /* lock/unlock */
-    unchar drive_cnt;                           /* drive count */
-    ushort drives[MAX_HDRIVES];                 /* drives */
+    u16 ionode;                              /* controller number */
+    u8 lock;                                /* lock/unlock */
+    u8 drive_cnt;                           /* drive count */
+    u16 drives[MAX_HDRIVES];                 /* drives */
 } gdth_ioctl_lockdrv;
 
 /* GDTIOCTL_LOCKCHN */
 typedef struct {
-    ushort ionode;                              /* controller number */
-    unchar lock;                                /* lock/unlock */
-    unchar channel;                             /* channel */
+    u16 ionode;                              /* controller number */
+    u8 lock;                                /* lock/unlock */
+    u8 channel;                             /* channel */
 } gdth_ioctl_lockchn;
 
 /* GDTIOCTL_OSVERS */
 typedef struct {
-    unchar version;                             /* OS version */
-    unchar subversion;                          /* OS subversion */
-    ushort revision;                            /* revision */
+    u8 version;                             /* OS version */
+    u8 subversion;                          /* OS subversion */
+    u16 revision;                            /* revision */
 } gdth_ioctl_osvers;
 
 /* GDTIOCTL_CTRTYPE */
 typedef struct {
-    ushort ionode;                              /* controller number */
-    unchar type;                                /* controller type */
-    ushort info;                                /* slot etc. */
-    ushort oem_id;                              /* OEM ID */
-    ushort bios_ver;                            /* not used */
-    ushort access;                              /* not used */
-    ushort ext_type;                            /* extended type */
-    ushort device_id;                           /* device ID */
-    ushort sub_device_id;                       /* sub device ID */
+    u16 ionode;                              /* controller number */
+    u8 type;                                /* controller type */
+    u16 info;                                /* slot etc. */
+    u16 oem_id;                              /* OEM ID */
+    u16 bios_ver;                            /* not used */
+    u16 access;                              /* not used */
+    u16 ext_type;                            /* extended type */
+    u16 device_id;                           /* device ID */
+    u16 sub_device_id;                       /* sub device ID */
 } gdth_ioctl_ctrtype;
 
 /* GDTIOCTL_EVENT */
 typedef struct {
-    ushort ionode;
+    u16 ionode;
     int erase;                                  /* erase event? */
     int handle;                                 /* event handle */
     gdth_evt_str event;
@@ -326,22 +318,22 @@
 
 /* GDTIOCTL_RESCAN/GDTIOCTL_HDRLIST */
 typedef struct {
-    ushort ionode;                              /* controller number */
-    unchar flag;                                /* add/remove */
-    ushort hdr_no;                              /* drive no. */
+    u16 ionode;                              /* controller number */
+    u8 flag;                                /* add/remove */
+    u16 hdr_no;                              /* drive no. */
     struct {
-        unchar bus;                             /* SCSI bus */
-        unchar target;                          /* target ID */
-        unchar lun;                             /* LUN */
-        unchar cluster_type;                    /* cluster properties */
+        u8 bus;                             /* SCSI bus */
+        u8 target;                          /* target ID */
+        u8 lun;                             /* LUN */
+        u8 cluster_type;                    /* cluster properties */
     } hdr_list[MAX_HDRIVES];                    /* index is host drive number */
 } gdth_ioctl_rescan;
 
 /* GDTIOCTL_RESET_BUS/GDTIOCTL_RESET_DRV */
 typedef struct {
-    ushort ionode;                              /* controller number */
-    ushort number;                              /* bus/host drive number */
-    ushort status;                              /* status */
+    u16 ionode;                              /* controller number */
+    u16 number;                              /* bus/host drive number */
+    u16 status;                              /* status */
 } gdth_ioctl_reset;
 
 #endif
diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
index 1258da3..ffb2b21 100644
--- a/drivers/scsi/gdth_proc.c
+++ b/drivers/scsi/gdth_proc.c
@@ -43,7 +43,7 @@
     int i, found;
     gdth_cmd_str    gdtcmd;
     gdth_cpar_str   *pcpar;
-    ulong64         paddr;
+    u64         paddr;
 
     char            cmnd[MAX_COMMAND_SIZE];
     memset(cmnd, 0xff, 12);
@@ -156,8 +156,8 @@
     off_t begin = 0,pos = 0;
     int id, i, j, k, sec, flag;
     int no_mdrv = 0, drv_no, is_mirr;
-    ulong32 cnt;
-    ulong64 paddr;
+    u32 cnt;
+    u64 paddr;
     int rc = -ENOMEM;
 
     gdth_cmd_str *gdtcmd;
@@ -220,14 +220,14 @@
 
     if (ha->more_proc)
         sprintf(hrec, "%d.%02d.%02d-%c%03X", 
-                (unchar)(ha->binfo.upd_fw_ver>>24),
-                (unchar)(ha->binfo.upd_fw_ver>>16),
-                (unchar)(ha->binfo.upd_fw_ver),
+                (u8)(ha->binfo.upd_fw_ver>>24),
+                (u8)(ha->binfo.upd_fw_ver>>16),
+                (u8)(ha->binfo.upd_fw_ver),
                 ha->bfeat.raid ? 'R':'N',
                 ha->binfo.upd_revision);
     else
-        sprintf(hrec, "%d.%02d", (unchar)(ha->cpar.version>>8),
-                (unchar)(ha->cpar.version));
+        sprintf(hrec, "%d.%02d", (u8)(ha->cpar.version>>8),
+                (u8)(ha->cpar.version));
 
     size = sprintf(buffer+len,
                    " Driver Ver.:  \t%-10s\tFirmware Ver.: \t%s\n",
@@ -281,7 +281,7 @@
             pds->bid = ha->raw[i].local_no;
             pds->first = 0;
             pds->entries = ha->raw[i].pdev_cnt;
-            cnt = (3*GDTH_SCRATCH/4 - 5 * sizeof(ulong32)) /
+            cnt = (3*GDTH_SCRATCH/4 - 5 * sizeof(u32)) /
                 sizeof(pds->list[0]);
             if (pds->entries > cnt)
                 pds->entries = cnt;
@@ -604,7 +604,7 @@
 
             size = sprintf(buffer+len,
                            " Capacity [MB]:\t%-6d    \tStart Sector:  \t%d\n",
-                           (ulong32)(ha->hdr[i].size/2048), ha->hdr[i].start_sec);
+                           (u32)(ha->hdr[i].size/2048), ha->hdr[i].start_sec);
             len += size;  pos = begin + len;
             if (pos < offset) {
                 len = 0;
@@ -664,9 +664,9 @@
 }
 
 static char *gdth_ioctl_alloc(gdth_ha_str *ha, int size, int scratch,
-                              ulong64 *paddr)
+                              u64 *paddr)
 {
-    ulong flags;
+    unsigned long flags;
     char *ret_val;
 
     if (size == 0)
@@ -691,9 +691,9 @@
     return ret_val;
 }
 
-static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, ulong64 paddr)
+static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, u64 paddr)
 {
-    ulong flags;
+    unsigned long flags;
 
     if (buf == ha->pscratch) {
 	spin_lock_irqsave(&ha->smp_lock, flags);
@@ -705,16 +705,16 @@
 }
 
 #ifdef GDTH_IOCTL_PROC
-static int gdth_ioctl_check_bin(gdth_ha_str *ha, ushort size)
+static int gdth_ioctl_check_bin(gdth_ha_str *ha, u16 size)
 {
-    ulong flags;
+    unsigned long flags;
     int ret_val;
 
     spin_lock_irqsave(&ha->smp_lock, flags);
 
     ret_val = FALSE;
     if (ha->scratch_busy) {
-        if (((gdth_iord_str *)ha->pscratch)->size == (ulong32)size)
+        if (((gdth_iord_str *)ha->pscratch)->size == (u32)size)
             ret_val = TRUE;
     }
     spin_unlock_irqrestore(&ha->smp_lock, flags);
@@ -724,11 +724,11 @@
 
 static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id)
 {
-    ulong flags;
+    unsigned long flags;
     int i;
     Scsi_Cmnd *scp;
     struct gdth_cmndinfo *cmndinfo;
-    unchar b, t;
+    u8 b, t;
 
     spin_lock_irqsave(&ha->smp_lock, flags);
 
@@ -738,8 +738,8 @@
 
         b = scp->device->channel;
         t = scp->device->id;
-        if (!SPECIAL_SCP(scp) && t == (unchar)id && 
-            b == (unchar)busnum) {
+        if (!SPECIAL_SCP(scp) && t == (u8)id && 
+            b == (u8)busnum) {
             cmndinfo->wait_for_completion = 0;
             spin_unlock_irqrestore(&ha->smp_lock, flags);
             while (!cmndinfo->wait_for_completion)
diff --git a/drivers/scsi/gdth_proc.h b/drivers/scsi/gdth_proc.h
index 9b900cc..dab15f5 100644
--- a/drivers/scsi/gdth_proc.h
+++ b/drivers/scsi/gdth_proc.h
@@ -17,8 +17,8 @@
                              int length, gdth_ha_str *ha);
 
 static char *gdth_ioctl_alloc(gdth_ha_str *ha, int size, int scratch,
-                              ulong64 *paddr);
-static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, ulong64 paddr);
+                              u64 *paddr);
+static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, u64 paddr);
 static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id);
 
 #endif
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index bb96fdd..03697ba 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -52,7 +52,7 @@
 #include "hpsa.h"
 
 /* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
-#define HPSA_DRIVER_VERSION "1.0.0"
+#define HPSA_DRIVER_VERSION "2.0.1-3"
 #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
 
 /* How long to wait (in milliseconds) for board to go into simple mode */
@@ -77,9 +77,6 @@
 
 /* define the PCI info for the cards we can control */
 static const struct pci_device_id hpsa_pci_device_id[] = {
-	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSC,     0x103C, 0x3223},
-	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSC,     0x103C, 0x3234},
-	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSC,     0x103C, 0x323D},
 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3241},
 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3243},
 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3245},
@@ -87,6 +84,9 @@
 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3249},
 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324a},
 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324b},
+	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3233},
+#define PCI_DEVICE_ID_HP_CISSF 0x333f
+	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x333F},
 	{PCI_VENDOR_ID_HP,     PCI_ANY_ID,             PCI_ANY_ID, PCI_ANY_ID,
 		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
 	{0,}
@@ -99,9 +99,6 @@
  *  access = Address of the struct of function pointers
  */
 static struct board_type products[] = {
-	{0x3223103C, "Smart Array P800", &SA5_access},
-	{0x3234103C, "Smart Array P400", &SA5_access},
-	{0x323d103c, "Smart Array P700M", &SA5_access},
 	{0x3241103C, "Smart Array P212", &SA5_access},
 	{0x3243103C, "Smart Array P410", &SA5_access},
 	{0x3245103C, "Smart Array P410i", &SA5_access},
@@ -109,6 +106,8 @@
 	{0x3249103C, "Smart Array P812", &SA5_access},
 	{0x324a103C, "Smart Array P712m", &SA5_access},
 	{0x324b103C, "Smart Array P711m", &SA5_access},
+	{0x3233103C, "StorageWorks P1210m", &SA5_access},
+	{0x333F103C, "StorageWorks P1210m", &SA5_access},
 	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
 };
 
@@ -126,12 +125,15 @@
 static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
 static struct CommandList *cmd_alloc(struct ctlr_info *h);
 static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
-static void fill_cmd(struct CommandList *c, __u8 cmd, struct ctlr_info *h,
-	void *buff, size_t size, __u8 page_code, unsigned char *scsi3addr,
+static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
+	void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
 	int cmd_type);
 
 static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd,
 		void (*done)(struct scsi_cmnd *));
+static void hpsa_scan_start(struct Scsi_Host *);
+static int hpsa_scan_finished(struct Scsi_Host *sh,
+	unsigned long elapsed_time);
 
 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
 static int hpsa_slave_alloc(struct scsi_device *sdev);
@@ -150,6 +152,11 @@
 	struct CommandList *c);
 static void check_ioctl_unit_attention(struct ctlr_info *h,
 	struct CommandList *c);
+/* performant mode helper functions */
+static void calc_bucket_map(int *bucket, int num_buckets,
+	int nsgs, int *bucket_map);
+static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
+static inline u32 next_command(struct ctlr_info *h);
 
 static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
 static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
@@ -173,10 +180,10 @@
 	.name			= "hpsa",
 	.proc_name		= "hpsa",
 	.queuecommand		= hpsa_scsi_queue_command,
-	.can_queue		= 512,
+	.scan_start		= hpsa_scan_start,
+	.scan_finished		= hpsa_scan_finished,
 	.this_id		= -1,
 	.sg_tablesize		= MAXSGENTRIES,
-	.cmd_per_lun		= 512,
 	.use_clustering		= ENABLE_CLUSTERING,
 	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
 	.ioctl			= hpsa_ioctl,
@@ -195,6 +202,12 @@
 	return (struct ctlr_info *) *priv;
 }
 
+static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
+{
+	unsigned long *priv = shost_priv(sh);
+	return (struct ctlr_info *) *priv;
+}
+
 static struct task_struct *hpsa_scan_thread;
 static DEFINE_MUTEX(hpsa_scan_mutex);
 static LIST_HEAD(hpsa_scan_q);
@@ -312,7 +325,7 @@
 			h->busy_scanning = 1;
 			mutex_unlock(&hpsa_scan_mutex);
 			host_no = h->scsi_host ?  h->scsi_host->host_no : -1;
-			hpsa_update_scsi_devices(h, host_no);
+			hpsa_scan_start(h->scsi_host);
 			complete_all(&h->scan_wait);
 			mutex_lock(&hpsa_scan_mutex);
 			h->busy_scanning = 0;
@@ -379,8 +392,7 @@
 {
 	struct ctlr_info *h;
 	struct Scsi_Host *shost = class_to_shost(dev);
-	unsigned long *priv = shost_priv(shost);
-	h = (struct ctlr_info *) *priv;
+	h = shost_to_hba(shost);
 	if (add_to_scan_list(h)) {
 		wake_up_process(hpsa_scan_thread);
 		wait_for_completion_interruptible(&h->scan_wait);
@@ -394,10 +406,44 @@
 	hlist_add_head(&c->list, list);
 }
 
+static inline u32 next_command(struct ctlr_info *h)
+{
+	u32 a;
+
+	if (unlikely(h->transMethod != CFGTBL_Trans_Performant))
+		return h->access.command_completed(h);
+
+	if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
+		a = *(h->reply_pool_head); /* Next cmd in ring buffer */
+		(h->reply_pool_head)++;
+		h->commands_outstanding--;
+	} else {
+		a = FIFO_EMPTY;
+	}
+	/* Check for wraparound */
+	if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
+		h->reply_pool_head = h->reply_pool;
+		h->reply_pool_wraparound ^= 1;
+	}
+	return a;
+}
+
+/* set_performant_mode: Modify the tag for cciss performant
+ * set bit 0 for pull model, bits 3-1 for block fetch
+ * register number
+ */
+static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
+{
+	if (likely(h->transMethod == CFGTBL_Trans_Performant))
+		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
+}
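
Aside: c->busaddr has room for these flag bits because struct CommandList is 32-byte aligned (see the COMMANDLIST_ALIGNMENT build check added in hpsa_init_one() further down), so its low five address bits are always zero. The following stand-alone sketch is purely illustrative and assumes only the encoding used by set_performant_mode() above:

  /* Illustrative only: bit 0 = pull model, bits 3:1 = block fetch register
   * number (the value looked up in blockFetchTable), remaining bits = the
   * 32-byte-aligned command address, as in set_performant_mode(). */
  #include <assert.h>
  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
          uint32_t busaddr = 0x12345680;  /* 32-byte aligned: low 5 bits clear */
          uint32_t bucket  = 3;           /* e.g. blockFetchTable[nr of SG entries] */
          uint32_t tag     = busaddr | 1 | (bucket << 1);

          assert(tag & 1);                        /* pull model */
          assert(((tag >> 1) & 0x7) == bucket);   /* block fetch register no. */
          assert((tag & ~0x1fU) == busaddr);      /* address bits untouched */
          printf("performant tag = 0x%08x\n", (unsigned)tag);
          return 0;
  }
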
+
 static void enqueue_cmd_and_start_io(struct ctlr_info *h,
 	struct CommandList *c)
 {
 	unsigned long flags;
+
+	set_performant_mode(h, c);
 	spin_lock_irqsave(&h->lock, flags);
 	addQ(&h->reqQ, c);
 	h->Qdepth++;
@@ -422,6 +468,15 @@
 	return (scsi3addr[3] & 0xC0) == 0x40;
 }
 
+static inline int is_scsi_rev_5(struct ctlr_info *h)
+{
+	if (!h->hba_inquiry_data)
+		return 0;
+	if ((h->hba_inquiry_data[2] & 0x07) == 5)
+		return 1;
+	return 0;
+}
+
 static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
 	"UNKNOWN"
 };
@@ -431,7 +486,7 @@
 	     struct device_attribute *attr, char *buf)
 {
 	ssize_t l = 0;
-	int rlevel;
+	unsigned char rlevel;
 	struct ctlr_info *h;
 	struct scsi_device *sdev;
 	struct hpsa_scsi_dev_t *hdev;
@@ -455,7 +510,7 @@
 
 	rlevel = hdev->raid_level;
 	spin_unlock_irqrestore(&h->lock, flags);
-	if (rlevel < 0 || rlevel > RAID_UNKNOWN)
+	if (rlevel > RAID_UNKNOWN)
 		rlevel = RAID_UNKNOWN;
 	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
 	return l;
@@ -620,6 +675,24 @@
 	return 0;
 }
 
+/* Replace an entry from h->dev[] array. */
+static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
+	int entry, struct hpsa_scsi_dev_t *new_entry,
+	struct hpsa_scsi_dev_t *added[], int *nadded,
+	struct hpsa_scsi_dev_t *removed[], int *nremoved)
+{
+	/* assumes h->devlock is held */
+	BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA);
+	removed[*nremoved] = h->dev[entry];
+	(*nremoved)++;
+	h->dev[entry] = new_entry;
+	added[*nadded] = new_entry;
+	(*nadded)++;
+	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n",
+		scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
+			new_entry->target, new_entry->lun);
+}
+
 /* Remove an entry from h->dev[] array. */
 static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
 	struct hpsa_scsi_dev_t *removed[], int *nremoved)
@@ -628,8 +701,7 @@
 	int i;
 	struct hpsa_scsi_dev_t *sd;
 
-	if (entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA)
-		BUG();
+	BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA);
 
 	sd = h->dev[entry];
 	removed[*nremoved] = h->dev[entry];
@@ -722,6 +794,8 @@
 #define DEVICE_CHANGED 1
 #define DEVICE_SAME 2
 	for (i = 0; i < haystack_size; i++) {
+		if (haystack[i] == NULL) /* previously removed. */
+			continue;
 		if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
 			*index = i;
 			if (device_is_the_same(needle, haystack[i]))
@@ -734,7 +808,7 @@
 	return DEVICE_NOT_FOUND;
 }
 
-static int adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
+static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
 	struct hpsa_scsi_dev_t *sd[], int nsds)
 {
 	/* sd contains scsi3 addresses and devtypes, and inquiry
@@ -779,12 +853,12 @@
 			continue; /* remove ^^^, hence i not incremented */
 		} else if (device_change == DEVICE_CHANGED) {
 			changes++;
-			hpsa_scsi_remove_entry(h, hostno, i,
-				removed, &nremoved);
-			(void) hpsa_scsi_add_entry(h, hostno, sd[entry],
-				added, &nadded);
-			/* add can't fail, we just removed one. */
-			sd[entry] = NULL; /* prevent it from being freed */
+			hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
+				added, &nadded, removed, &nremoved);
+			/* Set it to NULL to prevent it from being freed
+			 * at the bottom of hpsa_update_scsi_devices()
+			 */
+			sd[entry] = NULL;
 		}
 		i++;
 	}
@@ -860,7 +934,6 @@
 free_and_out:
 	kfree(added);
 	kfree(removed);
-	return 0;
 }
 
 /*
@@ -900,7 +973,7 @@
 
 static void hpsa_slave_destroy(struct scsi_device *sdev)
 {
-	return; /* nothing to do. */
+	/* nothing to do. */
 }
 
 static void hpsa_scsi_setup(struct ctlr_info *h)
@@ -908,11 +981,10 @@
 	h->ndevices = 0;
 	h->scsi_host = NULL;
 	spin_lock_init(&h->devlock);
-	return;
 }
 
 static void complete_scsi_command(struct CommandList *cp,
-	int timeout, __u32 tag)
+	int timeout, u32 tag)
 {
 	struct scsi_cmnd *cmd;
 	struct ctlr_info *h;
@@ -987,7 +1059,6 @@
 				 * required
 				 */
 				if ((asc == 0x04) && (ascq == 0x03)) {
-					cmd->result = DID_NO_CONNECT << 16;
 					dev_warn(&h->pdev->dev, "cp %p "
 						"has check condition: unit "
 						"not ready, manual "
@@ -995,14 +1066,22 @@
 					break;
 				}
 			}
-
-
+			if (sense_key == ABORTED_COMMAND) {
+				/* Aborted command is retryable */
+				dev_warn(&h->pdev->dev, "cp %p "
+					"has check condition: aborted command: "
+					"ASC: 0x%x, ASCQ: 0x%x\n",
+					cp, asc, ascq);
+				cmd->result = DID_SOFT_ERROR << 16;
+				break;
+			}
 			/* Must be some other type of check condition */
 			dev_warn(&h->pdev->dev, "cp %p has check condition: "
 					"unknown type: "
 					"Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
 					"Returning result: 0x%x, "
 					"cmd=[%02x %02x %02x %02x %02x "
+					"%02x %02x %02x %02x %02x %02x "
 					"%02x %02x %02x %02x %02x]\n",
 					cp, sense_key, asc, ascq,
 					cmd->result,
@@ -1010,7 +1089,10 @@
 					cmd->cmnd[2], cmd->cmnd[3],
 					cmd->cmnd[4], cmd->cmnd[5],
 					cmd->cmnd[6], cmd->cmnd[7],
-					cmd->cmnd[8], cmd->cmnd[9]);
+					cmd->cmnd[8], cmd->cmnd[9],
+					cmd->cmnd[10], cmd->cmnd[11],
+					cmd->cmnd[12], cmd->cmnd[13],
+					cmd->cmnd[14], cmd->cmnd[15]);
 			break;
 		}
 
@@ -1086,7 +1168,7 @@
 		dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp);
 		break;
 	case CMD_UNSOLICITED_ABORT:
-		cmd->result = DID_ABORT << 16;
+		cmd->result = DID_RESET << 16;
 		dev_warn(&h->pdev->dev, "cp %p aborted due to an unsolicited "
 			"abort\n", cp);
 		break;
@@ -1119,9 +1201,11 @@
 	sh->max_cmd_len = MAX_COMMAND_SIZE;
 	sh->max_lun = HPSA_MAX_LUN;
 	sh->max_id = HPSA_MAX_LUN;
+	sh->can_queue = h->nr_cmds;
+	sh->cmd_per_lun = h->nr_cmds;
 	h->scsi_host = sh;
 	sh->hostdata[0] = (unsigned long) h;
-	sh->irq = h->intr[SIMPLE_MODE_INT];
+	sh->irq = h->intr[PERF_MODE_INT];
 	sh->unique_id = sh->irq;
 	error = scsi_add_host(sh, &h->pdev->dev);
 	if (error)
@@ -1133,11 +1217,11 @@
 	dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_add_host"
 		" failed for controller %d\n", h->ctlr);
 	scsi_host_put(sh);
-	return -1;
+	return error;
  fail:
 	dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_host_alloc"
 		" failed for controller %d\n", h->ctlr);
-	return -1;
+	return -ENOMEM;
 }
 
 static void hpsa_pci_unmap(struct pci_dev *pdev,
@@ -1160,7 +1244,7 @@
 		size_t buflen,
 		int data_direction)
 {
-	__u64 addr64;
+	u64 addr64;
 
 	if (buflen == 0 || data_direction == PCI_DMA_NONE) {
 		cp->Header.SGList = 0;
@@ -1168,14 +1252,14 @@
 		return;
 	}
 
-	addr64 = (__u64) pci_map_single(pdev, buf, buflen, data_direction);
+	addr64 = (u64) pci_map_single(pdev, buf, buflen, data_direction);
 	cp->SG[0].Addr.lower =
-	  (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF);
+	  (u32) (addr64 & (u64) 0x00000000FFFFFFFF);
 	cp->SG[0].Addr.upper =
-	  (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF);
+	  (u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF);
 	cp->SG[0].Len = buflen;
-	cp->Header.SGList = (__u8) 1;   /* no. SGs contig in this cmd */
-	cp->Header.SGTotal = (__u16) 1; /* total sgs in this cmd list */
+	cp->Header.SGList = (u8) 1;   /* no. SGs contig in this cmd */
+	cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */
 }
 
 static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
@@ -1274,7 +1358,7 @@
 
 	if (c == NULL) {			/* trouble... */
 		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
-		return -1;
+		return -ENOMEM;
 	}
 
 	fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, page, scsi3addr, TYPE_CMD);
@@ -1366,9 +1450,8 @@
 		dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
 		return -1;
 	}
-
-	memset(&scsi3addr[0], 0, 8); /* address the controller */
-
+	/* address the controller */
+	memset(scsi3addr, 0, sizeof(scsi3addr));
 	fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
 		buf, bufsize, 0, scsi3addr, TYPE_CMD);
 	if (extended_response)
@@ -1409,13 +1492,12 @@
 	unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device)
 {
 #define OBDR_TAPE_INQ_SIZE 49
-	unsigned char *inq_buff = NULL;
+	unsigned char *inq_buff;
 
-	inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
+	inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
 	if (!inq_buff)
 		goto bail_out;
 
-	memset(inq_buff, 0, OBDR_TAPE_INQ_SIZE);
 	/* Do an inquiry to the device to see what it is. */
 	if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
 		(unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
@@ -1485,32 +1567,51 @@
  * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
  */
 static void figure_bus_target_lun(struct ctlr_info *h,
-	__u8 *lunaddrbytes, int *bus, int *target, int *lun,
+	u8 *lunaddrbytes, int *bus, int *target, int *lun,
 	struct hpsa_scsi_dev_t *device)
 {
-
-	__u32 lunid;
+	u32 lunid;
 
 	if (is_logical_dev_addr_mode(lunaddrbytes)) {
 		/* logical device */
-		memcpy(&lunid, lunaddrbytes, sizeof(lunid));
-		lunid = le32_to_cpu(lunid);
-
-		if (is_msa2xxx(h, device)) {
-			*bus = 1;
-			*target = (lunid >> 16) & 0x3fff;
-			*lun = lunid & 0x00ff;
-		} else {
+		if (unlikely(is_scsi_rev_5(h))) {
+			/* p1210m, logical drives lun assignments
+			 * match SCSI REPORT LUNS data.
+			 */
+			lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
 			*bus = 0;
-			*lun = 0;
-			*target = lunid & 0x3fff;
+			*target = 0;
+			*lun = (lunid & 0x3fff) + 1;
+		} else {
+			/* not p1210m... */
+			lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
+			if (is_msa2xxx(h, device)) {
+				/* msa2xxx way, put logicals on bus 1
+				 * and match target/lun numbers box
+				 * reports.
+				 */
+				*bus = 1;
+				*target = (lunid >> 16) & 0x3fff;
+				*lun = lunid & 0x00ff;
+			} else {
+				/* Traditional smart array way. */
+				*bus = 0;
+				*lun = 0;
+				*target = lunid & 0x3fff;
+			}
 		}
 	} else {
 		/* physical device */
 		if (is_hba_lunid(lunaddrbytes))
-			*bus = 3;
+			if (unlikely(is_scsi_rev_5(h))) {
+				*bus = 0; /* put p1210m ctlr at 0,0,0 */
+				*target = 0;
+				*lun = 0;
+				return;
+			} else
+				*bus = 3; /* traditional smartarray */
 		else
-			*bus = 2;
+			*bus = 2; /* physical disk */
 		*target = -1;
 		*lun = -1; /* we will fill these in later. */
 	}
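
Aside: a worked example of the three logical-volume addressing schemes handled above. The LUN id value is made up for illustration; nothing below is driver code:

  #include <stdio.h>

  int main(void)
  {
          unsigned int lunid = 0x00170023;        /* hypothetical REPORT LUNS entry */

          /* MSA2xxx: logicals go on bus 1, target/lun taken from the id */
          printf("msa2xxx:     bus 1, target %u, lun %u\n",
                 (lunid >> 16) & 0x3fff, lunid & 0x00ff);

          /* P1210m (SCSI rev 5): everything at 0:0, lun follows REPORT LUNS */
          printf("p1210m:      bus 0, target 0, lun %u\n", (lunid & 0x3fff) + 1);

          /* Traditional Smart Array: bus 0, lun 0, target from the id */
          printf("smart array: bus 0, target %u, lun 0\n", lunid & 0x3fff);
          return 0;
  }
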
@@ -1529,7 +1630,7 @@
  */
 static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
 	struct hpsa_scsi_dev_t *tmpdevice,
-	struct hpsa_scsi_dev_t *this_device, __u8 *lunaddrbytes,
+	struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
 	int bus, int target, int lun, unsigned long lunzerobits[],
 	int *nmsa2xxx_enclosures)
 {
@@ -1550,6 +1651,9 @@
 	if (is_hba_lunid(scsi3addr))
 		return 0; /* Don't add the RAID controller here. */
 
+	if (is_scsi_rev_5(h))
+		return 0; /* p1210m doesn't need to do this. */
+
 #define MAX_MSA2XXX_ENCLOSURES 32
 	if (*nmsa2xxx_enclosures >= MAX_MSA2XXX_ENCLOSURES) {
 		dev_warn(&h->pdev->dev, "Maximum number of MSA2XXX "
@@ -1576,18 +1680,14 @@
  */
 static int hpsa_gather_lun_info(struct ctlr_info *h,
 	int reportlunsize,
-	struct ReportLUNdata *physdev, __u32 *nphysicals,
-	struct ReportLUNdata *logdev, __u32 *nlogicals)
+	struct ReportLUNdata *physdev, u32 *nphysicals,
+	struct ReportLUNdata *logdev, u32 *nlogicals)
 {
 	if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 0)) {
 		dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
 		return -1;
 	}
-	memcpy(nphysicals, &physdev->LUNListLength[0], sizeof(*nphysicals));
-	*nphysicals = be32_to_cpu(*nphysicals) / 8;
-#ifdef DEBUG
-	dev_info(&h->pdev->dev, "number of physical luns is %d\n", *nphysicals);
-#endif
+	*nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 8;
 	if (*nphysicals > HPSA_MAX_PHYS_LUN) {
 		dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded."
 			"  %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
@@ -1598,11 +1698,7 @@
 		dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
 		return -1;
 	}
-	memcpy(nlogicals, &logdev->LUNListLength[0], sizeof(*nlogicals));
-	*nlogicals = be32_to_cpu(*nlogicals) / 8;
-#ifdef DEBUG
-	dev_info(&h->pdev->dev, "number of logical luns is %d\n", *nlogicals);
-#endif
+	*nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
 	/* Reject Logicals in excess of our max capability. */
 	if (*nlogicals > HPSA_MAX_LUN) {
 		dev_warn(&h->pdev->dev,
@@ -1621,6 +1717,31 @@
 	return 0;
 }
 
+u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i,
+	int nphysicals, int nlogicals, struct ReportLUNdata *physdev_list,
+	struct ReportLUNdata *logdev_list)
+{
+	/* Helper function, figure out where the LUN ID info is coming from
+	 * given index i, lists of physical and logical devices, where in
+	 * the list the raid controller is supposed to appear (first or last)
+	 */
+
+	int logicals_start = nphysicals + (raid_ctlr_position == 0);
+	int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);
+
+	if (i == raid_ctlr_position)
+		return RAID_CTLR_LUNID;
+
+	if (i < logicals_start)
+		return &physdev_list->LUN[i - (raid_ctlr_position == 0)][0];
+
+	if (i < last_device)
+		return &logdev_list->LUN[i - nphysicals -
+			(raid_ctlr_position == 0)][0];
+	BUG();
+	return NULL;
+}
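
Aside: the index arithmetic in figure_lunaddrbytes() is easiest to see with concrete numbers. The sketch below is illustrative only and assumes nphysicals = 2 and nlogicals = 1:

  #include <stdio.h>

  static const char *source(int i, int nphys, int ctlr_pos)
  {
          int logicals_start = nphys + (ctlr_pos == 0);

          if (i == ctlr_pos)
                  return "RAID_CTLR_LUNID";
          return i < logicals_start ? "physdev_list" : "logdev_list";
  }

  int main(void)
  {
          int nphys = 2, nlog = 1, i;

          /* Traditional controllers: RAID controller jammed in at the end. */
          for (i = 0; i <= nphys + nlog; i++)
                  printf("old style i=%d -> %s\n", i, source(i, nphys, nphys + nlog));

          /* SCSI rev-5 (P1210m): controller first, both lists shift by one. */
          for (i = 0; i <= nphys + nlog; i++)
                  printf("rev-5     i=%d -> %s\n", i, source(i, nphys, 0));
          return 0;
  }
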
+
 static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
 {
 	/* the idea here is we could get notified
@@ -1636,14 +1757,15 @@
 	struct ReportLUNdata *physdev_list = NULL;
 	struct ReportLUNdata *logdev_list = NULL;
 	unsigned char *inq_buff = NULL;
-	__u32 nphysicals = 0;
-	__u32 nlogicals = 0;
-	__u32 ndev_allocated = 0;
+	u32 nphysicals = 0;
+	u32 nlogicals = 0;
+	u32 ndev_allocated = 0;
 	struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
 	int ncurrent = 0;
 	int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8;
 	int i, nmsa2xxx_enclosures, ndevs_to_allocate;
 	int bus, target, lun;
+	int raid_ctlr_position;
 	DECLARE_BITMAP(lunzerobits, HPSA_MAX_TARGETS_PER_CTLR);
 
 	currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_SCSI_DEVS_PER_HBA,
@@ -1681,23 +1803,22 @@
 		ndev_allocated++;
 	}
 
+	if (unlikely(is_scsi_rev_5(h)))
+		raid_ctlr_position = 0;
+	else
+		raid_ctlr_position = nphysicals + nlogicals;
+
 	/* adjust our table of devices */
 	nmsa2xxx_enclosures = 0;
 	for (i = 0; i < nphysicals + nlogicals + 1; i++) {
-		__u8 *lunaddrbytes;
+		u8 *lunaddrbytes;
 
 		/* Figure out where the LUN ID info is coming from */
-		if (i < nphysicals)
-			lunaddrbytes = &physdev_list->LUN[i][0];
-		else
-			if (i < nphysicals + nlogicals)
-				lunaddrbytes =
-					&logdev_list->LUN[i-nphysicals][0];
-			else /* jam in the RAID controller at the end */
-				lunaddrbytes = RAID_CTLR_LUNID;
-
+		lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
+			i, nphysicals, nlogicals, physdev_list, logdev_list);
 		/* skip masked physical devices. */
-		if (lunaddrbytes[3] & 0xC0 && i < nphysicals)
+		if (lunaddrbytes[3] & 0xC0 &&
+			i < nphysicals + (raid_ctlr_position == 0))
 			continue;
 
 		/* Get device type, vendor, model, device id */
@@ -1777,7 +1898,6 @@
 	kfree(inq_buff);
 	kfree(physdev_list);
 	kfree(logdev_list);
-	return;
 }
 
 /* hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
@@ -1790,7 +1910,7 @@
 {
 	unsigned int len;
 	struct scatterlist *sg;
-	__u64 addr64;
+	u64 addr64;
 	int use_sg, i;
 
 	BUG_ON(scsi_sg_count(cmd) > MAXSGENTRIES);
@@ -1803,20 +1923,20 @@
 		goto sglist_finished;
 
 	scsi_for_each_sg(cmd, sg, use_sg, i) {
-		addr64 = (__u64) sg_dma_address(sg);
+		addr64 = (u64) sg_dma_address(sg);
 		len  = sg_dma_len(sg);
 		cp->SG[i].Addr.lower =
-			(__u32) (addr64 & (__u64) 0x00000000FFFFFFFF);
+			(u32) (addr64 & (u64) 0x00000000FFFFFFFF);
 		cp->SG[i].Addr.upper =
-			(__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF);
+			(u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF);
 		cp->SG[i].Len = len;
 		cp->SG[i].Ext = 0;  /* we are not chaining */
 	}
 
 sglist_finished:
 
-	cp->Header.SGList = (__u8) use_sg;   /* no. SGs contig in this cmd */
-	cp->Header.SGTotal = (__u16) use_sg; /* total sgs in this cmd list */
+	cp->Header.SGList = (u8) use_sg;   /* no. SGs contig in this cmd */
+	cp->Header.SGTotal = (u16) use_sg; /* total sgs in this cmd list */
 	return 0;
 }
 
@@ -1860,7 +1980,8 @@
 	c->scsi_cmd = cmd;
 	c->Header.ReplyQueue = 0;  /* unused in simple mode */
 	memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
-	c->Header.Tag.lower = c->busaddr;  /* Use k. address of cmd as tag */
+	c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT);
+	c->Header.Tag.lower |= DIRECT_LOOKUP_BIT;
 
 	/* Fill in the request block... */
 
@@ -1914,6 +2035,48 @@
 	return 0;
 }
 
+static void hpsa_scan_start(struct Scsi_Host *sh)
+{
+	struct ctlr_info *h = shost_to_hba(sh);
+	unsigned long flags;
+
+	/* wait until any scan already in progress is finished. */
+	while (1) {
+		spin_lock_irqsave(&h->scan_lock, flags);
+		if (h->scan_finished)
+			break;
+		spin_unlock_irqrestore(&h->scan_lock, flags);
+		wait_event(h->scan_wait_queue, h->scan_finished);
+		/* Note: We don't need to worry about a race between this
+		 * thread and driver unload because the midlayer will
+		 * have incremented the reference count, so unload won't
+		 * happen if we're in here.
+		 */
+	}
+	h->scan_finished = 0; /* mark scan as in progress */
+	spin_unlock_irqrestore(&h->scan_lock, flags);
+
+	hpsa_update_scsi_devices(h, h->scsi_host->host_no);
+
+	spin_lock_irqsave(&h->scan_lock, flags);
+	h->scan_finished = 1; /* mark scan as finished. */
+	wake_up_all(&h->scan_wait_queue);
+	spin_unlock_irqrestore(&h->scan_lock, flags);
+}
+
+static int hpsa_scan_finished(struct Scsi_Host *sh,
+	unsigned long elapsed_time)
+{
+	struct ctlr_info *h = shost_to_hba(sh);
+	unsigned long flags;
+	int finished;
+
+	spin_lock_irqsave(&h->scan_lock, flags);
+	finished = h->scan_finished;
+	spin_unlock_irqrestore(&h->scan_lock, flags);
+	return finished;
+}
+
 static void hpsa_unregister_scsi(struct ctlr_info *h)
 {
 	/* we are being forcibly unloaded, and may not refuse. */
@@ -1926,7 +2089,6 @@
 {
 	int rc;
 
-	hpsa_update_scsi_devices(h, -1);
 	rc = hpsa_scsi_detect(h);
 	if (rc != 0)
 		dev_err(&h->pdev->dev, "hpsa_register_scsi: failed"
@@ -2003,14 +2165,14 @@
 	h = sdev_to_hba(scsicmd->device);
 	if (h == NULL) /* paranoia */
 		return FAILED;
-	dev_warn(&h->pdev->dev, "resetting drive\n");
-
 	dev = scsicmd->device->hostdata;
 	if (!dev) {
 		dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: "
 			"device lookup failed.\n");
 		return FAILED;
 	}
+	dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n",
+		h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
 	/* send a reset to the SCSI LUN which the command was sent to */
 	rc = hpsa_send_reset(h, dev->scsi3addr);
 	if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0)
@@ -2053,8 +2215,8 @@
 	c->cmdindex = i;
 
 	INIT_HLIST_NODE(&c->list);
-	c->busaddr = (__u32) cmd_dma_handle;
-	temp64.val = (__u64) err_dma_handle;
+	c->busaddr = (u32) cmd_dma_handle;
+	temp64.val = (u64) err_dma_handle;
 	c->ErrDesc.Addr.lower = temp64.val32.lower;
 	c->ErrDesc.Addr.upper = temp64.val32.upper;
 	c->ErrDesc.Len = sizeof(*c->err_info);
@@ -2091,8 +2253,8 @@
 	memset(c->err_info, 0, sizeof(*c->err_info));
 
 	INIT_HLIST_NODE(&c->list);
-	c->busaddr = (__u32) cmd_dma_handle;
-	temp64.val = (__u64) err_dma_handle;
+	c->busaddr = (u32) cmd_dma_handle;
+	temp64.val = (u64) err_dma_handle;
 	c->ErrDesc.Addr.lower = temp64.val32.lower;
 	c->ErrDesc.Addr.upper = temp64.val32.upper;
 	c->ErrDesc.Len = sizeof(*c->err_info);
@@ -2125,50 +2287,6 @@
 
 #ifdef CONFIG_COMPAT
 
-static int do_ioctl(struct scsi_device *dev, int cmd, void *arg)
-{
-	int ret;
-
-	lock_kernel();
-	ret = hpsa_ioctl(dev, cmd, arg);
-	unlock_kernel();
-	return ret;
-}
-
-static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg);
-static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
-	int cmd, void *arg);
-
-static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg)
-{
-	switch (cmd) {
-	case CCISS_GETPCIINFO:
-	case CCISS_GETINTINFO:
-	case CCISS_SETINTINFO:
-	case CCISS_GETNODENAME:
-	case CCISS_SETNODENAME:
-	case CCISS_GETHEARTBEAT:
-	case CCISS_GETBUSTYPES:
-	case CCISS_GETFIRMVER:
-	case CCISS_GETDRIVVER:
-	case CCISS_REVALIDVOLS:
-	case CCISS_DEREGDISK:
-	case CCISS_REGNEWDISK:
-	case CCISS_REGNEWD:
-	case CCISS_RESCANDISK:
-	case CCISS_GETLUNINFO:
-		return do_ioctl(dev, cmd, arg);
-
-	case CCISS_PASSTHRU32:
-		return hpsa_ioctl32_passthru(dev, cmd, arg);
-	case CCISS_BIG_PASSTHRU32:
-		return hpsa_ioctl32_big_passthru(dev, cmd, arg);
-
-	default:
-		return -ENOIOCTLCMD;
-	}
-}
-
 static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg)
 {
 	IOCTL32_Command_struct __user *arg32 =
@@ -2193,7 +2311,7 @@
 	if (err)
 		return -EFAULT;
 
-	err = do_ioctl(dev, CCISS_PASSTHRU, (void *)p);
+	err = hpsa_ioctl(dev, CCISS_PASSTHRU, (void *)p);
 	if (err)
 		return err;
 	err |= copy_in_user(&arg32->error_info, &p->error_info,
@@ -2230,7 +2348,7 @@
 	if (err)
 		return -EFAULT;
 
-	err = do_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p);
+	err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p);
 	if (err)
 		return err;
 	err |= copy_in_user(&arg32->error_info, &p->error_info,
@@ -2239,6 +2357,36 @@
 		return -EFAULT;
 	return err;
 }
+
+static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg)
+{
+	switch (cmd) {
+	case CCISS_GETPCIINFO:
+	case CCISS_GETINTINFO:
+	case CCISS_SETINTINFO:
+	case CCISS_GETNODENAME:
+	case CCISS_SETNODENAME:
+	case CCISS_GETHEARTBEAT:
+	case CCISS_GETBUSTYPES:
+	case CCISS_GETFIRMVER:
+	case CCISS_GETDRIVVER:
+	case CCISS_REVALIDVOLS:
+	case CCISS_DEREGDISK:
+	case CCISS_REGNEWDISK:
+	case CCISS_REGNEWD:
+	case CCISS_RESCANDISK:
+	case CCISS_GETLUNINFO:
+		return hpsa_ioctl(dev, cmd, arg);
+
+	case CCISS_PASSTHRU32:
+		return hpsa_ioctl32_passthru(dev, cmd, arg);
+	case CCISS_BIG_PASSTHRU32:
+		return hpsa_ioctl32_big_passthru(dev, cmd, arg);
+
+	default:
+		return -ENOIOCTLCMD;
+	}
+}
 #endif
 
 static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
@@ -2378,8 +2526,8 @@
 	BYTE sg_used = 0;
 	int status = 0;
 	int i;
-	__u32 left;
-	__u32 sz;
+	u32 left;
+	u32 sz;
 	BYTE __user *data_ptr;
 
 	if (!argp)
@@ -2527,7 +2675,7 @@
 	case CCISS_DEREGDISK:
 	case CCISS_REGNEWDISK:
 	case CCISS_REGNEWD:
-		hpsa_update_scsi_devices(h, dev->host->host_no);
+		hpsa_scan_start(h->scsi_host);
 		return 0;
 	case CCISS_GETPCIINFO:
 		return hpsa_getpciinfo_ioctl(h, argp);
@@ -2542,8 +2690,8 @@
 	}
 }
 
-static void fill_cmd(struct CommandList *c, __u8 cmd, struct ctlr_info *h,
-	void *buff, size_t size, __u8 page_code, unsigned char *scsi3addr,
+static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
+	void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
 	int cmd_type)
 {
 	int pci_dir = XFER_NONE;
@@ -2710,19 +2858,20 @@
 	return h->access.command_completed(h);
 }
 
-static inline int interrupt_pending(struct ctlr_info *h)
+static inline bool interrupt_pending(struct ctlr_info *h)
 {
 	return h->access.intr_pending(h);
 }
 
 static inline long interrupt_not_for_us(struct ctlr_info *h)
 {
-	return ((h->access.intr_pending(h) == 0) ||
-		 (h->interrupts_enabled == 0));
+	return !(h->msi_vector || h->msix_vector) &&
+		((h->access.intr_pending(h) == 0) ||
+		(h->interrupts_enabled == 0));
 }
 
-static inline int bad_tag(struct ctlr_info *h, __u32 tag_index,
-	__u32 raw_tag)
+static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
+	u32 raw_tag)
 {
 	if (unlikely(tag_index >= h->nr_cmds)) {
 		dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
@@ -2731,7 +2880,7 @@
 	return 0;
 }
 
-static inline void finish_cmd(struct CommandList *c, __u32 raw_tag)
+static inline void finish_cmd(struct CommandList *c, u32 raw_tag)
 {
 	removeQ(c);
 	if (likely(c->cmd_type == CMD_SCSI))
@@ -2740,42 +2889,79 @@
 		complete(c->waiting);
 }
 
+static inline u32 hpsa_tag_contains_index(u32 tag)
+{
+#define DIRECT_LOOKUP_BIT 0x10
+	return tag & DIRECT_LOOKUP_BIT;
+}
+
+static inline u32 hpsa_tag_to_index(u32 tag)
+{
+#define DIRECT_LOOKUP_SHIFT 5
+	return tag >> DIRECT_LOOKUP_SHIFT;
+}
+
+static inline u32 hpsa_tag_discard_error_bits(u32 tag)
+{
+#define HPSA_ERROR_BITS 0x03
+	return tag & ~HPSA_ERROR_BITS;
+}
+
+/* process completion of an indexed ("direct lookup") command */
+static inline u32 process_indexed_cmd(struct ctlr_info *h,
+	u32 raw_tag)
+{
+	u32 tag_index;
+	struct CommandList *c;
+
+	tag_index = hpsa_tag_to_index(raw_tag);
+	if (bad_tag(h, tag_index, raw_tag))
+		return next_command(h);
+	c = h->cmd_pool + tag_index;
+	finish_cmd(c, raw_tag);
+	return next_command(h);
+}
+
+/* process completion of a non-indexed command */
+static inline u32 process_nonindexed_cmd(struct ctlr_info *h,
+	u32 raw_tag)
+{
+	u32 tag;
+	struct CommandList *c = NULL;
+	struct hlist_node *tmp;
+
+	tag = hpsa_tag_discard_error_bits(raw_tag);
+	hlist_for_each_entry(c, tmp, &h->cmpQ, list) {
+		if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) {
+			finish_cmd(c, raw_tag);
+			return next_command(h);
+		}
+	}
+	bad_tag(h, h->nr_cmds + 1, raw_tag);
+	return next_command(h);
+}
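
Aside: the tag round trip between hpsa_scsi_queue_command(), which now stores the command-pool index in Header.Tag.lower, and process_indexed_cmd() above can be checked with a small stand-alone sketch using the same DIRECT_LOOKUP constants (illustrative only):

  #include <assert.h>
  #include <stdint.h>

  #define DIRECT_LOOKUP_BIT       0x10
  #define DIRECT_LOOKUP_SHIFT     5

  int main(void)
  {
          uint32_t cmdindex = 42;         /* index of the command in h->cmd_pool */
          uint32_t tag;

          /* what hpsa_scsi_queue_command() stores in c->Header.Tag.lower */
          tag = (cmdindex << DIRECT_LOOKUP_SHIFT) | DIRECT_LOOKUP_BIT;

          /* what the interrupt path recovers from the completed tag */
          assert(tag & DIRECT_LOOKUP_BIT);                  /* indexed command   */
          assert((tag >> DIRECT_LOOKUP_SHIFT) == cmdindex); /* back to the index */
          return 0;
  }
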
+
 static irqreturn_t do_hpsa_intr(int irq, void *dev_id)
 {
 	struct ctlr_info *h = dev_id;
-	struct CommandList *c;
 	unsigned long flags;
-	__u32 raw_tag, tag, tag_index;
-	struct hlist_node *tmp;
+	u32 raw_tag;
 
 	if (interrupt_not_for_us(h))
 		return IRQ_NONE;
 	spin_lock_irqsave(&h->lock, flags);
-	while (interrupt_pending(h)) {
-		while ((raw_tag = get_next_completion(h)) != FIFO_EMPTY) {
-			if (likely(HPSA_TAG_CONTAINS_INDEX(raw_tag))) {
-				tag_index = HPSA_TAG_TO_INDEX(raw_tag);
-				if (bad_tag(h, tag_index, raw_tag))
-					return IRQ_HANDLED;
-				c = h->cmd_pool + tag_index;
-				finish_cmd(c, raw_tag);
-				continue;
-			}
-			tag = HPSA_TAG_DISCARD_ERROR_BITS(raw_tag);
-			c = NULL;
-			hlist_for_each_entry(c, tmp, &h->cmpQ, list) {
-				if (c->busaddr == tag) {
-					finish_cmd(c, raw_tag);
-					break;
-				}
-			}
-		}
+	raw_tag = get_next_completion(h);
+	while (raw_tag != FIFO_EMPTY) {
+		if (hpsa_tag_contains_index(raw_tag))
+			raw_tag = process_indexed_cmd(h, raw_tag);
+		else
+			raw_tag = process_nonindexed_cmd(h, raw_tag);
 	}
 	spin_unlock_irqrestore(&h->lock, flags);
 	return IRQ_HANDLED;
 }
 
-/* Send a message CDB to the firmware. */
+/* Send a message CDB to the firmware. */
 static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
 						unsigned char type)
 {
@@ -2841,7 +3027,7 @@
 
 	for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
 		tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
-		if (HPSA_TAG_DISCARD_ERROR_BITS(tag) == paddr32)
+		if (hpsa_tag_discard_error_bits(tag) == paddr32)
 			break;
 		msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
 	}
@@ -3063,7 +3249,7 @@
  */
 
 static void __devinit hpsa_interrupt_mode(struct ctlr_info *h,
-					   struct pci_dev *pdev, __u32 board_id)
+					   struct pci_dev *pdev, u32 board_id)
 {
 #ifdef CONFIG_PCI_MSI
 	int err;
@@ -3107,22 +3293,22 @@
 default_int_mode:
 #endif				/* CONFIG_PCI_MSI */
 	/* if we get here we're going to use the default interrupt mode */
-	h->intr[SIMPLE_MODE_INT] = pdev->irq;
-	return;
+	h->intr[PERF_MODE_INT] = pdev->irq;
 }
 
 static int hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev)
 {
 	ushort subsystem_vendor_id, subsystem_device_id, command;
-	__u32 board_id, scratchpad = 0;
-	__u64 cfg_offset;
-	__u32 cfg_base_addr;
-	__u64 cfg_base_addr_index;
+	u32 board_id, scratchpad = 0;
+	u64 cfg_offset;
+	u32 cfg_base_addr;
+	u64 cfg_base_addr_index;
+	u32 trans_offset;
 	int i, prod_index, err;
 
 	subsystem_vendor_id = pdev->subsystem_vendor;
 	subsystem_device_id = pdev->subsystem_device;
-	board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
+	board_id = (((u32) (subsystem_device_id << 16) & 0xffff0000) |
 		    subsystem_vendor_id);
 
 	for (i = 0; i < ARRAY_SIZE(products); i++)
@@ -3199,7 +3385,7 @@
 
 	/* get the address index number */
 	cfg_base_addr = readl(h->vaddr + SA5_CTCFG_OFFSET);
-	cfg_base_addr &= (__u32) 0x0000ffff;
+	cfg_base_addr &= (u32) 0x0000ffff;
 	cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr);
 	if (cfg_base_addr_index == -1) {
 		dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
@@ -3211,11 +3397,14 @@
 	h->cfgtable = remap_pci_mem(pci_resource_start(pdev,
 			       cfg_base_addr_index) + cfg_offset,
 				sizeof(h->cfgtable));
+	/* Find performant mode table. */
+	trans_offset = readl(&(h->cfgtable->TransMethodOffset));
+	h->transtable = remap_pci_mem(pci_resource_start(pdev,
+				cfg_base_addr_index)+cfg_offset+trans_offset,
+				sizeof(*h->transtable));
+
 	h->board_id = board_id;
-
-	/* Query controller for max supported commands: */
-	h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
-
+	h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
 	h->product_name = products[prod_index].product_name;
 	h->access = *(products[prod_index].access);
 	/* Allow room for some ioctls */
@@ -3232,7 +3421,7 @@
 #ifdef CONFIG_X86
 	{
 		/* Need to enable prefetch in the SCSI core for 6400 in x86 */
-		__u32 prefetch;
+		u32 prefetch;
 		prefetch = readl(&(h->cfgtable->SCSI_Prefetch));
 		prefetch |= 0x100;
 		writel(prefetch, &(h->cfgtable->SCSI_Prefetch));
@@ -3244,7 +3433,7 @@
 	 * physical memory.
 	 */
 	if (board_id == 0x3225103C) {
-		__u32 dma_prefetch;
+		u32 dma_prefetch;
 		dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
 		dma_prefetch |= 0x8000;
 		writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
@@ -3286,10 +3475,26 @@
 	return err;
 }
 
+static void __devinit hpsa_hba_inquiry(struct ctlr_info *h)
+{
+	int rc;
+
+#define HBA_INQUIRY_BYTE_COUNT 64
+	h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
+	if (!h->hba_inquiry_data)
+		return;
+	rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
+		h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
+	if (rc != 0) {
+		kfree(h->hba_inquiry_data);
+		h->hba_inquiry_data = NULL;
+	}
+}
+
 static int __devinit hpsa_init_one(struct pci_dev *pdev,
 				    const struct pci_device_id *ent)
 {
-	int i;
+	int i, rc;
 	int dac;
 	struct ctlr_info *h;
 
@@ -3314,17 +3519,23 @@
 		}
 	}
 
-	BUILD_BUG_ON(sizeof(struct CommandList) % 8);
+	/* Command structures must be aligned on a 32-byte boundary because
+	 * the 5 lower bits of the address are used by the hardware and by
+	 * the driver.  See comments in hpsa.h for more info.
+	 */
+#define COMMANDLIST_ALIGNMENT 32
+	BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
 	h = kzalloc(sizeof(*h), GFP_KERNEL);
 	if (!h)
-		return -1;
+		return -ENOMEM;
 
 	h->busy_initializing = 1;
 	INIT_HLIST_HEAD(&h->cmpQ);
 	INIT_HLIST_HEAD(&h->reqQ);
 	mutex_init(&h->busy_shutting_down);
 	init_completion(&h->scan_wait);
-	if (hpsa_pci_init(h, pdev) != 0)
+	rc = hpsa_pci_init(h, pdev);
+	if (rc != 0)
 		goto clean1;
 
 	sprintf(h->devname, "hpsa%d", number_of_controllers);
@@ -3333,27 +3544,32 @@
 	h->pdev = pdev;
 
 	/* configure PCI DMA stuff */
-	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
+	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+	if (rc == 0) {
 		dac = 1;
-	else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
-		dac = 0;
-	else {
-		dev_err(&pdev->dev, "no suitable DMA available\n");
-		goto clean1;
+	} else {
+		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (rc == 0) {
+			dac = 0;
+		} else {
+			dev_err(&pdev->dev, "no suitable DMA available\n");
+			goto clean1;
+		}
 	}
 
 	/* make sure the board interrupts are off */
 	h->access.set_intr_mask(h, HPSA_INTR_OFF);
-	if (request_irq(h->intr[SIMPLE_MODE_INT], do_hpsa_intr,
-			IRQF_DISABLED | IRQF_SHARED, h->devname, h)) {
+	rc = request_irq(h->intr[PERF_MODE_INT], do_hpsa_intr,
+			IRQF_DISABLED, h->devname, h);
+	if (rc) {
 		dev_err(&pdev->dev, "unable to get irq %d for %s\n",
-		       h->intr[SIMPLE_MODE_INT], h->devname);
+		       h->intr[PERF_MODE_INT], h->devname);
 		goto clean2;
 	}
 
-	dev_info(&pdev->dev, "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n",
-	       h->devname, pdev->device, pci_name(pdev),
-	       h->intr[SIMPLE_MODE_INT], dac ? "" : " not");
+	dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
+	       h->devname, pdev->device,
+	       h->intr[PERF_MODE_INT], dac ? "" : " not");
 
 	h->cmd_pool_bits =
 	    kmalloc(((h->nr_cmds + BITS_PER_LONG -
@@ -3368,9 +3584,13 @@
 	    || (h->cmd_pool == NULL)
 	    || (h->errinfo_pool == NULL)) {
 		dev_err(&pdev->dev, "out of memory");
+		rc = -ENOMEM;
 		goto clean4;
 	}
 	spin_lock_init(&h->lock);
+	spin_lock_init(&h->scan_lock);
+	init_waitqueue_head(&h->scan_wait_queue);
+	h->scan_finished = 1; /* no scan currently in progress */
 
 	pci_set_drvdata(pdev, h);
 	memset(h->cmd_pool_bits, 0,
@@ -3382,6 +3602,8 @@
 	/* Turn the interrupts on so we can service requests */
 	h->access.set_intr_mask(h, HPSA_INTR_ON);
 
+	hpsa_put_ctlr_into_performant_mode(h);
+	hpsa_hba_inquiry(h);
 	hpsa_register_scsi(h);	/* hook ourselves into SCSI subsystem */
 	h->busy_initializing = 0;
 	return 1;
@@ -3397,12 +3619,12 @@
 			    h->nr_cmds * sizeof(struct ErrorInfo),
 			    h->errinfo_pool,
 			    h->errinfo_pool_dhandle);
-	free_irq(h->intr[SIMPLE_MODE_INT], h);
+	free_irq(h->intr[PERF_MODE_INT], h);
 clean2:
 clean1:
 	h->busy_initializing = 0;
 	kfree(h);
-	return -1;
+	return rc;
 }
 
 static void hpsa_flush_cache(struct ctlr_info *h)
@@ -3441,7 +3663,7 @@
 	 */
 	hpsa_flush_cache(h);
 	h->access.set_intr_mask(h, HPSA_INTR_OFF);
-	free_irq(h->intr[2], h);
+	free_irq(h->intr[PERF_MODE_INT], h);
 #ifdef CONFIG_PCI_MSI
 	if (h->msix_vector)
 		pci_disable_msix(h->pdev);
@@ -3470,7 +3692,11 @@
 	pci_free_consistent(h->pdev,
 		h->nr_cmds * sizeof(struct ErrorInfo),
 		h->errinfo_pool, h->errinfo_pool_dhandle);
+	pci_free_consistent(h->pdev, h->reply_pool_size,
+		h->reply_pool, h->reply_pool_dhandle);
 	kfree(h->cmd_pool_bits);
+	kfree(h->blockFetchTable);
+	kfree(h->hba_inquiry_data);
 	/*
 	 * Deliberately omit pci_disable_device(): it does something nasty to
 	 * Smart Array controllers that pci_enable_device does not undo
@@ -3502,6 +3728,129 @@
 	.resume = hpsa_resume,
 };
 
+/* Fill in bucket_map[], given nsgs (the max number of
+ * scatter gather elements supported) and bucket[],
+ * which is an array of 8 integers.  The bucket[] array
+ * contains 8 different DMA transfer sizes (in 16
+ * byte increments) which the controller uses to fetch
+ * commands.  This function fills in bucket_map[], which
+ * maps a given number of scatter gather elements to one of
+ * the 8 DMA transfer sizes.  The point of it is to allow the
+ * controller to only do as much DMA as needed to fetch the
+ * command, with the DMA transfer size encoded in the lower
+ * bits of the command address.
+ */
+static void  calc_bucket_map(int bucket[], int num_buckets,
+	int nsgs, int *bucket_map)
+{
+	int i, j, b, size;
+
+	/* even a command with 0 SGs requires 4 blocks */
+#define MINIMUM_TRANSFER_BLOCKS 4
+#define NUM_BUCKETS 8
+	/* Note, bucket_map must have nsgs+1 entries. */
+	for (i = 0; i <= nsgs; i++) {
+		/* Compute size of a command with i SG entries */
+		size = i + MINIMUM_TRANSFER_BLOCKS;
+		b = num_buckets; /* Assume the biggest bucket */
+		/* Find the bucket that is just big enough */
+		for (j = 0; j < 8; j++) {
+			if (bucket[j] >= size) {
+				b = j;
+				break;
+			}
+		}
+		/* for a command with i SG entries, use bucket b. */
+		bucket_map[i] = b;
+	}
+}
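
Aside: a worked example of the table calc_bucket_map() builds from the initial bft[] values used below (before bft[7] is bumped to max_sg_entries + 4). Illustrative only; the mapping logic mirrors the function above:

  #include <stdio.h>

  int main(void)
  {
          int bft[8] = {5, 6, 8, 10, 12, 20, 28, 35};     /* 16-byte blocks */
          int i, j;

          for (i = 0; i <= 8; i++) {                      /* small SG counts only */
                  int size = i + 4;                       /* MINIMUM_TRANSFER_BLOCKS */
                  int b = 8;                              /* assume biggest bucket */

                  for (j = 0; j < 8; j++)
                          if (bft[j] >= size) {
                                  b = j;
                                  break;
                          }
                  printf("%d SG entries -> %2d blocks -> bucket %d (%d blocks fetched)\n",
                         i, size, b, bft[b]);
          }
          return 0;
  }
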
+
+static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
+{
+	u32 trans_support;
+	u64 trans_offset;
+	/*  5 = 1 s/g entry or 4k
+	 *  6 = 2 s/g entry or 8k
+	 *  8 = 4 s/g entry or 16k
+	 * 10 = 6 s/g entry or 24k
+	 */
+	int bft[8] = {5, 6, 8, 10, 12, 20, 28, 35}; /* for scatter/gathers */
+	int i = 0;
+	int l = 0;
+	unsigned long register_value;
+
+	trans_support = readl(&(h->cfgtable->TransportSupport));
+	if (!(trans_support & PERFORMANT_MODE))
+		return;
+
+	h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
+	h->max_sg_entries = 32;
+	/* Performant mode ring buffer and supporting data structures */
+	h->reply_pool_size = h->max_commands * sizeof(u64);
+	h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size,
+				&(h->reply_pool_dhandle));
+
+	/* Need a block fetch table for performant mode */
+	h->blockFetchTable = kmalloc(((h->max_sg_entries+1) *
+				sizeof(u32)), GFP_KERNEL);
+
+	if ((h->reply_pool == NULL)
+		|| (h->blockFetchTable == NULL))
+		goto clean_up;
+
+	h->reply_pool_wraparound = 1; /* spec: init to 1 */
+
+	/* Controller spec: zero out this buffer. */
+	memset(h->reply_pool, 0, h->reply_pool_size);
+	h->reply_pool_head = h->reply_pool;
+
+	trans_offset = readl(&(h->cfgtable->TransMethodOffset));
+	bft[7] = h->max_sg_entries + 4;
+	calc_bucket_map(bft, ARRAY_SIZE(bft), 32, h->blockFetchTable);
+	for (i = 0; i < 8; i++)
+		writel(bft[i], &h->transtable->BlockFetch[i]);
+
+	/* size of controller ring buffer */
+	writel(h->max_commands, &h->transtable->RepQSize);
+	writel(1, &h->transtable->RepQCount);
+	writel(0, &h->transtable->RepQCtrAddrLow32);
+	writel(0, &h->transtable->RepQCtrAddrHigh32);
+	writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32);
+	writel(0, &h->transtable->RepQAddr0High32);
+	writel(CFGTBL_Trans_Performant,
+		&(h->cfgtable->HostWrite.TransportRequest));
+	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
+	/* under certain very rare conditions, this can take a while.
+	 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
+	 * as we enter this code.) */
+	for (l = 0; l < MAX_CONFIG_WAIT; l++) {
+		register_value = readl(h->vaddr + SA5_DOORBELL);
+		if (!(register_value & CFGTBL_ChangeReq))
+			break;
+		/* delay and try again */
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule_timeout(10);
+	}
+	register_value = readl(&(h->cfgtable->TransportActive));
+	if (!(register_value & CFGTBL_Trans_Performant)) {
+		dev_warn(&h->pdev->dev, "unable to get board into"
+					" performant mode\n");
+		return;
+	}
+
+	/* Change the access methods to the performant access methods */
+	h->access = SA5_performant_access;
+	h->transMethod = CFGTBL_Trans_Performant;
+
+	return;
+
+clean_up:
+	if (h->reply_pool)
+		pci_free_consistent(h->pdev, h->reply_pool_size,
+			h->reply_pool, h->reply_pool_dhandle);
+	kfree(h->blockFetchTable);
+}
+
 /*
  *  This is it.  Register the PCI driver information for the cards we control;
  *  the OS will call our registered routines when it finds one of our cards.
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 6bd1949..a0502b3 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -33,7 +33,7 @@
 		struct CommandList *c);
 	void (*set_intr_mask)(struct ctlr_info *h, unsigned long val);
 	unsigned long (*fifo_full)(struct ctlr_info *h);
-	unsigned long (*intr_pending)(struct ctlr_info *h);
+	bool (*intr_pending)(struct ctlr_info *h);
 	unsigned long (*command_completed)(struct ctlr_info *h);
 };
 
@@ -55,19 +55,20 @@
 	char    *product_name;
 	char	firm_ver[4]; /* Firmware version */
 	struct pci_dev *pdev;
-	__u32	board_id;
+	u32	board_id;
 	void __iomem *vaddr;
 	unsigned long paddr;
 	int 	nr_cmds; /* Number of commands allowed on this controller */
 	struct CfgTable __iomem *cfgtable;
+	int     max_sg_entries;
 	int	interrupts_enabled;
 	int	major;
 	int 	max_commands;
 	int	commands_outstanding;
 	int 	max_outstanding; /* Debug */
 	int	usage_count;  /* number of opens of all minor devices */
-#	define DOORBELL_INT	0
-#	define PERF_MODE_INT	1
+#	define PERF_MODE_INT	0
+#	define DOORBELL_INT	1
 #	define SIMPLE_MODE_INT	2
 #	define MEMQ_MODE_INT	3
 	unsigned int intr[4];
@@ -93,6 +94,9 @@
 	int			nr_frees;
 	int			busy_initializing;
 	int			busy_scanning;
+	int			scan_finished;
+	spinlock_t		scan_lock;
+	wait_queue_head_t	scan_wait_queue;
 	struct mutex		busy_shutting_down;
 	struct list_head	scan_list;
 	struct completion	scan_wait;
@@ -102,6 +106,24 @@
 	int ndevices; /* number of used elements in .dev[] array. */
 #define HPSA_MAX_SCSI_DEVS_PER_HBA 256
 	struct hpsa_scsi_dev_t *dev[HPSA_MAX_SCSI_DEVS_PER_HBA];
+	/*
+	 * Performant mode tables.
+	 */
+	u32 trans_support;
+	u32 trans_offset;
+	struct TransTable_struct *transtable;
+	unsigned long transMethod;
+
+	/*
+	 * Performant mode completion buffer
+	 */
+	u64 *reply_pool;
+	dma_addr_t reply_pool_dhandle;
+	u64 *reply_pool_head;
+	size_t reply_pool_size;
+	unsigned char reply_pool_wraparound;
+	u32 *blockFetchTable;
+	unsigned char *hba_inquiry_data;
 };
 #define HPSA_ABORT_MSG 0
 #define HPSA_DEVICE_RESET_MSG 1
@@ -164,9 +186,16 @@
 #define HPSA_FIRMWARE_READY	0xffff0000 /* value in scratchpad register */
 
 #define HPSA_ERROR_BIT		0x02
-#define HPSA_TAG_CONTAINS_INDEX(tag) ((tag) & 0x04)
-#define HPSA_TAG_TO_INDEX(tag) ((tag) >> 3)
-#define HPSA_TAG_DISCARD_ERROR_BITS(tag) ((tag) & ~3)
+
+/* Performant mode flags */
+#define SA5_PERF_INTR_PENDING   0x04
+#define SA5_PERF_INTR_OFF       0x05
+#define SA5_OUTDB_STATUS_PERF_BIT       0x01
+#define SA5_OUTDB_CLEAR_PERF_BIT        0x01
+#define SA5_OUTDB_CLEAR         0xA0
+#define SA5_OUTDB_STATUS        0x9C
+
 
 #define HPSA_INTR_ON 	1
 #define HPSA_INTR_OFF	0
@@ -176,10 +205,8 @@
 static void SA5_submit_command(struct ctlr_info *h,
 	struct CommandList *c)
 {
-#ifdef HPSA_DEBUG
-	 printk(KERN_WARNING "hpsa: Sending %x - down to controller\n",
-		c->busaddr);
-#endif /* HPSA_DEBUG */
+	dev_dbg(&h->pdev->dev, "Sending %x, tag = %x\n", c->busaddr,
+		c->Header.Tag.lower);
 	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
 	h->commands_outstanding++;
 	if (h->commands_outstanding > h->max_outstanding)
@@ -202,6 +229,52 @@
 			h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
 	}
 }
+
+static void SA5_performant_intr_mask(struct ctlr_info *h, unsigned long val)
+{
+	if (val) { /* turn on interrupts */
+		h->interrupts_enabled = 1;
+		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
+	} else {
+		h->interrupts_enabled = 0;
+		writel(SA5_PERF_INTR_OFF,
+			h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
+	}
+}
+
+static unsigned long SA5_performant_completed(struct ctlr_info *h)
+{
+	unsigned long register_value = FIFO_EMPTY;
+
+	/* flush the controller write of the reply queue by reading
+	 * outbound doorbell status register.
+	 */
+	register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
+	/* msi auto clears the interrupt pending bit. */
+	if (!(h->msi_vector || h->msix_vector)) {
+		writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR);
+		/* Do a read in order to flush the write to the controller
+		 * (as per spec.)
+		 */
+		register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
+	}
+
+	if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
+		register_value = *(h->reply_pool_head);
+		(h->reply_pool_head)++;
+		h->commands_outstanding--;
+	} else {
+		register_value = FIFO_EMPTY;
+	}
+	/* Check for wraparound */
+	if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
+		h->reply_pool_head = h->reply_pool;
+		h->reply_pool_wraparound ^= 1;
+	}
+
+	return register_value;
+}
+
 /*
  *  Returns true if fifo is full.
  *
@@ -228,10 +301,10 @@
 
 #ifdef HPSA_DEBUG
 	if (register_value != FIFO_EMPTY)
-		printk(KERN_INFO "hpsa:  Read %lx back from board\n",
+		dev_dbg(&h->pdev->dev, "Read %lx back from board\n",
 			register_value);
 	else
-		printk(KERN_INFO "hpsa:  FIFO Empty read\n");
+		dev_dbg(&h->pdev->dev, "FIFO Empty read\n");
 #endif
 
 	return register_value;
@@ -239,18 +312,28 @@
 /*
  *	Returns true if an interrupt is pending..
  */
-static unsigned long SA5_intr_pending(struct ctlr_info *h)
+static bool SA5_intr_pending(struct ctlr_info *h)
 {
 	unsigned long register_value  =
 		readl(h->vaddr + SA5_INTR_STATUS);
-#ifdef HPSA_DEBUG
-	printk(KERN_INFO "hpsa: intr_pending %lx\n", register_value);
-#endif  /* HPSA_DEBUG */
-	if (register_value &  SA5_INTR_PENDING)
-		return  1;
-	return 0 ;
+	dev_dbg(&h->pdev->dev, "intr_pending %lx\n", register_value);
+	return register_value & SA5_INTR_PENDING;
 }
 
+static bool SA5_performant_intr_pending(struct ctlr_info *h)
+{
+	unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);
+
+	if (!register_value)
+		return false;
+
+	if (h->msi_vector || h->msix_vector)
+		return true;
+
+	/* Read outbound doorbell to flush */
+	register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
+	return register_value & SA5_OUTDB_STATUS_PERF_BIT;
+}
 
 static struct access_method SA5_access = {
 	SA5_submit_command,
@@ -260,14 +343,19 @@
 	SA5_completed,
 };
 
+static struct access_method SA5_performant_access = {
+	SA5_submit_command,
+	SA5_performant_intr_mask,
+	SA5_fifo_full,
+	SA5_performant_intr_pending,
+	SA5_performant_completed,
+};
+
 struct board_type {
-	__u32	board_id;
+	u32	board_id;
 	char	*product_name;
 	struct access_method *access;
 };
 
-
-/* end of old hpsa_scsi.h file */
-
 #endif /* HPSA_H */
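
The SA5_performant_completed() routine added above consumes a reply ring whose
entries carry a per-pass toggle in their low bit, so the host can tell fresh
completions from stale ones without the controller ever clearing consumed
slots.  The following is a minimal, self-contained sketch of that consumption
scheme, not the hpsa code itself; every identifier here (demo_ring, demo_pop,
DEMO_RING_EMPTY) is hypothetical.

/*
 * Illustrative sketch of a toggle-bit reply ring consumer.  The producer
 * writes 64-bit completion tags into the ring and flips the low bit of the
 * values it writes on every pass, so an entry is "fresh" only while its low
 * bit matches the pass the consumer expects.
 */
#include <stdint.h>
#include <stddef.h>

#define DEMO_RING_EMPTY	((uint64_t)-1)

struct demo_ring {
	uint64_t *base;		/* reply buffer shared with the device */
	uint64_t *head;		/* next entry to examine */
	size_t nentries;	/* ring size == max outstanding commands */
	unsigned char wrap;	/* expected value of the per-pass bit */
};

static uint64_t demo_pop(struct demo_ring *r)
{
	uint64_t val;

	/* Entry is fresh only if its low bit matches the current pass. */
	if ((*r->head & 1) != r->wrap)
		return DEMO_RING_EMPTY;

	val = *r->head++;

	/* On wraparound the expected pass bit flips, as in the driver. */
	if (r->head == r->base + r->nentries) {
		r->head = r->base;
		r->wrap ^= 1;
	}
	return val;
}
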
 
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index 12d7138..3e0abdf 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -101,19 +101,20 @@
 #define CFGTBL_AccCmds          0x00000001l
 
 #define CFGTBL_Trans_Simple     0x00000002l
+#define CFGTBL_Trans_Performant 0x00000004l
 
 #define CFGTBL_BusType_Ultra2   0x00000001l
 #define CFGTBL_BusType_Ultra3   0x00000002l
 #define CFGTBL_BusType_Fibre1G  0x00000100l
 #define CFGTBL_BusType_Fibre2G  0x00000200l
 struct vals32 {
-	__u32   lower;
-	__u32   upper;
+	u32   lower;
+	u32   upper;
 };
 
 union u64bit {
 	struct vals32 val32;
-	__u64 val;
+	u64 val;
 };
 
 /* FIXME this is a per controller value (barf!) */
@@ -126,34 +127,34 @@
 
 #define HPSA_INQUIRY 0x12
 struct InquiryData {
-	__u8 data_byte[36];
+	u8 data_byte[36];
 };
 
 #define HPSA_REPORT_LOG 0xc2    /* Report Logical LUNs */
 #define HPSA_REPORT_PHYS 0xc3   /* Report Physical LUNs */
 struct ReportLUNdata {
-	__u8 LUNListLength[4];
-	__u32 reserved;
-	__u8 LUN[HPSA_MAX_LUN][8];
+	u8 LUNListLength[4];
+	u32 reserved;
+	u8 LUN[HPSA_MAX_LUN][8];
 };
 
 struct ReportExtendedLUNdata {
-	__u8 LUNListLength[4];
-	__u8 extended_response_flag;
-	__u8 reserved[3];
-	__u8 LUN[HPSA_MAX_LUN][24];
+	u8 LUNListLength[4];
+	u8 extended_response_flag;
+	u8 reserved[3];
+	u8 LUN[HPSA_MAX_LUN][24];
 };
 
 struct SenseSubsystem_info {
-	__u8 reserved[36];
-	__u8 portname[8];
-	__u8 reserved1[1108];
+	u8 reserved[36];
+	u8 portname[8];
+	u8 reserved1[1108];
 };
 
 #define HPSA_READ_CAPACITY 0x25 /* Read Capacity */
 struct ReadCapdata {
-	__u8 total_size[4];	/* Total size in blocks */
-	__u8 block_size[4];	/* Size of blocks in bytes */
+	u8 total_size[4];	/* Total size in blocks */
+	u8 block_size[4];	/* Size of blocks in bytes */
 };
 
 #if 0
@@ -174,112 +175,131 @@
 /* Command List Structure */
 union SCSI3Addr {
 	struct {
-		__u8 Dev;
-		__u8 Bus:6;
-		__u8 Mode:2;        /* b00 */
+		u8 Dev;
+		u8 Bus:6;
+		u8 Mode:2;        /* b00 */
 	} PeripDev;
 	struct {
-		__u8 DevLSB;
-		__u8 DevMSB:6;
-		__u8 Mode:2;        /* b01 */
+		u8 DevLSB;
+		u8 DevMSB:6;
+		u8 Mode:2;        /* b01 */
 	} LogDev;
 	struct {
-		__u8 Dev:5;
-		__u8 Bus:3;
-		__u8 Targ:6;
-		__u8 Mode:2;        /* b10 */
+		u8 Dev:5;
+		u8 Bus:3;
+		u8 Targ:6;
+		u8 Mode:2;        /* b10 */
 	} LogUnit;
 };
 
 struct PhysDevAddr {
-	__u32             TargetId:24;
-	__u32             Bus:6;
-	__u32             Mode:2;
+	u32             TargetId:24;
+	u32             Bus:6;
+	u32             Mode:2;
 	/* 2 level target device addr */
 	union SCSI3Addr  Target[2];
 };
 
 struct LogDevAddr {
-	__u32            VolId:30;
-	__u32            Mode:2;
-	__u8             reserved[4];
+	u32            VolId:30;
+	u32            Mode:2;
+	u8             reserved[4];
 };
 
 union LUNAddr {
-	__u8               LunAddrBytes[8];
+	u8               LunAddrBytes[8];
 	union SCSI3Addr    SCSI3Lun[4];
 	struct PhysDevAddr PhysDev;
 	struct LogDevAddr  LogDev;
 };
 
 struct CommandListHeader {
-	__u8              ReplyQueue;
-	__u8              SGList;
-	__u16             SGTotal;
+	u8              ReplyQueue;
+	u8              SGList;
+	u16             SGTotal;
 	struct vals32     Tag;
 	union LUNAddr     LUN;
 };
 
 struct RequestBlock {
-	__u8   CDBLen;
+	u8   CDBLen;
 	struct {
-		__u8 Type:3;
-		__u8 Attribute:3;
-		__u8 Direction:2;
+		u8 Type:3;
+		u8 Attribute:3;
+		u8 Direction:2;
 	} Type;
-	__u16  Timeout;
-	__u8   CDB[16];
+	u16  Timeout;
+	u8   CDB[16];
 };
 
 struct ErrDescriptor {
 	struct vals32 Addr;
-	__u32  Len;
+	u32  Len;
 };
 
 struct SGDescriptor {
 	struct vals32 Addr;
-	__u32  Len;
-	__u32  Ext;
+	u32  Len;
+	u32  Ext;
 };
 
 union MoreErrInfo {
 	struct {
-		__u8  Reserved[3];
-		__u8  Type;
-		__u32 ErrorInfo;
+		u8  Reserved[3];
+		u8  Type;
+		u32 ErrorInfo;
 	} Common_Info;
 	struct {
-		__u8  Reserved[2];
-		__u8  offense_size; /* size of offending entry */
-		__u8  offense_num;  /* byte # of offense 0-base */
-		__u32 offense_value;
+		u8  Reserved[2];
+		u8  offense_size; /* size of offending entry */
+		u8  offense_num;  /* byte # of offense 0-base */
+		u32 offense_value;
 	} Invalid_Cmd;
 };
 struct ErrorInfo {
-	__u8               ScsiStatus;
-	__u8               SenseLen;
-	__u16              CommandStatus;
-	__u32              ResidualCnt;
+	u8               ScsiStatus;
+	u8               SenseLen;
+	u16              CommandStatus;
+	u32              ResidualCnt;
 	union MoreErrInfo  MoreErrInfo;
-	__u8               SenseInfo[SENSEINFOBYTES];
+	u8               SenseInfo[SENSEINFOBYTES];
 };
 /* Command types */
 #define CMD_IOCTL_PEND  0x01
 #define CMD_SCSI	0x03
 
-struct ctlr_info; /* defined in hpsa.h */
-/* The size of this structure needs to be divisible by 8
- * od on all architectures, because the controller uses 2
- * lower bits of the address, and the driver uses 1 lower
- * bit (3 bits total.)
+/* This structure needs to be divisible by 32 for new
+ * indexing method and performant mode.
  */
+#define PAD32 32
+#define PAD64DIFF 0
+#define USEEXTRA ((sizeof(void *) - 4)/4)
+#define PADSIZE (PAD32 + PAD64DIFF * USEEXTRA)
+
+#define DIRECT_LOOKUP_SHIFT 5
+#define DIRECT_LOOKUP_BIT 0x10
+
+#define HPSA_ERROR_BIT          0x02
+struct ctlr_info; /* defined in hpsa.h */
+/* The size of this structure needs to be divisible by 32
+ * on all architectures because low 5 bits of the addresses
+ * are used as follows:
+ *
+ * bit 0: to device, used to indicate "performant mode" command
+ *        from device, indicates error status.
+ * bit 1-3: to device, indicates block fetch table entry for
+ *          reducing DMA in fetching commands from host memory.
+ * bit 4: used to indicate whether tag is "direct lookup" (index),
+ *        or a bus address.
+ */
+
 struct CommandList {
 	struct CommandListHeader Header;
 	struct RequestBlock      Request;
 	struct ErrDescriptor     ErrDesc;
 	struct SGDescriptor      SG[MAXSGENTRIES];
 	/* information associated with the command */
-	__u32			   busaddr; /* physical addr of this record */
+	u32			   busaddr; /* physical addr of this record */
 	struct ErrorInfo *err_info; /* pointer to the allocated mem */
 	struct ctlr_info	   *h;
 	int			   cmd_type;
@@ -291,35 +311,63 @@
 	struct completion *waiting;
 	int	 retry_count;
 	void   *scsi_cmd;
+
+/* On 64-bit architectures this structure happens to be 32-byte aligned
+ * with no padding; on 32-bit systems we need 8 bytes of padding.
+ * This provides it.
+ */
+#define COMMANDLIST_PAD ((8 - sizeof(long))/4 * 8)
+	u8 pad[COMMANDLIST_PAD];
+
 };
 
 /* Configuration Table Structure */
 struct HostWrite {
-	__u32 TransportRequest;
-	__u32 Reserved;
-	__u32 CoalIntDelay;
-	__u32 CoalIntCount;
+	u32 TransportRequest;
+	u32 Reserved;
+	u32 CoalIntDelay;
+	u32 CoalIntCount;
 };
 
+#define SIMPLE_MODE     0x02
+#define PERFORMANT_MODE 0x04
+#define MEMQ_MODE       0x08
+
 struct CfgTable {
-	__u8             Signature[4];
-	__u32            SpecValence;
-	__u32            TransportSupport;
-	__u32            TransportActive;
-	struct HostWrite HostWrite;
-	__u32            CmdsOutMax;
-	__u32            BusTypes;
-	__u32            Reserved;
-	__u8             ServerName[16];
-	__u32            HeartBeat;
-	__u32            SCSI_Prefetch;
+	u8		Signature[4];
+	u32		SpecValence;
+	u32		TransportSupport;
+	u32		TransportActive;
+	struct HostWrite HostWrite;
+	u32		CmdsOutMax;
+	u32		BusTypes;
+	u32		TransMethodOffset;
+	u8		ServerName[16];
+	u32		HeartBeat;
+	u32		SCSI_Prefetch;
+	u32		MaxScatterGatherElements;
+	u32		MaxLogicalUnits;
+	u32		MaxPhysicalDevices;
+	u32		MaxPhysicalDrivesPerLogicalUnit;
+	u32		MaxPerformantModeCommands;
+};
+
+#define NUM_BLOCKFETCH_ENTRIES 8
+struct TransTable_struct {
+	u32            BlockFetch[NUM_BLOCKFETCH_ENTRIES];
+	u32            RepQSize;
+	u32            RepQCount;
+	u32            RepQCtrAddrLow32;
+	u32            RepQCtrAddrHigh32;
+	u32            RepQAddr0Low32;
+	u32            RepQAddr0High32;
 };
 
 struct hpsa_pci_info {
 	unsigned char	bus;
 	unsigned char	dev_fn;
 	unsigned short	domain;
-	__u32		board_id;
+	u32		board_id;
 };
 
 #pragma pack()
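
The CommandList comment above states that the low 5 bits of the command
address/tag are an encoding: bit 0 marks a performant-mode command (and
carries error status on completion), bits 1-3 select a block fetch table
entry, and bit 4 flags a "direct lookup" index rather than a bus address,
which is why DIRECT_LOOKUP_SHIFT is 5 and the structure must be 32-byte
aligned.  A hedged sketch of how such a tag could be packed and unpacked
follows; only the two DIRECT_LOOKUP values come from the header itself, the
demo_* helpers are invented for illustration.

#include <stdint.h>

#define DEMO_DIRECT_LOOKUP_SHIFT	5	/* mirrors DIRECT_LOOKUP_SHIFT */
#define DEMO_DIRECT_LOOKUP_BIT		0x10	/* mirrors DIRECT_LOOKUP_BIT */

/*
 * Build a direct-lookup tag: the command index lives above bit 4, bit 4
 * says "this is an index, not a bus address", and bits 1-3 carry the
 * block fetch table entry described in the comment.
 */
static inline uint32_t demo_make_tag(uint32_t cmd_index,
				     uint32_t block_fetch_entry)
{
	return (cmd_index << DEMO_DIRECT_LOOKUP_SHIFT) |
	       DEMO_DIRECT_LOOKUP_BIT |
	       ((block_fetch_entry & 0x7) << 1);
}

/* Recover the command index from a completed tag. */
static inline uint32_t demo_tag_to_index(uint32_t tag)
{
	return tag >> DEMO_DIRECT_LOOKUP_SHIFT;
}
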
diff --git a/drivers/scsi/ibmmca.c b/drivers/scsi/ibmmca.c
index 9c1e6a5..9a4b69d 100644
--- a/drivers/scsi/ibmmca.c
+++ b/drivers/scsi/ibmmca.c
@@ -2336,7 +2336,7 @@
 	char *cur = str;
 	int i = 1;
 
-	while (cur && isdigit(*cur) && i <= IM_MAX_HOSTS) {
+	while (cur && isdigit(*cur) && i < IM_MAX_HOSTS) {
 		ints[i++] = simple_strtoul(cur, NULL, 0);
 		if ((cur = strchr(cur, ',')) != NULL)
 			cur++;
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index e475b79..e3a18e0 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -40,7 +40,7 @@
  * (CRQ), which is just a buffer of 16 byte entries in the receiver's memory.
  * Senders cannot access the buffer directly, but send messages by
  * making a hypervisor call and passing in the 16 bytes.  The hypervisor
- * puts the message in the next 16 byte space in round-robbin fashion,
+ * puts the message in the next 16 byte space in round-robin fashion,
  * turns on the high order bit of the message (the valid bit), and 
  * generates an interrupt to the receiver (if interrupts are turned on.) 
  * The receiver just turns off the valid bit when they have copied out
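
The CRQ comment touched above describes a ring of 16-byte entries filled
round-robin by the hypervisor, with the high-order bit of an entry acting as
the valid bit that the receiver clears after copying the message out.  Below
is a minimal sketch of that consumption model under those assumptions; it is
not the ibmvscsi implementation, and all names (demo_crq, demo_crq_pop) are
hypothetical.

#include <stdint.h>
#include <string.h>

struct demo_crq_entry {
	uint8_t valid;		/* 0x80 set by the sender when the entry is ready */
	uint8_t data[15];
};

struct demo_crq {
	struct demo_crq_entry *ring;
	unsigned int size;
	unsigned int cur;
};

/* Copy out the next valid entry, if any; returns 1 on success, 0 if empty. */
static int demo_crq_pop(struct demo_crq *q, struct demo_crq_entry *out)
{
	struct demo_crq_entry *e = &q->ring[q->cur];

	if (!(e->valid & 0x80))
		return 0;		/* nothing new yet */

	memcpy(out, e, sizeof(*out));
	e->valid = 0;			/* hand the slot back to the sender */
	if (++q->cur == q->size)
		q->cur = 0;		/* round-robin wrap */
	return 1;
}
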
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 517da3f..8a89ba9 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -584,9 +584,10 @@
 	struct iscsi_conn *conn = cls_conn->dd_data;
 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
 	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
+	struct socket *sock = tcp_sw_conn->sock;
 
 	/* userspace may have goofed up and not bound us */
-	if (!tcp_sw_conn->sock)
+	if (!sock)
 		return;
 	/*
 	 * Make sure our recv side is stopped.
@@ -597,6 +598,11 @@
 	set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
 	write_unlock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock);
 
+	if (sock->sk->sk_sleep && waitqueue_active(sock->sk->sk_sleep)) {
+		sock->sk->sk_err = EIO;
+		wake_up_interruptible(sock->sk->sk_sleep);
+	}
+
 	iscsi_conn_stop(cls_conn, flag);
 	iscsi_sw_tcp_release_conn(conn);
 }
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index c28a712..703eb6a 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1919,10 +1919,11 @@
 static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
 {
 	enum blk_eh_timer_return rc = BLK_EH_NOT_HANDLED;
-	struct iscsi_task *task = NULL;
+	struct iscsi_task *task = NULL, *running_task;
 	struct iscsi_cls_session *cls_session;
 	struct iscsi_session *session;
 	struct iscsi_conn *conn;
+	int i;
 
 	cls_session = starget_to_session(scsi_target(sc->device));
 	session = cls_session->dd_data;
@@ -1947,8 +1948,15 @@
 	}
 
 	task = (struct iscsi_task *)sc->SCp.ptr;
-	if (!task)
+	if (!task) {
+		/*
+		 * Raced with completion. Just reset timer, and let it
+		 * complete normally
+		 */
+		rc = BLK_EH_RESET_TIMER;
 		goto done;
+	}
+
 	/*
 	 * If we have sent (at least queued to the network layer) a pdu or
 	 * recvd one for the task since the last timeout ask for
@@ -1956,10 +1964,10 @@
 	 * we can check if it is the task or connection when we send the
 	 * nop as a ping.
 	 */
-	if (time_after_eq(task->last_xfer, task->last_timeout)) {
+	if (time_after(task->last_xfer, task->last_timeout)) {
 		ISCSI_DBG_EH(session, "Command making progress. Asking "
 			     "scsi-ml for more time to complete. "
-			     "Last data recv at %lu. Last timeout was at "
+			     "Last data xfer at %lu. Last timeout was at "
 			     "%lu\n.", task->last_xfer, task->last_timeout);
 		task->have_checked_conn = false;
 		rc = BLK_EH_RESET_TIMER;
@@ -1977,6 +1985,43 @@
 		goto done;
 	}
 
+	for (i = 0; i < conn->session->cmds_max; i++) {
+		running_task = conn->session->cmds[i];
+		if (!running_task->sc || running_task == task ||
+		     running_task->state != ISCSI_TASK_RUNNING)
+			continue;
+
+		/*
+		 * Only check if cmds started before this one have made
+		 * progress, or this could never fail
+		 */
+		if (time_after(running_task->sc->jiffies_at_alloc,
+			       task->sc->jiffies_at_alloc))
+			continue;
+
+		if (time_after(running_task->last_xfer, task->last_timeout)) {
+			/*
+			 * This task has not made progress, but a task
+			 * started before us has transferred data since
+			 * we started/last-checked. We could be queueing
+			 * too many tasks or the LU is bad.
+			 *
+			 * If the device is bad the cmds ahead of us on
+			 * other devs will complete, and this loop will
+			 * eventually fail starting the scsi eh.
+			 */
+			ISCSI_DBG_EH(session, "Command has not made progress "
+				     "but commands ahead of it have. "
+				     "Asking scsi-ml for more time to "
+				     "complete. Our last xfer vs running task "
+				     "last xfer %lu/%lu. Last check %lu.\n",
+				     task->last_xfer, running_task->last_xfer,
+				     task->last_timeout);
+			rc = BLK_EH_RESET_TIMER;
+			goto done;
+		}
+	}
+
 	/* Assumes nop timeout is shorter than scsi cmd timeout */
 	if (task->have_checked_conn)
 		goto done;
diff --git a/drivers/scsi/libsrp.c b/drivers/scsi/libsrp.c
index ab19b3b..2277516 100644
--- a/drivers/scsi/libsrp.c
+++ b/drivers/scsi/libsrp.c
@@ -1,5 +1,5 @@
 /*
- * SCSI RDAM Protocol lib functions
+ * SCSI RDMA Protocol lib functions
  *
  * Copyright (C) 2006 FUJITA Tomonori <tomof@acm.org>
  *
@@ -328,7 +328,7 @@
 	int offset, err = 0;
 	u8 format;
 
-	offset = cmd->add_cdb_len * 4;
+	offset = cmd->add_cdb_len & ~3;
 
 	dir = srp_cmd_direction(cmd);
 	if (dir == DMA_FROM_DEVICE)
@@ -366,7 +366,7 @@
 {
 	struct srp_direct_buf *md;
 	struct srp_indirect_buf *id;
-	int len = 0, offset = cmd->add_cdb_len * 4;
+	int len = 0, offset = cmd->add_cdb_len & ~3;
 	u8 fmt;
 
 	if (dir == DMA_TO_DEVICE)
@@ -440,6 +440,6 @@
 }
 EXPORT_SYMBOL_GPL(srp_cmd_queue);
 
-MODULE_DESCRIPTION("SCSI RDAM Protocol lib functions");
+MODULE_DESCRIPTION("SCSI RDMA Protocol lib functions");
 MODULE_AUTHOR("FUJITA Tomonori");
 MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 1cc23a6..84b6964 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2010 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -315,6 +315,9 @@
 #define FC_VPORT_NEEDS_REG_VPI	0x80000  /* Needs to have its vpi registered */
 #define FC_RSCN_DEFERRED	0x100000 /* A deferred RSCN being processed */
 #define FC_VPORT_NEEDS_INIT_VPI 0x200000 /* Need to INIT_VPI before FDISC */
+#define FC_VPORT_CVL_RCVD	0x400000 /* VLink failed due to CVL	 */
+#define FC_VFI_REGISTERED	0x800000 /* VFI is registered */
+#define FC_FDISC_COMPLETED	0x1000000/* FDISC completed */
 
 	uint32_t ct_flags;
 #define FC_CT_RFF_ID		0x1	 /* RFF_ID accepted by switch */
@@ -448,6 +451,8 @@
 	uint32_t ctxt_id;
 	uint32_t SID;
 	uint32_t oxid;
+	uint32_t flags;
+#define UNSOL_VALID	0x00000001
 };
 
 struct lpfc_hba {
@@ -499,6 +504,10 @@
 		(struct lpfc_hba *);
 	void (*lpfc_stop_port)
 		(struct lpfc_hba *);
+	int (*lpfc_hba_init_link)
+		(struct lpfc_hba *);
+	int (*lpfc_hba_down_link)
+		(struct lpfc_hba *);
 
 
 	/* SLI4 specific HBA data structure */
@@ -613,6 +622,7 @@
 	uint32_t cfg_enable_bg;
 	uint32_t cfg_log_verbose;
 	uint32_t cfg_aer_support;
+	uint32_t cfg_suppress_link_up;
 
 	lpfc_vpd_t vpd;		/* vital product data */
 
@@ -790,7 +800,7 @@
 	uint16_t vlan_id;
 	struct list_head fcf_conn_rec_list;
 
-	struct mutex ct_event_mutex; /* synchronize access to ct_ev_waiters */
+	spinlock_t ct_ev_lock; /* synchronize access to ct_ev_waiters */
 	struct list_head ct_ev_waiters;
 	struct unsol_rcv_ct_ctx ct_ctx[64];
 	uint32_t ctx_idx;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 91542f7..c992e83 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -482,6 +482,41 @@
 }
 
 /**
+ * lpfc_link_state_store - Transition the link_state on an HBA port
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer containing the string "up" or "down".
+ * @count: not used.
+ *
+ * Returns:
+ * -EINVAL if the buffer is not "up" or "down"
+ * return from link state change function if non-zero
+ * length of the buf on success
+ **/
+static ssize_t
+lpfc_link_state_store(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	int status = -EINVAL;
+
+	if ((strncmp(buf, "up", sizeof("up") - 1) == 0) &&
+			(phba->link_state == LPFC_LINK_DOWN))
+		status = phba->lpfc_hba_init_link(phba);
+	else if ((strncmp(buf, "down", sizeof("down") - 1) == 0) &&
+			(phba->link_state >= LPFC_LINK_UP))
+		status = phba->lpfc_hba_down_link(phba);
+
+	if (status == 0)
+		return strlen(buf);
+	else
+		return status;
+}
+
+/**
  * lpfc_num_discovered_ports_show - Return sum of mapped and unmapped vports
  * @dev: class device that is converted into a Scsi_host.
  * @attr: device attribute, not used.
@@ -1219,7 +1254,7 @@
 	struct Scsi_Host  *shost = class_to_shost(dev);\
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
 	struct lpfc_hba   *phba = vport->phba;\
-	int val = 0;\
+	uint val = 0;\
 	val = phba->cfg_##attr;\
 	return snprintf(buf, PAGE_SIZE, "%d\n",\
 			phba->cfg_##attr);\
@@ -1247,7 +1282,7 @@
 	struct Scsi_Host  *shost = class_to_shost(dev);\
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
 	struct lpfc_hba   *phba = vport->phba;\
-	int val = 0;\
+	uint val = 0;\
 	val = phba->cfg_##attr;\
 	return snprintf(buf, PAGE_SIZE, "%#x\n",\
 			phba->cfg_##attr);\
@@ -1274,7 +1309,7 @@
  **/
 #define lpfc_param_init(attr, default, minval, maxval)	\
 static int \
-lpfc_##attr##_init(struct lpfc_hba *phba, int val) \
+lpfc_##attr##_init(struct lpfc_hba *phba, uint val) \
 { \
 	if (val >= minval && val <= maxval) {\
 		phba->cfg_##attr = val;\
@@ -1309,7 +1344,7 @@
  **/
 #define lpfc_param_set(attr, default, minval, maxval)	\
 static int \
-lpfc_##attr##_set(struct lpfc_hba *phba, int val) \
+lpfc_##attr##_set(struct lpfc_hba *phba, uint val) \
 { \
 	if (val >= minval && val <= maxval) {\
 		phba->cfg_##attr = val;\
@@ -1350,7 +1385,7 @@
 	struct Scsi_Host  *shost = class_to_shost(dev);\
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
 	struct lpfc_hba   *phba = vport->phba;\
-	int val=0;\
+	uint val = 0;\
 	if (!isdigit(buf[0]))\
 		return -EINVAL;\
 	if (sscanf(buf, "%i", &val) != 1)\
@@ -1382,7 +1417,7 @@
 { \
 	struct Scsi_Host  *shost = class_to_shost(dev);\
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
-	int val = 0;\
+	uint val = 0;\
 	val = vport->cfg_##attr;\
 	return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_##attr);\
 }
@@ -1409,7 +1444,7 @@
 { \
 	struct Scsi_Host  *shost = class_to_shost(dev);\
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
-	int val = 0;\
+	uint val = 0;\
 	val = vport->cfg_##attr;\
 	return snprintf(buf, PAGE_SIZE, "%#x\n", vport->cfg_##attr);\
 }
@@ -1434,7 +1469,7 @@
  **/
 #define lpfc_vport_param_init(attr, default, minval, maxval)	\
 static int \
-lpfc_##attr##_init(struct lpfc_vport *vport, int val) \
+lpfc_##attr##_init(struct lpfc_vport *vport, uint val) \
 { \
 	if (val >= minval && val <= maxval) {\
 		vport->cfg_##attr = val;\
@@ -1466,7 +1501,7 @@
  **/
 #define lpfc_vport_param_set(attr, default, minval, maxval)	\
 static int \
-lpfc_##attr##_set(struct lpfc_vport *vport, int val) \
+lpfc_##attr##_set(struct lpfc_vport *vport, uint val) \
 { \
 	if (val >= minval && val <= maxval) {\
 		vport->cfg_##attr = val;\
@@ -1502,7 +1537,7 @@
 { \
 	struct Scsi_Host  *shost = class_to_shost(dev);\
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
-	int val=0;\
+	uint val = 0;\
 	if (!isdigit(buf[0]))\
 		return -EINVAL;\
 	if (sscanf(buf, "%i", &val) != 1)\
@@ -1515,22 +1550,22 @@
 
 
 #define LPFC_ATTR(name, defval, minval, maxval, desc) \
-static int lpfc_##name = defval;\
-module_param(lpfc_##name, int, 0);\
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, 0);\
 MODULE_PARM_DESC(lpfc_##name, desc);\
 lpfc_param_init(name, defval, minval, maxval)
 
 #define LPFC_ATTR_R(name, defval, minval, maxval, desc) \
-static int lpfc_##name = defval;\
-module_param(lpfc_##name, int, 0);\
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, 0);\
 MODULE_PARM_DESC(lpfc_##name, desc);\
 lpfc_param_show(name)\
 lpfc_param_init(name, defval, minval, maxval)\
 static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
 
 #define LPFC_ATTR_RW(name, defval, minval, maxval, desc) \
-static int lpfc_##name = defval;\
-module_param(lpfc_##name, int, 0);\
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, 0);\
 MODULE_PARM_DESC(lpfc_##name, desc);\
 lpfc_param_show(name)\
 lpfc_param_init(name, defval, minval, maxval)\
@@ -1540,16 +1575,16 @@
 		   lpfc_##name##_show, lpfc_##name##_store)
 
 #define LPFC_ATTR_HEX_R(name, defval, minval, maxval, desc) \
-static int lpfc_##name = defval;\
-module_param(lpfc_##name, int, 0);\
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, 0);\
 MODULE_PARM_DESC(lpfc_##name, desc);\
 lpfc_param_hex_show(name)\
 lpfc_param_init(name, defval, minval, maxval)\
 static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
 
 #define LPFC_ATTR_HEX_RW(name, defval, minval, maxval, desc) \
-static int lpfc_##name = defval;\
-module_param(lpfc_##name, int, 0);\
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, 0);\
 MODULE_PARM_DESC(lpfc_##name, desc);\
 lpfc_param_hex_show(name)\
 lpfc_param_init(name, defval, minval, maxval)\
@@ -1559,22 +1594,22 @@
 		   lpfc_##name##_show, lpfc_##name##_store)
 
 #define LPFC_VPORT_ATTR(name, defval, minval, maxval, desc) \
-static int lpfc_##name = defval;\
-module_param(lpfc_##name, int, 0);\
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, 0);\
 MODULE_PARM_DESC(lpfc_##name, desc);\
 lpfc_vport_param_init(name, defval, minval, maxval)
 
 #define LPFC_VPORT_ATTR_R(name, defval, minval, maxval, desc) \
-static int lpfc_##name = defval;\
-module_param(lpfc_##name, int, 0);\
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, 0);\
 MODULE_PARM_DESC(lpfc_##name, desc);\
 lpfc_vport_param_show(name)\
 lpfc_vport_param_init(name, defval, minval, maxval)\
 static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
 
 #define LPFC_VPORT_ATTR_RW(name, defval, minval, maxval, desc) \
-static int lpfc_##name = defval;\
-module_param(lpfc_##name, int, 0);\
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, 0);\
 MODULE_PARM_DESC(lpfc_##name, desc);\
 lpfc_vport_param_show(name)\
 lpfc_vport_param_init(name, defval, minval, maxval)\
@@ -1584,16 +1619,16 @@
 		   lpfc_##name##_show, lpfc_##name##_store)
 
 #define LPFC_VPORT_ATTR_HEX_R(name, defval, minval, maxval, desc) \
-static int lpfc_##name = defval;\
-module_param(lpfc_##name, int, 0);\
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, 0);\
 MODULE_PARM_DESC(lpfc_##name, desc);\
 lpfc_vport_param_hex_show(name)\
 lpfc_vport_param_init(name, defval, minval, maxval)\
 static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
 
 #define LPFC_VPORT_ATTR_HEX_RW(name, defval, minval, maxval, desc) \
-static int lpfc_##name = defval;\
-module_param(lpfc_##name, int, 0);\
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, 0);\
 MODULE_PARM_DESC(lpfc_##name, desc);\
 lpfc_vport_param_hex_show(name)\
 lpfc_vport_param_init(name, defval, minval, maxval)\
@@ -1614,7 +1649,8 @@
 static DEVICE_ATTR(portnum, S_IRUGO, lpfc_vportnum_show, NULL);
 static DEVICE_ATTR(fwrev, S_IRUGO, lpfc_fwrev_show, NULL);
 static DEVICE_ATTR(hdw, S_IRUGO, lpfc_hdw_show, NULL);
-static DEVICE_ATTR(link_state, S_IRUGO, lpfc_link_state_show, NULL);
+static DEVICE_ATTR(link_state, S_IRUGO | S_IWUSR, lpfc_link_state_show,
+		lpfc_link_state_store);
 static DEVICE_ATTR(option_rom_version, S_IRUGO,
 		   lpfc_option_rom_version_show, NULL);
 static DEVICE_ATTR(num_discovered_ports, S_IRUGO,
@@ -1897,6 +1933,15 @@
 			 lpfc_enable_npiv_show, NULL);
 
 /*
+# lpfc_suppress_link_up: controls whether the link is brought up at initialization
+#            0x0  = bring link up (issue MBX_INIT_LINK)
+#            0x1  = do NOT bring link up at initialization (MBX_INIT_LINK)
+#            0x2  = never bring up link
+# Default value is 0.
+*/
+LPFC_ATTR_R(suppress_link_up, 0, 0, 2, "Suppress Link Up at initialization");
+
+/*
 # lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
 # until the timer expires. Value range is [0,255]. Default value is 30.
 */
@@ -3114,12 +3159,12 @@
 /*
 # lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that
 #		support this feature
-#       0  = MSI disabled (default)
+#       0  = MSI disabled
 #       1  = MSI enabled
-#       2  = MSI-X enabled
-# Value range is [0,2]. Default value is 0.
+#       2  = MSI-X enabled (default)
+# Value range is [0,2]. Default value is 2.
 */
-LPFC_ATTR_R(use_msi, 0, 0, 2, "Use Message Signaled Interrupts (1) or "
+LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or "
 	    "MSI-X (2), if possible");
 
 /*
@@ -3278,6 +3323,7 @@
 	&dev_attr_lpfc_prot_sg_seg_cnt,
 	&dev_attr_lpfc_aer_support,
 	&dev_attr_lpfc_aer_state_cleanup,
+	&dev_attr_lpfc_suppress_link_up,
 	NULL,
 };
 
@@ -4456,7 +4502,7 @@
 	lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
 	lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
 	lpfc_aer_support_init(phba, lpfc_aer_support);
-
+	lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up);
 	return;
 }
 
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index a5d9048..f3f1bf1 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2009 Emulex.  All rights reserved.                *
+ * Copyright (C) 2009-2010 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
@@ -21,6 +21,7 @@
 #include <linux/interrupt.h>
 #include <linux/mempool.h>
 #include <linux/pci.h>
+#include <linux/delay.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_host.h>
@@ -33,6 +34,7 @@
 #include "lpfc_sli.h"
 #include "lpfc_sli4.h"
 #include "lpfc_nl.h"
+#include "lpfc_bsg.h"
 #include "lpfc_disc.h"
 #include "lpfc_scsi.h"
 #include "lpfc.h"
@@ -41,14 +43,183 @@
 #include "lpfc_vport.h"
 #include "lpfc_version.h"
 
+struct lpfc_bsg_event {
+	struct list_head node;
+	struct kref kref;
+	wait_queue_head_t wq;
+
+	/* Event type and waiter identifiers */
+	uint32_t type_mask;
+	uint32_t req_id;
+	uint32_t reg_id;
+
+	/* next two flags are here for the auto-delete logic */
+	unsigned long wait_time_stamp;
+	int waiting;
+
+	/* seen and not seen events */
+	struct list_head events_to_get;
+	struct list_head events_to_see;
+
+	/* job waiting for this event to finish */
+	struct fc_bsg_job *set_job;
+};
+
+struct lpfc_bsg_iocb {
+	struct lpfc_iocbq *cmdiocbq;
+	struct lpfc_iocbq *rspiocbq;
+	struct lpfc_dmabuf *bmp;
+	struct lpfc_nodelist *ndlp;
+
+	/* job waiting for this iocb to finish */
+	struct fc_bsg_job *set_job;
+};
+
+struct lpfc_bsg_mbox {
+	LPFC_MBOXQ_t *pmboxq;
+	MAILBOX_t *mb;
+
+	/* job waiting for this mbox command to finish */
+	struct fc_bsg_job *set_job;
+};
+
+#define TYPE_EVT	1
+#define TYPE_IOCB	2
+#define TYPE_MBOX	3
+struct bsg_job_data {
+	uint32_t type;
+	union {
+		struct lpfc_bsg_event *evt;
+		struct lpfc_bsg_iocb iocb;
+		struct lpfc_bsg_mbox mbox;
+	} context_un;
+};
+
+struct event_data {
+	struct list_head node;
+	uint32_t type;
+	uint32_t immed_dat;
+	void *data;
+	uint32_t len;
+};
+
+#define BUF_SZ_4K 4096
+#define SLI_CT_ELX_LOOPBACK 0x10
+
+enum ELX_LOOPBACK_CMD {
+	ELX_LOOPBACK_XRI_SETUP,
+	ELX_LOOPBACK_DATA,
+};
+
+#define ELX_LOOPBACK_HEADER_SZ \
+	(size_t)(&((struct lpfc_sli_ct_request *)NULL)->un)
+
+struct lpfc_dmabufext {
+	struct lpfc_dmabuf dma;
+	uint32_t size;
+	uint32_t flag;
+};
+
 /**
- * lpfc_bsg_rport_ct - send a CT command from a bsg request
- * @job: fc_bsg_job to handle
- */
-static int
-lpfc_bsg_rport_ct(struct fc_bsg_job *job)
+ * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler
+ * @phba: Pointer to HBA context object.
+ * @cmdiocbq: Pointer to command iocb.
+ * @rspiocbq: Pointer to response iocb.
+ *
+ * This function is the completion handler for iocbs issued using
+ * lpfc_bsg_send_mgmt_cmd function. This function is called by the
+ * ring event handler function without any lock held. This function
+ * can be called from both worker thread context and interrupt
+ * context. This function also can be called from another thread which
+ * cleans up the SLI layer objects.
+ * This function copies the contents of the response iocb to the
+ * response iocb memory object provided by the caller of
+ * lpfc_sli_issue_iocb_wait and then wakes up the thread which
+ * sleeps for the iocb completion.
+ **/
+static void
+lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
+			struct lpfc_iocbq *cmdiocbq,
+			struct lpfc_iocbq *rspiocbq)
 {
-	struct Scsi_Host *shost = job->shost;
+	unsigned long iflags;
+	struct bsg_job_data *dd_data;
+	struct fc_bsg_job *job;
+	IOCB_t *rsp;
+	struct lpfc_dmabuf *bmp;
+	struct lpfc_nodelist *ndlp;
+	struct lpfc_bsg_iocb *iocb;
+	unsigned long flags;
+	int rc = 0;
+
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
+	dd_data = cmdiocbq->context1;
+	if (!dd_data) {
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		return;
+	}
+
+	iocb = &dd_data->context_un.iocb;
+	job = iocb->set_job;
+	job->dd_data = NULL; /* so timeout handler does not reply */
+
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
+	if (cmdiocbq->context2 && rspiocbq)
+		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
+		       &rspiocbq->iocb, sizeof(IOCB_t));
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+	bmp = iocb->bmp;
+	rspiocbq = iocb->rspiocbq;
+	rsp = &rspiocbq->iocb;
+	ndlp = iocb->ndlp;
+
+	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
+		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
+	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
+		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+
+	if (rsp->ulpStatus) {
+		if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
+			switch (rsp->un.ulpWord[4] & 0xff) {
+			case IOERR_SEQUENCE_TIMEOUT:
+				rc = -ETIMEDOUT;
+				break;
+			case IOERR_INVALID_RPI:
+				rc = -EFAULT;
+				break;
+			default:
+				rc = -EACCES;
+				break;
+			}
+		} else
+			rc = -EACCES;
+	} else
+		job->reply->reply_payload_rcv_len =
+			rsp->un.genreq64.bdl.bdeSize;
+
+	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+	lpfc_sli_release_iocbq(phba, rspiocbq);
+	lpfc_sli_release_iocbq(phba, cmdiocbq);
+	lpfc_nlp_put(ndlp);
+	kfree(bmp);
+	kfree(dd_data);
+	/* make error code available to userspace */
+	job->reply->result = rc;
+	/* complete the job back to userspace */
+	job->job_done(job);
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+	return;
+}
+
+/**
+ * lpfc_bsg_send_mgmt_cmd - send a CT command from a bsg request
+ * @job: fc_bsg_job to handle
+ **/
+static int
+lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
+{
 	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
 	struct lpfc_hba *phba = vport->phba;
 	struct lpfc_rport_data *rdata = job->rport->dd_data;
@@ -65,57 +236,60 @@
 	struct scatterlist *sgel = NULL;
 	int numbde;
 	dma_addr_t busaddr;
+	struct bsg_job_data *dd_data;
+	uint32_t creg_val;
 	int rc = 0;
 
 	/* in case no data is transferred */
 	job->reply->reply_payload_rcv_len = 0;
 
+	/* allocate our bsg tracking structure */
+	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+	if (!dd_data) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2733 Failed allocation of dd_data\n");
+		rc = -ENOMEM;
+		goto no_dd_data;
+	}
+
 	if (!lpfc_nlp_get(ndlp)) {
-		job->reply->result = -ENODEV;
-		return 0;
+		rc = -ENODEV;
+		goto no_ndlp;
+	}
+
+	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (!bmp) {
+		rc = -ENOMEM;
+		goto free_ndlp;
 	}
 
 	if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {
 		rc = -ENODEV;
-		goto free_ndlp_exit;
+		goto free_bmp;
 	}
 
-	spin_lock_irq(shost->host_lock);
 	cmdiocbq = lpfc_sli_get_iocbq(phba);
 	if (!cmdiocbq) {
 		rc = -ENOMEM;
-		spin_unlock_irq(shost->host_lock);
-		goto free_ndlp_exit;
+		goto free_bmp;
 	}
-	cmd = &cmdiocbq->iocb;
 
+	cmd = &cmdiocbq->iocb;
 	rspiocbq = lpfc_sli_get_iocbq(phba);
 	if (!rspiocbq) {
 		rc = -ENOMEM;
 		goto free_cmdiocbq;
 	}
-	spin_unlock_irq(shost->host_lock);
 
 	rsp = &rspiocbq->iocb;
-
-	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
-	if (!bmp) {
-		rc = -ENOMEM;
-		spin_lock_irq(shost->host_lock);
-		goto free_rspiocbq;
-	}
-
-	spin_lock_irq(shost->host_lock);
 	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
 	if (!bmp->virt) {
 		rc = -ENOMEM;
-		goto free_bmp;
+		goto free_rspiocbq;
 	}
-	spin_unlock_irq(shost->host_lock);
 
 	INIT_LIST_HEAD(&bmp->list);
 	bpl = (struct ulp_bde64 *) bmp->virt;
-
 	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
 				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
 	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
@@ -157,78 +331,152 @@
 	cmd->ulpContext = ndlp->nlp_rpi;
 	cmd->ulpOwner = OWN_CHIP;
 	cmdiocbq->vport = phba->pport;
-	cmdiocbq->context1 = NULL;
-	cmdiocbq->context2 = NULL;
+	cmdiocbq->context3 = bmp;
 	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
-
 	timeout = phba->fc_ratov * 2;
-	job->dd_data = cmdiocbq;
+	cmd->ulpTimeout = timeout;
 
-	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, rspiocbq,
-					timeout + LPFC_DRVR_TIMEOUT);
+	cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
+	cmdiocbq->context1 = dd_data;
+	cmdiocbq->context2 = rspiocbq;
+	dd_data->type = TYPE_IOCB;
+	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
+	dd_data->context_un.iocb.rspiocbq = rspiocbq;
+	dd_data->context_un.iocb.set_job = job;
+	dd_data->context_un.iocb.bmp = bmp;
+	dd_data->context_un.iocb.ndlp = ndlp;
 
-	if (rc != IOCB_TIMEDOUT) {
-		pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
-			     job->request_payload.sg_cnt, DMA_TO_DEVICE);
-		pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
-			     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
+		creg_val = readl(phba->HCregaddr);
+		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
+		writel(creg_val, phba->HCregaddr);
+		readl(phba->HCregaddr); /* flush */
 	}
 
-	if (rc == IOCB_TIMEDOUT) {
-		lpfc_sli_release_iocbq(phba, rspiocbq);
-		rc = -EACCES;
-		goto free_ndlp_exit;
-	}
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
 
-	if (rc != IOCB_SUCCESS) {
-		rc = -EACCES;
-		goto free_outdmp;
-	}
+	if (rc == IOCB_SUCCESS)
+		return 0; /* done for now */
 
-	if (rsp->ulpStatus) {
-		if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
-			switch (rsp->un.ulpWord[4] & 0xff) {
-			case IOERR_SEQUENCE_TIMEOUT:
-				rc = -ETIMEDOUT;
-				break;
-			case IOERR_INVALID_RPI:
-				rc = -EFAULT;
-				break;
-			default:
-				rc = -EACCES;
-				break;
-			}
-			goto free_outdmp;
-		}
-	} else
-		job->reply->reply_payload_rcv_len =
-			rsp->un.genreq64.bdl.bdeSize;
+	/* iocb failed so cleanup */
+	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
+		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
+	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
+		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
 
-free_outdmp:
-	spin_lock_irq(shost->host_lock);
 	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
-free_bmp:
-	kfree(bmp);
+
 free_rspiocbq:
 	lpfc_sli_release_iocbq(phba, rspiocbq);
 free_cmdiocbq:
 	lpfc_sli_release_iocbq(phba, cmdiocbq);
-	spin_unlock_irq(shost->host_lock);
-free_ndlp_exit:
+free_bmp:
+	kfree(bmp);
+free_ndlp:
 	lpfc_nlp_put(ndlp);
-
+no_ndlp:
+	kfree(dd_data);
+no_dd_data:
 	/* make error code available to userspace */
 	job->reply->result = rc;
+	job->dd_data = NULL;
+	return rc;
+}
+
+/**
+ * lpfc_bsg_rport_els_cmp - lpfc_bsg_rport_els's completion handler
+ * @phba: Pointer to HBA context object.
+ * @cmdiocbq: Pointer to command iocb.
+ * @rspiocbq: Pointer to response iocb.
+ *
+ * This function is the completion handler for iocbs issued using
+ * lpfc_bsg_rport_els_cmp function. This function is called by the
+ * ring event handler function without any lock held. This function
+ * can be called from both worker thread context and interrupt
+ * context. This function also can be called from other thread which
+ * cleans up the SLI layer objects.
+ * This function copies the contents of the response iocb to the
+ * response iocb memory object provided by the caller of
+ * lpfc_sli_issue_iocb_wait and then wakes up the thread which
+ * sleeps for the iocb completion.
+ **/
+static void
+lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
+			struct lpfc_iocbq *cmdiocbq,
+			struct lpfc_iocbq *rspiocbq)
+{
+	struct bsg_job_data *dd_data;
+	struct fc_bsg_job *job;
+	IOCB_t *rsp;
+	struct lpfc_nodelist *ndlp;
+	struct lpfc_dmabuf *pbuflist = NULL;
+	struct fc_bsg_ctels_reply *els_reply;
+	uint8_t *rjt_data;
+	unsigned long flags;
+	int rc = 0;
+
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
+	dd_data = cmdiocbq->context1;
+	/* normal completion and timeout crossed paths, already done */
+	if (!dd_data) {
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		return;
+	}
+
+	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
+	if (cmdiocbq->context2 && rspiocbq)
+		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
+		       &rspiocbq->iocb, sizeof(IOCB_t));
+
+	job = dd_data->context_un.iocb.set_job;
+	cmdiocbq = dd_data->context_un.iocb.cmdiocbq;
+	rspiocbq = dd_data->context_un.iocb.rspiocbq;
+	rsp = &rspiocbq->iocb;
+	ndlp = dd_data->context_un.iocb.ndlp;
+
+	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
+		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
+	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
+		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+
+	if (job->reply->result == -EAGAIN)
+		rc = -EAGAIN;
+	else if (rsp->ulpStatus == IOSTAT_SUCCESS)
+		job->reply->reply_payload_rcv_len =
+			rsp->un.elsreq64.bdl.bdeSize;
+	else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
+		job->reply->reply_payload_rcv_len =
+			sizeof(struct fc_bsg_ctels_reply);
+		/* LS_RJT data returned in word 4 */
+		rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
+		els_reply = &job->reply->reply_data.ctels_reply;
+		els_reply->status = FC_CTELS_STATUS_REJECT;
+		els_reply->rjt_data.action = rjt_data[3];
+		els_reply->rjt_data.reason_code = rjt_data[2];
+		els_reply->rjt_data.reason_explanation = rjt_data[1];
+		els_reply->rjt_data.vendor_unique = rjt_data[0];
+	} else
+		rc = -EIO;
+
+	pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
+	lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);
+	lpfc_sli_release_iocbq(phba, rspiocbq);
+	lpfc_sli_release_iocbq(phba, cmdiocbq);
+	lpfc_nlp_put(ndlp);
+	kfree(dd_data);
+	/* make error code available to userspace */
+	job->reply->result = rc;
+	job->dd_data = NULL;
 	/* complete the job back to userspace */
 	job->job_done(job);
-
-	return 0;
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+	return;
 }
 
 /**
  * lpfc_bsg_rport_els - send an ELS command from a bsg request
  * @job: fc_bsg_job to handle
- */
+ **/
 static int
 lpfc_bsg_rport_els(struct fc_bsg_job *job)
 {
@@ -236,7 +484,6 @@
 	struct lpfc_hba *phba = vport->phba;
 	struct lpfc_rport_data *rdata = job->rport->dd_data;
 	struct lpfc_nodelist *ndlp = rdata->pnode;
-
 	uint32_t elscmd;
 	uint32_t cmdsize;
 	uint32_t rspsize;
@@ -248,20 +495,30 @@
 	struct lpfc_dmabuf *prsp;
 	struct lpfc_dmabuf *pbuflist = NULL;
 	struct ulp_bde64 *bpl;
-	int iocb_status;
 	int request_nseg;
 	int reply_nseg;
 	struct scatterlist *sgel = NULL;
 	int numbde;
 	dma_addr_t busaddr;
+	struct bsg_job_data *dd_data;
+	uint32_t creg_val;
 	int rc = 0;
 
 	/* in case no data is transferred */
 	job->reply->reply_payload_rcv_len = 0;
 
+	/* allocate our bsg tracking structure */
+	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+	if (!dd_data) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2735 Failed allocation of dd_data\n");
+		rc = -ENOMEM;
+		goto no_dd_data;
+	}
+
 	if (!lpfc_nlp_get(ndlp)) {
 		rc = -ENODEV;
-		goto out;
+		goto free_dd_data;
 	}
 
 	elscmd = job->request->rqst_data.r_els.els_code;
@@ -271,24 +528,24 @@
 	if (!rspiocbq) {
 		lpfc_nlp_put(ndlp);
 		rc = -ENOMEM;
-		goto out;
+		goto free_dd_data;
 	}
 
 	rsp = &rspiocbq->iocb;
 	rpi = ndlp->nlp_rpi;
 
-	cmdiocbq = lpfc_prep_els_iocb(phba->pport, 1, cmdsize, 0, ndlp,
+	cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
 				      ndlp->nlp_DID, elscmd);
-
 	if (!cmdiocbq) {
-		lpfc_sli_release_iocbq(phba, rspiocbq);
-		return -EIO;
+		rc = -EIO;
+		goto free_rspiocbq;
 	}
 
-	job->dd_data = cmdiocbq;
+	/* prep els iocb set context1 to the ndlp, context2 to the command
+	 * dmabuf, context3 holds the data dmabuf
+	 */
 	pcmd = (struct lpfc_dmabuf *) cmdiocbq->context2;
 	prsp = (struct lpfc_dmabuf *) pcmd->list.next;
-
 	lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
 	kfree(pcmd);
 	lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
@@ -300,7 +557,6 @@
 
 	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
 				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
-
 	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
 		busaddr = sg_dma_address(sgel);
 		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
@@ -322,7 +578,6 @@
 		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
 		bpl++;
 	}
-
 	cmdiocbq->iocb.un.elsreq64.bdl.bdeSize =
 		(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
 	cmdiocbq->iocb.ulpContext = rpi;
@@ -330,102 +585,62 @@
 	cmdiocbq->context1 = NULL;
 	cmdiocbq->context2 = NULL;
 
-	iocb_status = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
-					rspiocbq, (phba->fc_ratov * 2)
-					       + LPFC_DRVR_TIMEOUT);
+	cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
+	cmdiocbq->context1 = dd_data;
+	cmdiocbq->context2 = rspiocbq;
+	dd_data->type = TYPE_IOCB;
+	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
+	dd_data->context_un.iocb.rspiocbq = rspiocbq;
+	dd_data->context_un.iocb.set_job = job;
+	dd_data->context_un.iocb.bmp = NULL;
+	dd_data->context_un.iocb.ndlp = ndlp;
 
-	/* release the new ndlp once the iocb completes */
-	lpfc_nlp_put(ndlp);
-	if (iocb_status != IOCB_TIMEDOUT) {
-		pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
-			     job->request_payload.sg_cnt, DMA_TO_DEVICE);
-		pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
-			     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
+		creg_val = readl(phba->HCregaddr);
+		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
+		writel(creg_val, phba->HCregaddr);
+		readl(phba->HCregaddr); /* flush */
 	}
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
+	lpfc_nlp_put(ndlp);
+	if (rc == IOCB_SUCCESS)
+		return 0; /* done for now */
 
-	if (iocb_status == IOCB_SUCCESS) {
-		if (rsp->ulpStatus == IOSTAT_SUCCESS) {
-			job->reply->reply_payload_rcv_len =
-				rsp->un.elsreq64.bdl.bdeSize;
-			rc = 0;
-		} else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
-			struct fc_bsg_ctels_reply *els_reply;
-			/* LS_RJT data returned in word 4 */
-			uint8_t *rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
+	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
+		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
+	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
+		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
 
-			els_reply = &job->reply->reply_data.ctels_reply;
-			job->reply->result = 0;
-			els_reply->status = FC_CTELS_STATUS_REJECT;
-			els_reply->rjt_data.action = rjt_data[0];
-			els_reply->rjt_data.reason_code = rjt_data[1];
-			els_reply->rjt_data.reason_explanation = rjt_data[2];
-			els_reply->rjt_data.vendor_unique = rjt_data[3];
-		} else
-			rc = -EIO;
-	} else
-		rc = -EIO;
+	lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);
 
-	if (iocb_status != IOCB_TIMEDOUT)
-		lpfc_els_free_iocb(phba, cmdiocbq);
+	lpfc_sli_release_iocbq(phba, cmdiocbq);
 
+free_rspiocbq:
 	lpfc_sli_release_iocbq(phba, rspiocbq);
 
-out:
+free_dd_data:
+	kfree(dd_data);
+
+no_dd_data:
 	/* make error code available to userspace */
 	job->reply->result = rc;
-	/* complete the job back to userspace */
-	job->job_done(job);
-
-	return 0;
+	job->dd_data = NULL;
+	return rc;
 }
 
-struct lpfc_ct_event {
-	struct list_head node;
-	int ref;
-	wait_queue_head_t wq;
-
-	/* Event type and waiter identifiers */
-	uint32_t type_mask;
-	uint32_t req_id;
-	uint32_t reg_id;
-
-	/* next two flags are here for the auto-delete logic */
-	unsigned long wait_time_stamp;
-	int waiting;
-
-	/* seen and not seen events */
-	struct list_head events_to_get;
-	struct list_head events_to_see;
-};
-
-struct event_data {
-	struct list_head node;
-	uint32_t type;
-	uint32_t immed_dat;
-	void *data;
-	uint32_t len;
-};
-
-static struct lpfc_ct_event *
-lpfc_ct_event_new(int ev_reg_id, uint32_t ev_req_id)
-{
-	struct lpfc_ct_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);
-	if (!evt)
-		return NULL;
-
-	INIT_LIST_HEAD(&evt->events_to_get);
-	INIT_LIST_HEAD(&evt->events_to_see);
-	evt->req_id = ev_req_id;
-	evt->reg_id = ev_reg_id;
-	evt->wait_time_stamp = jiffies;
-	init_waitqueue_head(&evt->wq);
-
-	return evt;
-}
-
+/**
+ * lpfc_bsg_event_free - frees an allocated event structure
+ * @kref: Pointer to a kref.
+ *
+ * Called from kref_put. Back cast the kref into an event structure address.
+ * Free any events to get, delete associated nodes, free any events to see,
+ * free any data then free the event itself.
+ **/
 static void
-lpfc_ct_event_free(struct lpfc_ct_event *evt)
+lpfc_bsg_event_free(struct kref *kref)
 {
+	struct lpfc_bsg_event *evt = container_of(kref, struct lpfc_bsg_event,
+						  kref);
 	struct event_data *ed;
 
 	list_del(&evt->node);
@@ -447,25 +662,82 @@
 	kfree(evt);
 }
 
+/**
+ * lpfc_bsg_event_ref - increments the kref for an event
+ * @evt: Pointer to an event structure.
+ **/
 static inline void
-lpfc_ct_event_ref(struct lpfc_ct_event *evt)
+lpfc_bsg_event_ref(struct lpfc_bsg_event *evt)
 {
-	evt->ref++;
+	kref_get(&evt->kref);
 }
 
+/**
+ * lpfc_bsg_event_unref - Uses kref_put to free an event structure
+ * @evt: Pointer to an event structure.
+ **/
 static inline void
-lpfc_ct_event_unref(struct lpfc_ct_event *evt)
+lpfc_bsg_event_unref(struct lpfc_bsg_event *evt)
 {
-	if (--evt->ref < 0)
-		lpfc_ct_event_free(evt);
+	kref_put(&evt->kref, lpfc_bsg_event_free);
 }
 
-#define SLI_CT_ELX_LOOPBACK 0x10
+/**
+ * lpfc_bsg_event_new - allocate and initialize a event structure
+ * @ev_mask: Mask of events.
+ * @ev_reg_id: Event reg id.
+ * @ev_req_id: Event request id.
+ **/
+static struct lpfc_bsg_event *
+lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id)
+{
+	struct lpfc_bsg_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);
 
-enum ELX_LOOPBACK_CMD {
-	ELX_LOOPBACK_XRI_SETUP,
-	ELX_LOOPBACK_DATA,
-};
+	if (!evt)
+		return NULL;
+
+	INIT_LIST_HEAD(&evt->events_to_get);
+	INIT_LIST_HEAD(&evt->events_to_see);
+	evt->type_mask = ev_mask;
+	evt->req_id = ev_req_id;
+	evt->reg_id = ev_reg_id;
+	evt->wait_time_stamp = jiffies;
+	init_waitqueue_head(&evt->wq);
+	kref_init(&evt->kref);
+	return evt;
+}
+
+/**
+ * diag_cmd_data_free - Frees an lpfc dma buffer extension
+ * @phba: Pointer to HBA context object.
+ * @mlist: Pointer to an lpfc dma buffer extension.
+ **/
+static int
+diag_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist)
+{
+	struct lpfc_dmabufext *mlast;
+	struct pci_dev *pcidev;
+	struct list_head head, *curr, *next;
+
+	if ((!mlist) || (!lpfc_is_link_up(phba) &&
+		(phba->link_flag & LS_LOOPBACK_MODE))) {
+		return 0;
+	}
+
+	pcidev = phba->pcidev;
+	list_add_tail(&head, &mlist->dma.list);
+
+	list_for_each_safe(curr, next, &head) {
+		mlast = list_entry(curr, struct lpfc_dmabufext , dma.list);
+		if (mlast->dma.virt)
+			dma_free_coherent(&pcidev->dev,
+					  mlast->size,
+					  mlast->dma.virt,
+					  mlast->dma.phys);
+		kfree(mlast);
+	}
+	return 0;
+}
 
 /**
  * lpfc_bsg_ct_unsol_event - process an unsolicited CT command
@@ -474,9 +746,9 @@
  * @piocbq:
  *
  * This function is called when an unsolicited CT command is received.  It
- * forwards the event to any processes registerd to receive CT events.
- */
-void
+ * forwards the event to any processes registered to receive CT events.
+ **/
+int
 lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 			struct lpfc_iocbq *piocbq)
 {
@@ -484,7 +756,7 @@
 	uint32_t cmd;
 	uint32_t len;
 	struct lpfc_dmabuf *dmabuf = NULL;
-	struct lpfc_ct_event *evt;
+	struct lpfc_bsg_event *evt;
 	struct event_data *evt_dat = NULL;
 	struct lpfc_iocbq *iocbq;
 	size_t offset = 0;
@@ -496,6 +768,9 @@
 	struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
 	struct lpfc_hbq_entry *hbqe;
 	struct lpfc_sli_ct_request *ct_req;
+	struct fc_bsg_job *job = NULL;
+	unsigned long flags;
+	int size = 0;
 
 	INIT_LIST_HEAD(&head);
 	list_add_tail(&head, &piocbq->list);
@@ -504,6 +779,10 @@
 	    piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0)
 		goto error_ct_unsol_exit;
 
+	if (phba->link_state == LPFC_HBA_ERROR ||
+		(!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)))
+		goto error_ct_unsol_exit;
+
 	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
 		dmabuf = bdeBuf1;
 	else {
@@ -511,7 +790,8 @@
 				    piocbq->iocb.un.cont64[0].addrLow);
 		dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr);
 	}
-
+	if (dmabuf == NULL)
+		goto error_ct_unsol_exit;
 	ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt;
 	evt_req_id = ct_req->FsType;
 	cmd = ct_req->CommandResponse.bits.CmdRsp;
@@ -519,24 +799,24 @@
 	if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
 		lpfc_sli_ringpostbuf_put(phba, pring, dmabuf);
 
-	mutex_lock(&phba->ct_event_mutex);
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
 	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
-		if (evt->req_id != evt_req_id)
+		if (!(evt->type_mask & FC_REG_CT_EVENT) ||
+			evt->req_id != evt_req_id)
 			continue;
 
-		lpfc_ct_event_ref(evt);
-
+		lpfc_bsg_event_ref(evt);
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 		evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL);
-		if (!evt_dat) {
-			lpfc_ct_event_unref(evt);
+		if (evt_dat == NULL) {
+			spin_lock_irqsave(&phba->ct_ev_lock, flags);
+			lpfc_bsg_event_unref(evt);
 			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
 					"2614 Memory allocation failed for "
 					"CT event\n");
 			break;
 		}
 
-		mutex_unlock(&phba->ct_event_mutex);
-
 		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
 			/* take accumulated byte count from the last iocbq */
 			iocbq = list_entry(head.prev, typeof(*iocbq), list);
@@ -550,25 +830,25 @@
 		}
 
 		evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL);
-		if (!evt_dat->data) {
+		if (evt_dat->data == NULL) {
 			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
 					"2615 Memory allocation failed for "
 					"CT event data, size %d\n",
 					evt_dat->len);
 			kfree(evt_dat);
-			mutex_lock(&phba->ct_event_mutex);
-			lpfc_ct_event_unref(evt);
-			mutex_unlock(&phba->ct_event_mutex);
+			spin_lock_irqsave(&phba->ct_ev_lock, flags);
+			lpfc_bsg_event_unref(evt);
+			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 			goto error_ct_unsol_exit;
 		}
 
 		list_for_each_entry(iocbq, &head, list) {
+			size = 0;
 			if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
 				bdeBuf1 = iocbq->context2;
 				bdeBuf2 = iocbq->context3;
 			}
 			for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) {
-				int size = 0;
 				if (phba->sli3_options &
 				    LPFC_SLI3_HBQ_ENABLED) {
 					if (i == 0) {
@@ -601,9 +881,11 @@
 						iocbq);
 					kfree(evt_dat->data);
 					kfree(evt_dat);
-					mutex_lock(&phba->ct_event_mutex);
-					lpfc_ct_event_unref(evt);
-					mutex_unlock(&phba->ct_event_mutex);
+					spin_lock_irqsave(&phba->ct_ev_lock,
+						flags);
+					lpfc_bsg_event_unref(evt);
+					spin_unlock_irqrestore(
+						&phba->ct_ev_lock, flags);
 					goto error_ct_unsol_exit;
 				}
 				memcpy((char *)(evt_dat->data) + offset,
@@ -616,15 +898,24 @@
 								 dmabuf);
 				} else {
 					switch (cmd) {
+					case ELX_LOOPBACK_DATA:
+						diag_cmd_data_free(phba,
+						(struct lpfc_dmabufext *)
+							dmabuf);
+						break;
 					case ELX_LOOPBACK_XRI_SETUP:
-						if (!(phba->sli3_options &
-						      LPFC_SLI3_HBQ_ENABLED))
+						if ((phba->sli_rev ==
+							LPFC_SLI_REV2) ||
+							(phba->sli3_options &
+							LPFC_SLI3_HBQ_ENABLED
+							)) {
+							lpfc_in_buf_free(phba,
+									dmabuf);
+						} else {
 							lpfc_post_buffer(phba,
 									 pring,
 									 1);
-						else
-							lpfc_in_buf_free(phba,
-									dmabuf);
+						}
 						break;
 					default:
 						if (!(phba->sli3_options &
@@ -638,7 +929,7 @@
 			}
 		}
 
-		mutex_lock(&phba->ct_event_mutex);
+		spin_lock_irqsave(&phba->ct_ev_lock, flags);
 		if (phba->sli_rev == LPFC_SLI_REV4) {
 			evt_dat->immed_dat = phba->ctx_idx;
 			phba->ctx_idx = (phba->ctx_idx + 1) % 64;
@@ -651,122 +942,144 @@
 
 		evt_dat->type = FC_REG_CT_EVENT;
 		list_add(&evt_dat->node, &evt->events_to_see);
-		wake_up_interruptible(&evt->wq);
-		lpfc_ct_event_unref(evt);
-		if (evt_req_id == SLI_CT_ELX_LOOPBACK)
+		if (evt_req_id == SLI_CT_ELX_LOOPBACK) {
+			wake_up_interruptible(&evt->wq);
+			lpfc_bsg_event_unref(evt);
 			break;
+		}
+
+		list_move(evt->events_to_see.prev, &evt->events_to_get);
+		lpfc_bsg_event_unref(evt);
+
+		job = evt->set_job;
+		evt->set_job = NULL;
+		if (job) {
+			job->reply->reply_payload_rcv_len = size;
+			/* make error code available to userspace */
+			job->reply->result = 0;
+			job->dd_data = NULL;
+			/* complete the job back to userspace */
+			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+			job->job_done(job);
+			spin_lock_irqsave(&phba->ct_ev_lock, flags);
+		}
 	}
-	mutex_unlock(&phba->ct_event_mutex);
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 
 error_ct_unsol_exit:
 	if (!list_empty(&head))
 		list_del(&head);
-
-	return;
+	if (evt_req_id == SLI_CT_ELX_LOOPBACK)
+		return 0;
+	return 1;
 }
 
 /**
- * lpfc_bsg_set_event - process a SET_EVENT bsg vendor command
+ * lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command
  * @job: SET_EVENT fc_bsg_job
- */
+ **/
 static int
-lpfc_bsg_set_event(struct fc_bsg_job *job)
+lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
 {
 	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
 	struct lpfc_hba *phba = vport->phba;
 	struct set_ct_event *event_req;
-	struct lpfc_ct_event *evt;
+	struct lpfc_bsg_event *evt;
 	int rc = 0;
+	struct bsg_job_data *dd_data = NULL;
+	uint32_t ev_mask;
+	unsigned long flags;
 
 	if (job->request_len <
 	    sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) {
 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
 				"2612 Received SET_CT_EVENT below minimum "
 				"size\n");
-		return -EINVAL;
+		rc = -EINVAL;
+		goto job_error;
+	}
+
+	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+	if (dd_data == NULL) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2734 Failed allocation of dd_data\n");
+		rc = -ENOMEM;
+		goto job_error;
 	}
 
 	event_req = (struct set_ct_event *)
 		job->request->rqst_data.h_vendor.vendor_cmd;
-
-	mutex_lock(&phba->ct_event_mutex);
+	ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
+				FC_REG_EVENT_MASK);
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
 	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
 		if (evt->reg_id == event_req->ev_reg_id) {
-			lpfc_ct_event_ref(evt);
+			lpfc_bsg_event_ref(evt);
 			evt->wait_time_stamp = jiffies;
 			break;
 		}
 	}
-	mutex_unlock(&phba->ct_event_mutex);
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 
 	if (&evt->node == &phba->ct_ev_waiters) {
 		/* no event waiting struct yet - first call */
-		evt = lpfc_ct_event_new(event_req->ev_reg_id,
+		evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id,
 					event_req->ev_req_id);
 		if (!evt) {
 			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
 					"2617 Failed allocation of event "
 					"waiter\n");
-			return -ENOMEM;
+			rc = -ENOMEM;
+			goto job_error;
 		}
 
-		mutex_lock(&phba->ct_event_mutex);
+		spin_lock_irqsave(&phba->ct_ev_lock, flags);
 		list_add(&evt->node, &phba->ct_ev_waiters);
-		lpfc_ct_event_ref(evt);
-		mutex_unlock(&phba->ct_event_mutex);
+		lpfc_bsg_event_ref(evt);
+		evt->wait_time_stamp = jiffies;
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 	}
 
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
 	evt->waiting = 1;
-	if (wait_event_interruptible(evt->wq,
-				     !list_empty(&evt->events_to_see))) {
-		mutex_lock(&phba->ct_event_mutex);
-		lpfc_ct_event_unref(evt); /* release ref */
-		lpfc_ct_event_unref(evt); /* delete */
-		mutex_unlock(&phba->ct_event_mutex);
-		rc = -EINTR;
-		goto set_event_out;
-	}
+	dd_data->type = TYPE_EVT;
+	dd_data->context_un.evt = evt;
+	evt->set_job = job; /* for unsolicited command */
+	job->dd_data = dd_data; /* for fc transport timeout callback*/
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+	return 0; /* call job done later */
 
-	evt->wait_time_stamp = jiffies;
-	evt->waiting = 0;
+job_error:
+	if (dd_data != NULL)
+		kfree(dd_data);
 
-	mutex_lock(&phba->ct_event_mutex);
-	list_move(evt->events_to_see.prev, &evt->events_to_get);
-	lpfc_ct_event_unref(evt); /* release ref */
-	mutex_unlock(&phba->ct_event_mutex);
-
-set_event_out:
-	/* set_event carries no reply payload */
-	job->reply->reply_payload_rcv_len = 0;
-	/* make error code available to userspace */
-	job->reply->result = rc;
-	/* complete the job back to userspace */
-	job->job_done(job);
-
-	return 0;
+	job->dd_data = NULL;
+	return rc;
 }
 
 /**
- * lpfc_bsg_get_event - process a GET_EVENT bsg vendor command
+ * lpfc_bsg_hba_get_event - process a GET_EVENT bsg vendor command
  * @job: GET_EVENT fc_bsg_job
- */
+ **/
 static int
-lpfc_bsg_get_event(struct fc_bsg_job *job)
+lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
 {
 	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
 	struct lpfc_hba *phba = vport->phba;
 	struct get_ct_event *event_req;
 	struct get_ct_event_reply *event_reply;
-	struct lpfc_ct_event *evt;
+	struct lpfc_bsg_event *evt;
 	struct event_data *evt_dat = NULL;
-	int rc = 0;
+	unsigned long flags;
+	int rc = 0;
 
 	if (job->request_len <
 	    sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) {
 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
 				"2613 Received GET_CT_EVENT request below "
 				"minimum size\n");
-		return -EINVAL;
+		rc = -EINVAL;
+		goto job_error;
 	}
 
 	event_req = (struct get_ct_event *)
@@ -774,13 +1087,12 @@
 
 	event_reply = (struct get_ct_event_reply *)
 		job->reply->reply_data.vendor_reply.vendor_rsp;
-
-	mutex_lock(&phba->ct_event_mutex);
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
 	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
 		if (evt->reg_id == event_req->ev_reg_id) {
 			if (list_empty(&evt->events_to_get))
 				break;
-			lpfc_ct_event_ref(evt);
+			lpfc_bsg_event_ref(evt);
 			evt->wait_time_stamp = jiffies;
 			evt_dat = list_entry(evt->events_to_get.prev,
 					     struct event_data, node);
@@ -788,45 +1100,1539 @@
 			break;
 		}
 	}
-	mutex_unlock(&phba->ct_event_mutex);
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 
-	if (!evt_dat) {
+	/* The app may continue to ask for event data until it gets
+	 * an error indicating that there isn't any more
+	 */
+	if (evt_dat == NULL) {
 		job->reply->reply_payload_rcv_len = 0;
 		rc = -ENOENT;
-		goto error_get_event_exit;
+		goto job_error;
 	}
 
-	if (evt_dat->len > job->reply_payload.payload_len) {
-		evt_dat->len = job->reply_payload.payload_len;
-			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
-					"2618 Truncated event data at %d "
-					"bytes\n",
-					job->reply_payload.payload_len);
+	if (evt_dat->len > job->request_payload.payload_len) {
+		evt_dat->len = job->request_payload.payload_len;
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2618 Truncated event data at %d "
+				"bytes\n",
+				job->request_payload.payload_len);
 	}
 
+	event_reply->type = evt_dat->type;
 	event_reply->immed_data = evt_dat->immed_dat;
-
 	if (evt_dat->len > 0)
 		job->reply->reply_payload_rcv_len =
-			sg_copy_from_buffer(job->reply_payload.sg_list,
-					    job->reply_payload.sg_cnt,
+			sg_copy_from_buffer(job->request_payload.sg_list,
+					    job->request_payload.sg_cnt,
 					    evt_dat->data, evt_dat->len);
 	else
 		job->reply->reply_payload_rcv_len = 0;
-	rc = 0;
 
-	if (evt_dat)
+	if (evt_dat) {
 		kfree(evt_dat->data);
-	kfree(evt_dat);
-	mutex_lock(&phba->ct_event_mutex);
-	lpfc_ct_event_unref(evt);
-	mutex_unlock(&phba->ct_event_mutex);
+		kfree(evt_dat);
+	}
 
-error_get_event_exit:
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
+	lpfc_bsg_event_unref(evt);
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+	job->dd_data = NULL;
+	job->reply->result = 0;
+	job->job_done(job);
+	return 0;
+
+job_error:
+	job->dd_data = NULL;
+	job->reply->result = rc;
+	return rc;
+}
+
+/**
+ * lpfc_issue_ct_rsp_cmp - lpfc_issue_ct_rsp's completion handler
+ * @phba: Pointer to HBA context object.
+ * @cmdiocbq: Pointer to command iocb.
+ * @rspiocbq: Pointer to response iocb.
+ *
+ * This function is the completion handler for iocbs issued using
+ * the lpfc_issue_ct_rsp function. This function is called by the
+ * ring event handler function without any lock held. This function
+ * can be called from both worker thread context and interrupt
+ * context. This function also can be called from another thread
+ * which cleans up the SLI layer objects.
+ * This function copies the completion status from the response iocb,
+ * releases the iocb and DMA resources, and completes the bsg job back
+ * to userspace.
+ **/
+static void
+lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
+			struct lpfc_iocbq *cmdiocbq,
+			struct lpfc_iocbq *rspiocbq)
+{
+	struct bsg_job_data *dd_data;
+	struct fc_bsg_job *job;
+	IOCB_t *rsp;
+	struct lpfc_dmabuf *bmp;
+	struct lpfc_nodelist *ndlp;
+	unsigned long flags;
+	int rc = 0;
+
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
+	dd_data = cmdiocbq->context1;
+	/* normal completion and timeout crossed paths, already done */
+	if (!dd_data) {
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		return;
+	}
+
+	job = dd_data->context_un.iocb.set_job;
+	bmp = dd_data->context_un.iocb.bmp;
+	rsp = &rspiocbq->iocb;
+	ndlp = dd_data->context_un.iocb.ndlp;
+
+	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
+		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
+
+	if (rsp->ulpStatus) {
+		if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
+			switch (rsp->un.ulpWord[4] & 0xff) {
+			case IOERR_SEQUENCE_TIMEOUT:
+				rc = -ETIMEDOUT;
+				break;
+			case IOERR_INVALID_RPI:
+				rc = -EFAULT;
+				break;
+			default:
+				rc = -EACCES;
+				break;
+			}
+		} else
+			rc = -EACCES;
+	} else
+		job->reply->reply_payload_rcv_len =
+			rsp->un.genreq64.bdl.bdeSize;
+
+	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+	lpfc_sli_release_iocbq(phba, cmdiocbq);
+	lpfc_nlp_put(ndlp);
+	kfree(bmp);
+	kfree(dd_data);
 	/* make error code available to userspace */
 	job->reply->result = rc;
+	job->dd_data = NULL;
 	/* complete the job back to userspace */
 	job->job_done(job);
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+	return;
+}
+
+/**
+ * lpfc_issue_ct_rsp - issue a ct response
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the job object.
+ * @tag: tag index value into the ports context exchange array.
+ * @bmp: Pointer to a dma buffer descriptor.
+ * @num_entry: Number of entries in the bde.
+ **/
+static int
+lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
+		  struct lpfc_dmabuf *bmp, int num_entry)
+{
+	IOCB_t *icmd;
+	struct lpfc_iocbq *ctiocb = NULL;
+	int rc = 0;
+	struct lpfc_nodelist *ndlp = NULL;
+	struct bsg_job_data *dd_data;
+	uint32_t creg_val;
+
+	/* allocate our bsg tracking structure */
+	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+	if (!dd_data) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2736 Failed allocation of dd_data\n");
+		rc = -ENOMEM;
+		goto no_dd_data;
+	}
+
+	/* Allocate buffer for  command iocb */
+	ctiocb = lpfc_sli_get_iocbq(phba);
+	if (!ctiocb) {
+		rc = ENOMEM;
+		goto no_ctiocb;
+	}
+
+	icmd = &ctiocb->iocb;
+	icmd->un.xseq64.bdl.ulpIoTag32 = 0;
+	icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
+	icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
+	icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+	icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
+	icmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
+	icmd->un.xseq64.w5.hcsw.Dfctl = 0;
+	icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_SOL_CTL;
+	icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
+
+	/* Fill in rest of iocb */
+	icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
+	icmd->ulpBdeCount = 1;
+	icmd->ulpLe = 1;
+	icmd->ulpClass = CLASS3;
+	if (phba->sli_rev == LPFC_SLI_REV4) {
+		/* Do not issue unsol response if oxid not marked as valid */
+		if (!(phba->ct_ctx[tag].flags & UNSOL_VALID)) {
+			rc = IOCB_ERROR;
+			goto issue_ct_rsp_exit;
+		}
+		icmd->ulpContext = phba->ct_ctx[tag].oxid;
+		ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
+		if (!ndlp) {
+			lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
+				 "2721 ndlp null for oxid %x SID %x\n",
+					icmd->ulpContext,
+					phba->ct_ctx[tag].SID);
+			rc = IOCB_ERROR;
+			goto issue_ct_rsp_exit;
+		}
+		icmd->un.ulpWord[3] = ndlp->nlp_rpi;
+		/* The exchange is done, mark the entry as invalid */
+		phba->ct_ctx[tag].flags &= ~UNSOL_VALID;
+	} else
+		icmd->ulpContext = (ushort) tag;
+
+	icmd->ulpTimeout = phba->fc_ratov * 2;
+
+	/* Xmit CT response on exchange <xid> */
+	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+			"2722 Xmit CT response on exchange x%x Data: x%x x%x\n",
+			icmd->ulpContext, icmd->ulpIoTag, phba->link_state);
+
+	ctiocb->iocb_cmpl = NULL;
+	ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
+	ctiocb->vport = phba->pport;
+	ctiocb->context3 = bmp;
+
+	ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;
+	ctiocb->context1 = dd_data;
+	ctiocb->context2 = NULL;
+	dd_data->type = TYPE_IOCB;
+	dd_data->context_un.iocb.cmdiocbq = ctiocb;
+	dd_data->context_un.iocb.rspiocbq = NULL;
+	dd_data->context_un.iocb.set_job = job;
+	dd_data->context_un.iocb.bmp = bmp;
+	dd_data->context_un.iocb.ndlp = ndlp;
+
+	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
+		creg_val = readl(phba->HCregaddr);
+		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
+		writel(creg_val, phba->HCregaddr);
+		readl(phba->HCregaddr); /* flush */
+	}
+
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
+
+	if (rc == IOCB_SUCCESS)
+		return 0; /* done for now */
+
+issue_ct_rsp_exit:
+	lpfc_sli_release_iocbq(phba, ctiocb);
+no_ctiocb:
+	kfree(dd_data);
+no_dd_data:
+	return rc;
+}
+
+/**
+ * lpfc_bsg_send_mgmt_rsp - process a SEND_MGMT_RESP bsg vendor command
+ * @job: SEND_MGMT_RESP fc_bsg_job
+ **/
+static int
+lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+	struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *)
+		job->request->rqst_data.h_vendor.vendor_cmd;
+	struct ulp_bde64 *bpl;
+	struct lpfc_dmabuf *bmp = NULL;
+	struct scatterlist *sgel = NULL;
+	int request_nseg;
+	int numbde;
+	dma_addr_t busaddr;
+	uint32_t tag = mgmt_resp->tag;
+	unsigned long reqbfrcnt =
+			(unsigned long)job->request_payload.payload_len;
+	int rc = 0;
+
+	/* in case no data is transferred */
+	job->reply->reply_payload_rcv_len = 0;
+
+	if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) {
+		rc = -ERANGE;
+		goto send_mgmt_rsp_exit;
+	}
+
+	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (!bmp) {
+		rc = -ENOMEM;
+		goto send_mgmt_rsp_exit;
+	}
+
+	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
+	if (!bmp->virt) {
+		rc = -ENOMEM;
+		goto send_mgmt_rsp_free_bmp;
+	}
+
+	INIT_LIST_HEAD(&bmp->list);
+	bpl = (struct ulp_bde64 *) bmp->virt;
+	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
+				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
+	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
+		busaddr = sg_dma_address(sgel);
+		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+		bpl->tus.f.bdeSize = sg_dma_len(sgel);
+		bpl->tus.w = cpu_to_le32(bpl->tus.w);
+		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
+		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
+		bpl++;
+	}
+
+	rc = lpfc_issue_ct_rsp(phba, job, tag, bmp, request_nseg);
+
+	if (rc == IOCB_SUCCESS)
+		return 0; /* done for now */
+
+	/* TBD need to handle a timeout */
+	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
+			  job->request_payload.sg_cnt, DMA_TO_DEVICE);
+	rc = -EACCES;
+	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+
+send_mgmt_rsp_free_bmp:
+	kfree(bmp);
+send_mgmt_rsp_exit:
+	/* make error code available to userspace */
+	job->reply->result = rc;
+	job->dd_data = NULL;
+	return rc;
+}
+
+/**
+ * lpfc_bsg_diag_mode - process a LPFC_BSG_VENDOR_DIAG_MODE bsg vendor command
+ * @job: LPFC_BSG_VENDOR_DIAG_MODE
+ *
+ * This function is responsible for placing a port into diagnostic loopback
+ * mode in order to perform a diagnostic loopback test.
+ * All new scsi requests are blocked, a small delay is used to allow the
+ * scsi requests to complete, then the link is brought down.  Once the
+ * link is placed in loopback mode, scsi requests are again allowed
+ * so the scsi mid-layer doesn't give up on the port.
+ * All of this is done in-line.
+ */
+static int
+lpfc_bsg_diag_mode(struct fc_bsg_job *job)
+{
+	struct Scsi_Host *shost = job->shost;
+	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+	struct diag_mode_set *loopback_mode;
+	struct lpfc_sli *psli = &phba->sli;
+	struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING];
+	uint32_t link_flags;
+	uint32_t timeout;
+	struct lpfc_vport **vports;
+	LPFC_MBOXQ_t *pmboxq;
+	int mbxstatus;
+	int i = 0;
+	int rc = 0;
+
+	/* no data to return just the return code */
+	job->reply->reply_payload_rcv_len = 0;
+
+	if (job->request_len <
+	    sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_set)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2738 Received DIAG MODE request below minimum "
+				"size\n");
+		rc = -EINVAL;
+		goto job_error;
+	}
+
+	loopback_mode = (struct diag_mode_set *)
+		job->request->rqst_data.h_vendor.vendor_cmd;
+	link_flags = loopback_mode->type;
+	timeout = loopback_mode->timeout;
+
+	if ((phba->link_state == LPFC_HBA_ERROR) ||
+	    (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
+	    (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
+		rc = -EACCES;
+		goto job_error;
+	}
+
+	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmboxq) {
+		rc = -ENOMEM;
+		goto job_error;
+	}
+
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports) {
+		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+			shost = lpfc_shost_from_vport(vports[i]);
+			scsi_block_requests(shost);
+		}
+
+		lpfc_destroy_vport_work_array(phba, vports);
+	} else {
+		shost = lpfc_shost_from_vport(phba->pport);
+		scsi_block_requests(shost);
+	}
+
+	while (pring->txcmplq_cnt) {
+		if (i++ > 500)	/* wait up to 5 seconds */
+			break;
+
+		msleep(10);
+	}
+
+	memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+	pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
+	pmboxq->u.mb.mbxOwner = OWN_HOST;
+
+	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
+
+	if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0)) {
+		/* wait for link down before proceeding */
+		i = 0;
+		while (phba->link_state != LPFC_LINK_DOWN) {
+			if (i++ > timeout) {
+				rc = -ETIMEDOUT;
+				goto loopback_mode_exit;
+			}
+
+			msleep(10);
+		}
+
+		memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+		if (link_flags == INTERNAL_LOOP_BACK)
+			pmboxq->u.mb.un.varInitLnk.link_flags = FLAGS_LOCAL_LB;
+		else
+			pmboxq->u.mb.un.varInitLnk.link_flags =
+				FLAGS_TOPOLOGY_MODE_LOOP;
+
+		pmboxq->u.mb.mbxCommand = MBX_INIT_LINK;
+		pmboxq->u.mb.mbxOwner = OWN_HOST;
+
+		mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
+						     LPFC_MBOX_TMO);
+
+		if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
+			rc = -ENODEV;
+		else {
+			phba->link_flag |= LS_LOOPBACK_MODE;
+			/* wait for the link attention interrupt */
+			msleep(100);
+
+			i = 0;
+			while (phba->link_state != LPFC_HBA_READY) {
+				if (i++ > timeout) {
+					rc = -ETIMEDOUT;
+					break;
+				}
+
+				msleep(10);
+			}
+		}
+
+	} else
+		rc = -ENODEV;
+
+loopback_mode_exit:
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports) {
+		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+			shost = lpfc_shost_from_vport(vports[i]);
+			scsi_unblock_requests(shost);
+		}
+		lpfc_destroy_vport_work_array(phba, vports);
+	} else {
+		shost = lpfc_shost_from_vport(phba->pport);
+		scsi_unblock_requests(shost);
+	}
+
+	/*
+	 * Let SLI layer release mboxq if mbox command completed after timeout.
+	 */
+	if (mbxstatus != MBX_TIMEOUT)
+		mempool_free(pmboxq, phba->mbox_mem_pool);
+
+job_error:
+	/* make error code available to userspace */
+	job->reply->result = rc;
+	/* complete the job back to userspace if no error */
+	if (rc == 0)
+		job->job_done(job);
+	return rc;
+}
+
+/**
+ * lpfcdiag_loop_self_reg - obtains a remote port login id
+ * @phba: Pointer to HBA context object
+ * @rpi: Pointer to a remote port login id
+ *
+ * This function obtains a remote port login id so the diag loopback test
+ * can send and receive its own unsolicited CT command.
+ **/
+static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
+{
+	LPFC_MBOXQ_t *mbox;
+	struct lpfc_dmabuf *dmabuff;
+	int status;
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return ENOMEM;
+
+	status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID,
+				(uint8_t *)&phba->pport->fc_sparam, mbox, 0);
+	if (status) {
+		mempool_free(mbox, phba->mbox_mem_pool);
+		return ENOMEM;
+	}
+
+	dmabuff = (struct lpfc_dmabuf *) mbox->context1;
+	mbox->context1 = NULL;
+	status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
+
+	if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
+		lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
+		kfree(dmabuff);
+		if (status != MBX_TIMEOUT)
+			mempool_free(mbox, phba->mbox_mem_pool);
+		return ENODEV;
+	}
+
+	*rpi = mbox->u.mb.un.varWords[0];
+
+	lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
+	kfree(dmabuff);
+	mempool_free(mbox, phba->mbox_mem_pool);
+	return 0;
+}
+
+/**
+ * lpfcdiag_loop_self_unreg - unregs from the rpi
+ * @phba: Pointer to HBA context object
+ * @rpi: Remote port login id
+ *
+ * This function unregisters the rpi obtained in lpfcdiag_loop_self_reg
+ **/
+static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
+{
+	LPFC_MBOXQ_t *mbox;
+	int status;
+
+	/* Allocate mboxq structure */
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (mbox == NULL)
+		return ENOMEM;
+
+	lpfc_unreg_login(phba, 0, rpi, mbox);
+	status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
+
+	if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
+		if (status != MBX_TIMEOUT)
+			mempool_free(mbox, phba->mbox_mem_pool);
+		return EIO;
+	}
+
+	mempool_free(mbox, phba->mbox_mem_pool);
+	return 0;
+}
+
+/**
+ * lpfcdiag_loop_get_xri - obtains the transmit and receive ids
+ * @phba: Pointer to HBA context object
+ * @rpi: Remote port login id
+ * @txxri: Pointer to transmit exchange id
+ * @rxxri: Pointer to response exchange id
+ *
+ * This function obtains the transmit and receive ids required to send
+ * an unsolicited ct command with a payload. A special lpfc FsType and CmdRsp
+ * flags are used so the unsolicited response handler is able to process
+ * the ct command sent on the same port.
+ **/
+static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
+			 uint16_t *txxri, uint16_t *rxxri)
+{
+	struct lpfc_bsg_event *evt;
+	struct lpfc_iocbq *cmdiocbq, *rspiocbq;
+	IOCB_t *cmd, *rsp;
+	struct lpfc_dmabuf *dmabuf;
+	struct ulp_bde64 *bpl = NULL;
+	struct lpfc_sli_ct_request *ctreq = NULL;
+	int ret_val = 0;
+	unsigned long flags;
+
+	*txxri = 0;
+	*rxxri = 0;
+	evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
+				SLI_CT_ELX_LOOPBACK);
+	if (!evt)
+		return ENOMEM;
+
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
+	list_add(&evt->node, &phba->ct_ev_waiters);
+	lpfc_bsg_event_ref(evt);
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+	cmdiocbq = lpfc_sli_get_iocbq(phba);
+	rspiocbq = lpfc_sli_get_iocbq(phba);
+
+	dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (dmabuf) {
+		dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys);
+		INIT_LIST_HEAD(&dmabuf->list);
+		bpl = (struct ulp_bde64 *) dmabuf->virt;
+		memset(bpl, 0, sizeof(*bpl));
+		ctreq = (struct lpfc_sli_ct_request *)(bpl + 1);
+		bpl->addrHigh =
+			le32_to_cpu(putPaddrHigh(dmabuf->phys + sizeof(*bpl)));
+		bpl->addrLow =
+			le32_to_cpu(putPaddrLow(dmabuf->phys + sizeof(*bpl)));
+		bpl->tus.f.bdeFlags = 0;
+		bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ;
+		bpl->tus.w = le32_to_cpu(bpl->tus.w);
+	}
+
+	if (cmdiocbq == NULL || rspiocbq == NULL ||
+	    dmabuf == NULL || bpl == NULL || ctreq == NULL) {
+		ret_val = ENOMEM;
+		goto err_get_xri_exit;
+	}
+
+	cmd = &cmdiocbq->iocb;
+	rsp = &rspiocbq->iocb;
+
+	memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
+
+	ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
+	ctreq->RevisionId.bits.InId = 0;
+	ctreq->FsType = SLI_CT_ELX_LOOPBACK;
+	ctreq->FsSubType = 0;
+	ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP;
+	ctreq->CommandResponse.bits.Size = 0;
+
+
+	cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(dmabuf->phys);
+	cmd->un.xseq64.bdl.addrLow = putPaddrLow(dmabuf->phys);
+	cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+	cmd->un.xseq64.bdl.bdeSize = sizeof(*bpl);
+
+	cmd->un.xseq64.w5.hcsw.Fctl = LA;
+	cmd->un.xseq64.w5.hcsw.Dfctl = 0;
+	cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
+	cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
+
+	cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
+	cmd->ulpBdeCount = 1;
+	cmd->ulpLe = 1;
+	cmd->ulpClass = CLASS3;
+	cmd->ulpContext = rpi;
+
+	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
+	cmdiocbq->vport = phba->pport;
+
+	ret_val = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
+				rspiocbq,
+				(phba->fc_ratov * 2)
+				+ LPFC_DRVR_TIMEOUT);
+	if (ret_val)
+		goto err_get_xri_exit;
+
+	*txxri =  rsp->ulpContext;
+
+	evt->waiting = 1;
+	evt->wait_time_stamp = jiffies;
+	ret_val = wait_event_interruptible_timeout(
+		evt->wq, !list_empty(&evt->events_to_see),
+		((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
+	if (list_empty(&evt->events_to_see))
+		ret_val = (ret_val) ? EINTR : ETIMEDOUT;
+	else {
+		ret_val = IOCB_SUCCESS;
+		spin_lock_irqsave(&phba->ct_ev_lock, flags);
+		list_move(evt->events_to_see.prev, &evt->events_to_get);
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		*rxxri = (list_entry(evt->events_to_get.prev,
+				     typeof(struct event_data),
+				     node))->immed_dat;
+	}
+	evt->waiting = 0;
+
+err_get_xri_exit:
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
+	lpfc_bsg_event_unref(evt); /* release ref */
+	lpfc_bsg_event_unref(evt); /* delete */
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+	if (dmabuf) {
+		if (dmabuf->virt)
+			lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
+		kfree(dmabuf);
+	}
+
+	if (cmdiocbq && (ret_val != IOCB_TIMEDOUT))
+		lpfc_sli_release_iocbq(phba, cmdiocbq);
+	if (rspiocbq)
+		lpfc_sli_release_iocbq(phba, rspiocbq);
+	return ret_val;
+}
+
+/**
+ * diag_cmd_data_alloc - fills in a bde struct with dma buffers
+ * @phba: Pointer to HBA context object
+ * @bpl: Pointer to 64 bit bde structure
+ * @size: Number of bytes to process
+ * @nocopydata: Flag indicating the buffers will not be filled with data
+ *
+ * This function allocates page size buffers and populates an lpfc_dmabufext
+ * chained list describing them.  When @nocopydata is clear the buffers are
+ * zeroed so that data can later be copied into them.  The chained list of
+ * page size buffers is returned.
+ **/
+static struct lpfc_dmabufext *
+diag_cmd_data_alloc(struct lpfc_hba *phba,
+		   struct ulp_bde64 *bpl, uint32_t size,
+		   int nocopydata)
+{
+	struct lpfc_dmabufext *mlist = NULL;
+	struct lpfc_dmabufext *dmp;
+	int cnt, offset = 0, i = 0;
+	struct pci_dev *pcidev;
+
+	pcidev = phba->pcidev;
+
+	while (size) {
+		/* We get chunks of 4K */
+		if (size > BUF_SZ_4K)
+			cnt = BUF_SZ_4K;
+		else
+			cnt = size;
+
+		/* allocate struct lpfc_dmabufext buffer header */
+		dmp = kmalloc(sizeof(struct lpfc_dmabufext), GFP_KERNEL);
+		if (!dmp)
+			goto out;
+
+		INIT_LIST_HEAD(&dmp->dma.list);
+
+		/* Queue it to a linked list */
+		if (mlist)
+			list_add_tail(&dmp->dma.list, &mlist->dma.list);
+		else
+			mlist = dmp;
+
+		/* allocate buffer */
+		dmp->dma.virt = dma_alloc_coherent(&pcidev->dev,
+						   cnt,
+						   &(dmp->dma.phys),
+						   GFP_KERNEL);
+
+		if (!dmp->dma.virt)
+			goto out;
+
+		dmp->size = cnt;
+
+		if (nocopydata) {
+			bpl->tus.f.bdeFlags = 0;
+			pci_dma_sync_single_for_device(phba->pcidev,
+				dmp->dma.phys, LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
+
+		} else {
+			memset((uint8_t *)dmp->dma.virt, 0, cnt);
+			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
+		}
+
+		/* build buffer ptr list for IOCB */
+		bpl->addrLow = le32_to_cpu(putPaddrLow(dmp->dma.phys));
+		bpl->addrHigh = le32_to_cpu(putPaddrHigh(dmp->dma.phys));
+		bpl->tus.f.bdeSize = (ushort) cnt;
+		bpl->tus.w = le32_to_cpu(bpl->tus.w);
+		bpl++;
+
+		i++;
+		offset += cnt;
+		size -= cnt;
+	}
+
+	mlist->flag = i;
+	return mlist;
+out:
+	diag_cmd_data_free(phba, mlist);
+	return NULL;
+}
+
+/**
+ * lpfcdiag_loop_post_rxbufs - post the receive buffers for an unsol CT cmd
+ * @phba: Pointer to HBA context object
+ * @rxxri: Receive exchange id
+ * @len: Number of data bytes
+ *
+ * This function allocates and posts a data buffer of sufficient size to
+ * receive an unsolicited CT command.
+ **/
+static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
+			     size_t len)
+{
+	struct lpfc_sli *psli = &phba->sli;
+	struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
+	struct lpfc_iocbq *cmdiocbq;
+	IOCB_t *cmd = NULL;
+	struct list_head head, *curr, *next;
+	struct lpfc_dmabuf *rxbmp;
+	struct lpfc_dmabuf *dmp;
+	struct lpfc_dmabuf *mp[2] = {NULL, NULL};
+	struct ulp_bde64 *rxbpl = NULL;
+	uint32_t num_bde;
+	struct lpfc_dmabufext *rxbuffer = NULL;
+	int ret_val = 0;
+	int i = 0;
+
+	cmdiocbq = lpfc_sli_get_iocbq(phba);
+	rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (rxbmp != NULL) {
+		rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
+		INIT_LIST_HEAD(&rxbmp->list);
+		rxbpl = (struct ulp_bde64 *) rxbmp->virt;
+		rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0);
+	}
+
+	if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer) {
+		ret_val = ENOMEM;
+		goto err_post_rxbufs_exit;
+	}
+
+	/* Queue buffers for the receive exchange */
+	num_bde = (uint32_t)rxbuffer->flag;
+	dmp = &rxbuffer->dma;
+
+	cmd = &cmdiocbq->iocb;
+	i = 0;
+
+	INIT_LIST_HEAD(&head);
+	list_add_tail(&head, &dmp->list);
+	list_for_each_safe(curr, next, &head) {
+		mp[i] = list_entry(curr, struct lpfc_dmabuf, list);
+		list_del(curr);
+
+		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
+			mp[i]->buffer_tag = lpfc_sli_get_buffer_tag(phba);
+			cmd->un.quexri64cx.buff.bde.addrHigh =
+				putPaddrHigh(mp[i]->phys);
+			cmd->un.quexri64cx.buff.bde.addrLow =
+				putPaddrLow(mp[i]->phys);
+			cmd->un.quexri64cx.buff.bde.tus.f.bdeSize =
+				((struct lpfc_dmabufext *)mp[i])->size;
+			cmd->un.quexri64cx.buff.buffer_tag = mp[i]->buffer_tag;
+			cmd->ulpCommand = CMD_QUE_XRI64_CX;
+			cmd->ulpPU = 0;
+			cmd->ulpLe = 1;
+			cmd->ulpBdeCount = 1;
+			cmd->unsli3.que_xri64cx_ext_words.ebde_count = 0;
+
+		} else {
+			cmd->un.cont64[i].addrHigh = putPaddrHigh(mp[i]->phys);
+			cmd->un.cont64[i].addrLow = putPaddrLow(mp[i]->phys);
+			cmd->un.cont64[i].tus.f.bdeSize =
+				((struct lpfc_dmabufext *)mp[i])->size;
+			cmd->ulpBdeCount = ++i;
+
+			if ((--num_bde > 0) && (i < 2))
+				continue;
+
+			cmd->ulpCommand = CMD_QUE_XRI_BUF64_CX;
+			cmd->ulpLe = 1;
+		}
+
+		cmd->ulpClass = CLASS3;
+		cmd->ulpContext = rxxri;
+
+		ret_val = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
+
+		if (ret_val == IOCB_ERROR) {
+			diag_cmd_data_free(phba,
+				(struct lpfc_dmabufext *)mp[0]);
+			if (mp[1])
+				diag_cmd_data_free(phba,
+					  (struct lpfc_dmabufext *)mp[1]);
+			dmp = list_entry(next, struct lpfc_dmabuf, list);
+			ret_val = EIO;
+			goto err_post_rxbufs_exit;
+		}
+
+		lpfc_sli_ringpostbuf_put(phba, pring, mp[0]);
+		if (mp[1]) {
+			lpfc_sli_ringpostbuf_put(phba, pring, mp[1]);
+			mp[1] = NULL;
+		}
+
+		/* The iocb was freed by lpfc_sli_issue_iocb */
+		cmdiocbq = lpfc_sli_get_iocbq(phba);
+		if (!cmdiocbq) {
+			dmp = list_entry(next, struct lpfc_dmabuf, list);
+			ret_val = EIO;
+			goto err_post_rxbufs_exit;
+		}
+
+		cmd = &cmdiocbq->iocb;
+		i = 0;
+	}
+	list_del(&head);
+
+err_post_rxbufs_exit:
+
+	if (rxbmp) {
+		if (rxbmp->virt)
+			lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
+		kfree(rxbmp);
+	}
+
+	if (cmdiocbq)
+		lpfc_sli_release_iocbq(phba, cmdiocbq);
+	return ret_val;
+}
+
+/**
+ * lpfc_bsg_diag_test - with a port in loopback issues a Ct cmd to itself
+ * @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job
+ *
+ * This function receives a user data buffer to be transmitted and received
+ * on the same port; the link must be up and in loopback mode prior
+ * to being called.
+ * 1. A kernel buffer is allocated to copy the user data into.
+ * 2. The port registers with "itself".
+ * 3. The transmit and receive exchange ids are obtained.
+ * 4. The receive exchange id is posted.
+ * 5. A new els loopback event is created.
+ * 6. The command and response iocbs are allocated.
+ * 7. The cmd iocb FsType is set to elx loopback and the CmdRsp to loopback.
+ *
+ * This function is meant to be called n times while the port is in loopback
+ * so it is the app's responsibility to issue a reset to take the port out
+ * of loopback mode.
+ **/
+static int
+lpfc_bsg_diag_test(struct fc_bsg_job *job)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+	struct diag_mode_test *diag_mode;
+	struct lpfc_bsg_event *evt;
+	struct event_data *evdat;
+	struct lpfc_sli *psli = &phba->sli;
+	uint32_t size;
+	uint32_t full_size;
+	size_t segment_len = 0, segment_offset = 0, current_offset = 0;
+	uint16_t rpi;
+	struct lpfc_iocbq *cmdiocbq, *rspiocbq;
+	IOCB_t *cmd, *rsp;
+	struct lpfc_sli_ct_request *ctreq;
+	struct lpfc_dmabuf *txbmp;
+	struct ulp_bde64 *txbpl = NULL;
+	struct lpfc_dmabufext *txbuffer = NULL;
+	struct list_head head;
+	struct lpfc_dmabuf  *curr;
+	uint16_t txxri, rxxri;
+	uint32_t num_bde;
+	uint8_t *ptr = NULL, *rx_databuf = NULL;
+	int rc = 0;
+	unsigned long flags;
+	void *dataout = NULL;
+	uint32_t total_mem;
+
+	/* in case no data is returned return just the return code */
+	job->reply->reply_payload_rcv_len = 0;
+
+	if (job->request_len <
+	    sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2739 Received DIAG TEST request below minimum "
+				"size\n");
+		rc = -EINVAL;
+		goto loopback_test_exit;
+	}
+
+	if (job->request_payload.payload_len !=
+		job->reply_payload.payload_len) {
+		rc = -EINVAL;
+		goto loopback_test_exit;
+	}
+
+	diag_mode = (struct diag_mode_test *)
+		job->request->rqst_data.h_vendor.vendor_cmd;
+
+	if ((phba->link_state == LPFC_HBA_ERROR) ||
+	    (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
+	    (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
+		rc = -EACCES;
+		goto loopback_test_exit;
+	}
+
+	if (!lpfc_is_link_up(phba) || !(phba->link_flag & LS_LOOPBACK_MODE)) {
+		rc = -EACCES;
+		goto loopback_test_exit;
+	}
+
+	size = job->request_payload.payload_len;
+	full_size = size + ELX_LOOPBACK_HEADER_SZ; /* plus the header */
+
+	if ((size == 0) || (size > 80 * BUF_SZ_4K)) {
+		rc = -ERANGE;
+		goto loopback_test_exit;
+	}
+
+	if (size >= BUF_SZ_4K) {
+		/*
+		 * Allocate memory for ioctl data. If buffer is bigger than 64k,
+		 * then we allocate 64k and re-use that buffer over and over to
+		 * xfer the whole block. This is because Linux kernel has a
+		 * problem allocating more than 120k of kernel space memory. Saw
+		 * problem with GET_FCPTARGETMAPPING...
+		 */
+		if (size <= (64 * 1024))
+			total_mem = size;
+		else
+			total_mem = 64 * 1024;
+	} else
+		/* Allocate memory for ioctl data */
+		total_mem = BUF_SZ_4K;
+
+	dataout = kmalloc(total_mem, GFP_KERNEL);
+	if (dataout == NULL) {
+		rc = -ENOMEM;
+		goto loopback_test_exit;
+	}
+
+	ptr = dataout;
+	ptr += ELX_LOOPBACK_HEADER_SZ;
+	sg_copy_to_buffer(job->request_payload.sg_list,
+				job->request_payload.sg_cnt,
+				ptr, size);
+
+	rc = lpfcdiag_loop_self_reg(phba, &rpi);
+	if (rc) {
+		rc = -ENOMEM;
+		goto loopback_test_exit;
+	}
+
+	rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri);
+	if (rc) {
+		lpfcdiag_loop_self_unreg(phba, rpi);
+		rc = -ENOMEM;
+		goto loopback_test_exit;
+	}
+
+	rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size);
+	if (rc) {
+		lpfcdiag_loop_self_unreg(phba, rpi);
+		rc = -ENOMEM;
+		goto loopback_test_exit;
+	}
+
+	evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
+				SLI_CT_ELX_LOOPBACK);
+	if (!evt) {
+		lpfcdiag_loop_self_unreg(phba, rpi);
+		rc = -ENOMEM;
+		goto loopback_test_exit;
+	}
+
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
+	list_add(&evt->node, &phba->ct_ev_waiters);
+	lpfc_bsg_event_ref(evt);
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+	cmdiocbq = lpfc_sli_get_iocbq(phba);
+	rspiocbq = lpfc_sli_get_iocbq(phba);
+	txbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+
+	if (txbmp) {
+		txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys);
+		INIT_LIST_HEAD(&txbmp->list);
+		txbpl = (struct ulp_bde64 *) txbmp->virt;
+		if (txbpl)
+			txbuffer = diag_cmd_data_alloc(phba,
+							txbpl, full_size, 0);
+	}
+
+	if (!cmdiocbq || !rspiocbq || !txbmp || !txbpl || !txbuffer) {
+		rc = -ENOMEM;
+		goto err_loopback_test_exit;
+	}
+
+	cmd = &cmdiocbq->iocb;
+	rsp = &rspiocbq->iocb;
+
+	INIT_LIST_HEAD(&head);
+	list_add_tail(&head, &txbuffer->dma.list);
+	list_for_each_entry(curr, &head, list) {
+		segment_len = ((struct lpfc_dmabufext *)curr)->size;
+		if (current_offset == 0) {
+			ctreq = curr->virt;
+			memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
+			ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
+			ctreq->RevisionId.bits.InId = 0;
+			ctreq->FsType = SLI_CT_ELX_LOOPBACK;
+			ctreq->FsSubType = 0;
+			ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_DATA;
+			ctreq->CommandResponse.bits.Size   = size;
+			segment_offset = ELX_LOOPBACK_HEADER_SZ;
+		} else
+			segment_offset = 0;
+
+		BUG_ON(segment_offset >= segment_len);
+		memcpy(curr->virt + segment_offset,
+			ptr + current_offset,
+			segment_len - segment_offset);
+
+		current_offset += segment_len - segment_offset;
+		BUG_ON(current_offset > size);
+	}
+	list_del(&head);
+
+	/* Build the XMIT_SEQUENCE iocb */
+
+	num_bde = (uint32_t)txbuffer->flag;
+
+	cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(txbmp->phys);
+	cmd->un.xseq64.bdl.addrLow = putPaddrLow(txbmp->phys);
+	cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+	cmd->un.xseq64.bdl.bdeSize = (num_bde * sizeof(struct ulp_bde64));
+
+	cmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
+	cmd->un.xseq64.w5.hcsw.Dfctl = 0;
+	cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
+	cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
+
+	cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
+	cmd->ulpBdeCount = 1;
+	cmd->ulpLe = 1;
+	cmd->ulpClass = CLASS3;
+	cmd->ulpContext = txxri;
+
+	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
+	cmdiocbq->vport = phba->pport;
+
+	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, rspiocbq,
+				      (phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT);
+
+	if ((rc != IOCB_SUCCESS) || (rsp->ulpStatus != IOCB_SUCCESS)) {
+		rc = -EIO;
+		goto err_loopback_test_exit;
+	}
+
+	evt->waiting = 1;
+	rc = wait_event_interruptible_timeout(
+		evt->wq, !list_empty(&evt->events_to_see),
+		((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
+	evt->waiting = 0;
+	if (list_empty(&evt->events_to_see))
+		rc = (rc) ? -EINTR : -ETIMEDOUT;
+	else {
+		spin_lock_irqsave(&phba->ct_ev_lock, flags);
+		list_move(evt->events_to_see.prev, &evt->events_to_get);
+		evdat = list_entry(evt->events_to_get.prev,
+				   typeof(*evdat), node);
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		rx_databuf = evdat->data;
+		if (evdat->len != full_size) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+				"1603 Loopback test did not receive expected "
+				"data length. actual length 0x%x expected "
+				"length 0x%x\n",
+				evdat->len, full_size);
+			rc = -EIO;
+		} else if (rx_databuf == NULL)
+			rc = -EIO;
+		else {
+			rc = IOCB_SUCCESS;
+			/* skip over elx loopback header */
+			rx_databuf += ELX_LOOPBACK_HEADER_SZ;
+			job->reply->reply_payload_rcv_len =
+				sg_copy_from_buffer(job->reply_payload.sg_list,
+						    job->reply_payload.sg_cnt,
+						    rx_databuf, size);
+			job->reply->reply_payload_rcv_len = size;
+		}
+	}
+
+err_loopback_test_exit:
+	lpfcdiag_loop_self_unreg(phba, rpi);
+
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
+	lpfc_bsg_event_unref(evt); /* release ref */
+	lpfc_bsg_event_unref(evt); /* delete */
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+	if (cmdiocbq != NULL)
+		lpfc_sli_release_iocbq(phba, cmdiocbq);
+
+	if (rspiocbq != NULL)
+		lpfc_sli_release_iocbq(phba, rspiocbq);
+
+	if (txbmp != NULL) {
+		if (txbpl != NULL) {
+			if (txbuffer != NULL)
+				diag_cmd_data_free(phba, txbuffer);
+			lpfc_mbuf_free(phba, txbmp->virt, txbmp->phys);
+		}
+		kfree(txbmp);
+	}
+
+loopback_test_exit:
+	kfree(dataout);
+	/* make error code available to userspace */
+	job->reply->result = rc;
+	job->dd_data = NULL;
+	/* complete the job back to userspace if no error */
+	if (rc == 0)
+		job->job_done(job);
+	return rc;
+}
+
+/**
+ * lpfc_bsg_get_dfc_rev - process a GET_DFC_REV bsg vendor command
+ * @job: GET_DFC_REV fc_bsg_job
+ **/
+static int
+lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+	struct get_mgmt_rev *event_req;
+	struct get_mgmt_rev_reply *event_reply;
+	int rc = 0;
+
+	if (job->request_len <
+	    sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2740 Received GET_DFC_REV request below "
+				"minimum size\n");
+		rc = -EINVAL;
+		goto job_error;
+	}
+
+	event_req = (struct get_mgmt_rev *)
+		job->request->rqst_data.h_vendor.vendor_cmd;
+
+	event_reply = (struct get_mgmt_rev_reply *)
+		job->reply->reply_data.vendor_reply.vendor_rsp;
+
+	if (job->reply_len <
+	    sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev_reply)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2741 Received GET_DFC_REV reply below "
+				"minimum size\n");
+		rc = -EINVAL;
+		goto job_error;
+	}
+
+	event_reply->info.a_Major = MANAGEMENT_MAJOR_REV;
+	event_reply->info.a_Minor = MANAGEMENT_MINOR_REV;
+job_error:
+	job->reply->result = rc;
+	if (rc == 0)
+		job->job_done(job);
+	return rc;
+}
+
+/**
+ * lpfc_bsg_wake_mbox_wait - lpfc_bsg_issue_mbox mbox completion handler
+ * @phba: Pointer to HBA context object.
+ * @pmboxq: Pointer to mailbox command.
+ *
+ * This is the completion handler for mailbox commands issued from the
+ * lpfc_bsg_issue_mbox function. This function is called by the
+ * mailbox event handler function with no lock held. It copies the
+ * completed mailbox into the bsg job reply payload and completes the
+ * job back to userspace.
+ **/
+void
+lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
+{
+	struct bsg_job_data *dd_data;
+	MAILBOX_t *pmb;
+	MAILBOX_t *mb;
+	struct fc_bsg_job *job;
+	uint32_t size;
+	unsigned long flags;
+
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
+	dd_data = pmboxq->context1;
+	if (!dd_data) {
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		return;
+	}
+
+	pmb = &dd_data->context_un.mbox.pmboxq->u.mb;
+	mb = dd_data->context_un.mbox.mb;
+	job = dd_data->context_un.mbox.set_job;
+	memcpy(mb, pmb, sizeof(*pmb));
+	size = job->request_payload.payload_len;
+	job->reply->reply_payload_rcv_len =
+		sg_copy_from_buffer(job->reply_payload.sg_list,
+				job->reply_payload.sg_cnt,
+				mb, size);
+	job->reply->result = 0;
+	dd_data->context_un.mbox.set_job = NULL;
+	job->dd_data = NULL;
+	job->job_done(job);
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+	mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
+	kfree(mb);
+	kfree(dd_data);
+	return;
+}
+
+/**
+ * lpfc_bsg_check_cmd_access - test for a supported mailbox command
+ * @phba: Pointer to HBA context object.
+ * @mb: Pointer to a mailbox object.
+ * @vport: Pointer to a vport object.
+ *
+ * Some commands require the port to be offline; some may not be called from
+ * an application at all.
+ **/
+static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
+	MAILBOX_t *mb, struct lpfc_vport *vport)
+{
+	/* return negative error values for bsg job */
+	switch (mb->mbxCommand) {
+	/* Offline only */
+	case MBX_INIT_LINK:
+	case MBX_DOWN_LINK:
+	case MBX_CONFIG_LINK:
+	case MBX_CONFIG_RING:
+	case MBX_RESET_RING:
+	case MBX_UNREG_LOGIN:
+	case MBX_CLEAR_LA:
+	case MBX_DUMP_CONTEXT:
+	case MBX_RUN_DIAGS:
+	case MBX_RESTART:
+	case MBX_SET_MASK:
+		if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
+			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2743 Command 0x%x is illegal in on-line "
+				"state\n",
+				mb->mbxCommand);
+			return -EPERM;
+		}
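+		/* fall through - port is offline, so the command is allowed */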
+	case MBX_WRITE_NV:
+	case MBX_WRITE_VPARMS:
+	case MBX_LOAD_SM:
+	case MBX_READ_NV:
+	case MBX_READ_CONFIG:
+	case MBX_READ_RCONFIG:
+	case MBX_READ_STATUS:
+	case MBX_READ_XRI:
+	case MBX_READ_REV:
+	case MBX_READ_LNK_STAT:
+	case MBX_DUMP_MEMORY:
+	case MBX_DOWN_LOAD:
+	case MBX_UPDATE_CFG:
+	case MBX_KILL_BOARD:
+	case MBX_LOAD_AREA:
+	case MBX_LOAD_EXP_ROM:
+	case MBX_BEACON:
+	case MBX_DEL_LD_ENTRY:
+	case MBX_SET_DEBUG:
+	case MBX_WRITE_WWN:
+	case MBX_SLI4_CONFIG:
+	case MBX_READ_EVENT_LOG_STATUS:
+	case MBX_WRITE_EVENT_LOG:
+	case MBX_PORT_CAPABILITIES:
+	case MBX_PORT_IOV_CONTROL:
+		break;
+	case MBX_SET_VARIABLE:
+	case MBX_RUN_BIU_DIAG64:
+	case MBX_READ_EVENT_LOG:
+	case MBX_READ_SPARM64:
+	case MBX_READ_LA:
+	case MBX_READ_LA64:
+	case MBX_REG_LOGIN:
+	case MBX_REG_LOGIN64:
+	case MBX_CONFIG_PORT:
+	case MBX_RUN_BIU_DIAG:
+	default:
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+			"2742 Unknown Command 0x%x\n",
+			mb->mbxCommand);
+		return -EPERM;
+	}
+
+	return 0; /* ok */
+}
+
+/**
+ * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the job object.
+ * @vport: Pointer to a vport object.
+ *
+ * Allocate a tracking object, mailbox command memory, get a mailbox
+ * from the mailbox pool, copy the caller mailbox command.
+ *
+ * If the port is offline or the SLI is not active, poll for the command
+ * completion (the port may be being reset) and complete the job in-line;
+ * otherwise issue the mailbox command and let our completion handler
+ * finish the command.
+ **/
+static uint32_t
+lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
+	struct lpfc_vport *vport)
+{
+	LPFC_MBOXQ_t *pmboxq;
+	MAILBOX_t *pmb;
+	MAILBOX_t *mb;
+	struct bsg_job_data *dd_data;
+	uint32_t size;
+	int rc = 0;
+
+	/* allocate our bsg tracking structure */
+	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+	if (!dd_data) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2727 Failed allocation of dd_data\n");
+		return -ENOMEM;
+	}
+
+	mb = kzalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!mb) {
+		kfree(dd_data);
+		return -ENOMEM;
+	}
+
+	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmboxq) {
+		kfree(dd_data);
+		kfree(mb);
+		return -ENOMEM;
+	}
+
+	size = job->request_payload.payload_len;
+	job->reply->reply_payload_rcv_len =
+		sg_copy_to_buffer(job->request_payload.sg_list,
+				job->request_payload.sg_cnt,
+				mb, size);
+
+	rc = lpfc_bsg_check_cmd_access(phba, mb, vport);
+	if (rc != 0) {
+		kfree(dd_data);
+		kfree(mb);
+		mempool_free(pmboxq, phba->mbox_mem_pool);
+		return rc; /* must be negative */
+	}
+
+	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+	pmb = &pmboxq->u.mb;
+	memcpy(pmb, mb, sizeof(*pmb));
+	pmb->mbxOwner = OWN_HOST;
+	pmboxq->context1 = NULL;
+	pmboxq->vport = vport;
+
+	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
+	    (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
+		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
+		if (rc != MBX_SUCCESS) {
+			if (rc != MBX_TIMEOUT) {
+				kfree(dd_data);
+				kfree(mb);
+				mempool_free(pmboxq, phba->mbox_mem_pool);
+			}
+			return  (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
+		}
+
+		memcpy(mb, pmb, sizeof(*pmb));
+		job->reply->reply_payload_rcv_len =
+			sg_copy_from_buffer(job->reply_payload.sg_list,
+					job->reply_payload.sg_cnt,
+					mb, size);
+		kfree(dd_data);
+		kfree(mb);
+		mempool_free(pmboxq, phba->mbox_mem_pool);
+		/* not waiting; mbox already done */
+		return 0;
+	}
+
+	/* setup wake call as IOCB callback */
+	pmboxq->mbox_cmpl = lpfc_bsg_wake_mbox_wait;
+	/* setup context field to pass wait_queue pointer to wake function */
+	pmboxq->context1 = dd_data;
+	dd_data->type = TYPE_MBOX;
+	dd_data->context_un.mbox.pmboxq = pmboxq;
+	dd_data->context_un.mbox.mb = mb;
+	dd_data->context_un.mbox.set_job = job;
+	job->dd_data = dd_data;
+	rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
+	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
+		kfree(dd_data);
+		kfree(mb);
+		mempool_free(pmboxq, phba->mbox_mem_pool);
+		return -EIO;
+	}
+
+	return 1;
+}
+
+/**
+ * lpfc_bsg_mbox_cmd - process an fc bsg LPFC_BSG_VENDOR_MBOX command
+ * @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX.
+ **/
+static int
+lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+	int rc = 0;
+
+	/* in case no data is transferred */
+	job->reply->reply_payload_rcv_len = 0;
+	if (job->request_len <
+	    sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2737 Received MBOX_REQ request below "
+				"minimum size\n");
+		rc = -EINVAL;
+		goto job_error;
+	}
+
+	if (job->request_payload.payload_len != PAGE_SIZE) {
+		rc = -EINVAL;
+		goto job_error;
+	}
+
+	if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
+		rc = -EAGAIN;
+		goto job_error;
+	}
+
+	rc = lpfc_bsg_issue_mbox(phba, job, vport);
+
+job_error:
+	if (rc == 0) {
+		/* job done */
+		job->reply->result = 0;
+		job->dd_data = NULL;
+		job->job_done(job);
+	} else if (rc == 1)
+		/* job submitted, will complete later*/
+		rc = 0; /* return zero, no error */
+	else {
+		/* some error occurred */
+		job->reply->result = rc;
+		job->dd_data = NULL;
+	}
 
 	return rc;
 }
@@ -834,38 +2640,57 @@
 /**
  * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
  * @job: fc_bsg_job to handle
- */
+ **/
 static int
 lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
 {
 	int command = job->request->rqst_data.h_vendor.vendor_cmd[0];
+	int rc;
 
 	switch (command) {
 	case LPFC_BSG_VENDOR_SET_CT_EVENT:
-		return lpfc_bsg_set_event(job);
+		rc = lpfc_bsg_hba_set_event(job);
 		break;
-
 	case LPFC_BSG_VENDOR_GET_CT_EVENT:
-		return lpfc_bsg_get_event(job);
+		rc = lpfc_bsg_hba_get_event(job);
 		break;
-
+	case LPFC_BSG_VENDOR_SEND_MGMT_RESP:
+		rc = lpfc_bsg_send_mgmt_rsp(job);
+		break;
+	case LPFC_BSG_VENDOR_DIAG_MODE:
+		rc = lpfc_bsg_diag_mode(job);
+		break;
+	case LPFC_BSG_VENDOR_DIAG_TEST:
+		rc = lpfc_bsg_diag_test(job);
+		break;
+	case LPFC_BSG_VENDOR_GET_MGMT_REV:
+		rc = lpfc_bsg_get_dfc_rev(job);
+		break;
+	case LPFC_BSG_VENDOR_MBOX:
+		rc = lpfc_bsg_mbox_cmd(job);
+		break;
 	default:
-		return -EINVAL;
+		rc = -EINVAL;
+		job->reply->reply_payload_rcv_len = 0;
+		/* make error code available to userspace */
+		job->reply->result = rc;
+		break;
 	}
+
+	return rc;
 }
 
 /**
  * lpfc_bsg_request - handle a bsg request from the FC transport
  * @job: fc_bsg_job to handle
- */
+ **/
 int
 lpfc_bsg_request(struct fc_bsg_job *job)
 {
 	uint32_t msgcode;
-	int rc = -EINVAL;
+	int rc;
 
 	msgcode = job->request->msgcode;
-
 	switch (msgcode) {
 	case FC_BSG_HST_VENDOR:
 		rc = lpfc_bsg_hst_vendor(job);
@@ -874,9 +2699,13 @@
 		rc = lpfc_bsg_rport_els(job);
 		break;
 	case FC_BSG_RPT_CT:
-		rc = lpfc_bsg_rport_ct(job);
+		rc = lpfc_bsg_send_mgmt_cmd(job);
 		break;
 	default:
+		rc = -EINVAL;
+		job->reply->reply_payload_rcv_len = 0;
+		/* make error code available to userspace */
+		job->reply->result = rc;
 		break;
 	}
 
@@ -889,17 +2718,71 @@
  *
  * This function just aborts the job's IOCB.  The aborted IOCB will return to
  * the waiting function which will handle passing the error back to userspace
- */
+ **/
 int
 lpfc_bsg_timeout(struct fc_bsg_job *job)
 {
 	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
 	struct lpfc_hba *phba = vport->phba;
-	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)job->dd_data;
+	struct lpfc_iocbq *cmdiocb;
+	struct lpfc_bsg_event *evt;
+	struct lpfc_bsg_iocb *iocb;
+	struct lpfc_bsg_mbox *mbox;
 	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+	struct bsg_job_data *dd_data;
+	unsigned long flags;
 
-	if (cmdiocb)
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
+	dd_data = (struct bsg_job_data *)job->dd_data;
+	/* timeout and completion crossed paths if no dd_data */
+	if (!dd_data) {
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		return 0;
+	}
+
+	switch (dd_data->type) {
+	case TYPE_IOCB:
+		iocb = &dd_data->context_un.iocb;
+		cmdiocb = iocb->cmdiocbq;
+		/* hint to completion handler that the job timed out */
+		job->reply->result = -EAGAIN;
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		/* this will call our completion handler */
+		spin_lock_irq(&phba->hbalock);
 		lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
+		spin_unlock_irq(&phba->hbalock);
+		break;
+	case TYPE_EVT:
+		evt = dd_data->context_un.evt;
+		/* this event has no job anymore */
+		evt->set_job = NULL;
+		job->dd_data = NULL;
+		job->reply->reply_payload_rcv_len = 0;
+		/* Return -EAGAIN which is our way of signalling the
+		 * app to retry.
+		 */
+		job->reply->result = -EAGAIN;
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		job->job_done(job);
+		break;
+	case TYPE_MBOX:
+		mbox = &dd_data->context_un.mbox;
+		/* this mbox has no job anymore */
+		mbox->set_job = NULL;
+		job->dd_data = NULL;
+		job->reply->reply_payload_rcv_len = 0;
+		job->reply->result = -EAGAIN;
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		job->job_done(job);
+		break;
+	default:
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		break;
+	}
 
+	/* The scsi transport fc fc_bsg_job_timeout handler expects a zero
+	 * return code, otherwise an error message will be displayed on the
+	 * console, so always return success (zero)
+	 */
 	return 0;
 }
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
new file mode 100644
index 0000000..6c8f87e
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -0,0 +1,98 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2010 Emulex.  All rights reserved.                *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.emulex.com                                                  *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+/* bsg definitions
+ * No pointers to user data are allowed; all application buffers and sizes
+ * will be derived through the bsg interface.
+ *
+ * These are the vendor unique structures passed in using the bsg
+ * FC_BSG_HST_VENDOR message code type.
+ */
+#define LPFC_BSG_VENDOR_SET_CT_EVENT	1
+#define LPFC_BSG_VENDOR_GET_CT_EVENT	2
+#define LPFC_BSG_VENDOR_SEND_MGMT_RESP	3
+#define LPFC_BSG_VENDOR_DIAG_MODE	4
+#define LPFC_BSG_VENDOR_DIAG_TEST	5
+#define LPFC_BSG_VENDOR_GET_MGMT_REV	6
+#define LPFC_BSG_VENDOR_MBOX		7
+
+struct set_ct_event {
+	uint32_t command;
+	uint32_t type_mask;
+	uint32_t ev_req_id;
+	uint32_t ev_reg_id;
+};
+
+struct get_ct_event {
+	uint32_t command;
+	uint32_t ev_reg_id;
+	uint32_t ev_req_id;
+};
+
+struct get_ct_event_reply {
+	uint32_t immed_data;
+	uint32_t type;
+};
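+
+/*
+ * Illustrative sketch only (not part of the driver interface): an
+ * application registers for CT events by passing a set_ct_event structure
+ * as the vendor_cmd payload of an FC_BSG_HST_VENDOR request, then polls
+ * with get_ct_event using the same ev_reg_id; event data is copied into
+ * the request payload scatterlist of the GET_CT_EVENT job, and immed_data
+ * and type come back in get_ct_event_reply.  CT_FSTYPE_OF_INTEREST and
+ * APP_REG_ID are placeholders for application-chosen values, and
+ * FC_REG_CT_EVENT is the event mask bit used by the driver's unsolicited
+ * CT handler.
+ *
+ *	struct set_ct_event set_ev = {
+ *		.command   = LPFC_BSG_VENDOR_SET_CT_EVENT,
+ *		.type_mask = FC_REG_CT_EVENT,
+ *		.ev_req_id = CT_FSTYPE_OF_INTEREST,
+ *		.ev_reg_id = APP_REG_ID,
+ *	};
+ *
+ *	struct get_ct_event get_ev = {
+ *		.command   = LPFC_BSG_VENDOR_GET_CT_EVENT,
+ *		.ev_reg_id = APP_REG_ID,
+ *		.ev_req_id = CT_FSTYPE_OF_INTEREST,
+ *	};
+ */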
+
+struct send_mgmt_resp {
+	uint32_t command;
+	uint32_t tag;
+};
+
+
+#define INTERNAL_LOOP_BACK 0x1 /* adapter short cuts the loop internally */
+#define EXTERNAL_LOOP_BACK 0x2 /* requires an external loopback plug */
+
+struct diag_mode_set {
+	uint32_t command;
+	uint32_t type;
+	uint32_t timeout;
+};
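+
+/*
+ * Illustrative sketch only: to run the loopback diagnostic an application
+ * would first place the port in loopback mode with LPFC_BSG_VENDOR_DIAG_MODE
+ * and then issue one or more LPFC_BSG_VENDOR_DIAG_TEST requests whose
+ * request payload carries the pattern to echo back.  The timeout value
+ * below is a hypothetical example; it feeds the driver's link-state
+ * polling loops and its exact units are a driver detail.
+ *
+ *	struct diag_mode_set dm = {
+ *		.command = LPFC_BSG_VENDOR_DIAG_MODE,
+ *		.type    = INTERNAL_LOOP_BACK,
+ *		.timeout = 60,
+ *	};
+ */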
+
+struct diag_mode_test {
+	uint32_t command;
+};
+
+#define LPFC_WWNN_TYPE		0
+#define LPFC_WWPN_TYPE		1
+
+struct get_mgmt_rev {
+	uint32_t command;
+};
+
+#define MANAGEMENT_MAJOR_REV   1
+#define MANAGEMENT_MINOR_REV   0
+
+/* the MgmtRevInfo structure */
+struct MgmtRevInfo {
+	uint32_t a_Major;
+	uint32_t a_Minor;
+};
+
+struct get_mgmt_rev_reply {
+	struct MgmtRevInfo info;
+};
+
+struct dfc_mbox_req {
+	uint32_t command;
+	uint32_t inExtWLen;
+	uint32_t outExtWLen;
+	uint8_t mbOffset;
+};
+
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 650494d..6f0fb51 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2010 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
@@ -44,18 +44,26 @@
 void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
 void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
 void lpfc_reg_vpi(struct lpfc_vport *, LPFC_MBOXQ_t *);
+void lpfc_register_new_vport(struct lpfc_hba *, struct lpfc_vport *,
+			struct lpfc_nodelist *);
 void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *);
 void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
 void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *);
+void lpfc_supported_pages(struct lpfcMboxq *);
+void lpfc_sli4_params(struct lpfcMboxq *);
+int lpfc_pc_sli4_params_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
 
 struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t);
 void lpfc_cleanup_rcv_buffers(struct lpfc_vport *);
 void lpfc_rcv_seq_check_edtov(struct lpfc_vport *);
 void lpfc_cleanup_rpis(struct lpfc_vport *, int);
+void lpfc_cleanup_pending_mbox(struct lpfc_vport *);
 int lpfc_linkdown(struct lpfc_hba *);
 void lpfc_linkdown_port(struct lpfc_vport *);
 void lpfc_port_link_failure(struct lpfc_vport *);
 void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_init_vpi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_retry_pport_discovery(struct lpfc_hba *);
 
 void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -73,6 +81,7 @@
 int  lpfc_can_disctmo(struct lpfc_vport *);
 int  lpfc_unreg_rpi(struct lpfc_vport *, struct lpfc_nodelist *);
 void lpfc_unreg_all_rpis(struct lpfc_vport *);
+void lpfc_unreg_hba_rpis(struct lpfc_hba *);
 void lpfc_unreg_default_rpis(struct lpfc_vport *);
 void lpfc_issue_reg_vpi(struct lpfc_hba *, struct lpfc_vport *);
 
@@ -99,7 +108,7 @@
 
 void lpfc_do_scr_ns_plogi(struct lpfc_hba *, struct lpfc_vport *);
 int lpfc_check_sparm(struct lpfc_vport *, struct lpfc_nodelist *,
-		     struct serv_parm *, uint32_t);
+		     struct serv_parm *, uint32_t, int);
 int lpfc_els_abort(struct lpfc_hba *, struct lpfc_nodelist *);
 void lpfc_more_plogi(struct lpfc_vport *);
 void lpfc_more_adisc(struct lpfc_vport *);
@@ -197,6 +206,7 @@
 void lpfc_unreg_fcfi(struct lpfcMboxq *, uint16_t);
 void lpfc_resume_rpi(struct lpfcMboxq *, struct lpfc_nodelist *);
 int lpfc_check_pending_fcoe_event(struct lpfc_hba *, uint8_t);
+void lpfc_issue_init_vpi(struct lpfc_vport *);
 
 void lpfc_config_hbq(struct lpfc_hba *, uint32_t, struct lpfc_hbq_init *,
 	uint32_t , LPFC_MBOXQ_t *);
@@ -206,7 +216,11 @@
 void lpfc_sli4_rb_free(struct lpfc_hba *, struct hbq_dmabuf *);
 void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *,
 			uint16_t);
+void lpfc_unregister_fcf(struct lpfc_hba *);
+void lpfc_unregister_fcf_rescan(struct lpfc_hba *);
 void lpfc_unregister_unused_fcf(struct lpfc_hba *);
+int lpfc_sli4_redisc_fcf_table(struct lpfc_hba *);
+void lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *);
 
 int lpfc_mem_alloc(struct lpfc_hba *, int align);
 void lpfc_mem_free(struct lpfc_hba *);
@@ -365,6 +379,8 @@
 void lpfc_create_static_vport(struct lpfc_hba *);
 void lpfc_stop_hba_timers(struct lpfc_hba *);
 void lpfc_stop_port(struct lpfc_hba *);
+void __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *);
+void lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *);
 void lpfc_parse_fcoe_conf(struct lpfc_hba *, uint8_t *, uint32_t);
 int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
 void lpfc_start_fdiscs(struct lpfc_hba *phba);
@@ -378,5 +394,5 @@
 /* functions to support SGIOv4/bsg interface */
 int lpfc_bsg_request(struct fc_bsg_job *);
 int lpfc_bsg_timeout(struct fc_bsg_job *);
-void lpfc_bsg_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
+int lpfc_bsg_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
 			     struct lpfc_iocbq *);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 0ebcd9b..c7e9219 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2010 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
@@ -97,7 +97,8 @@
 	struct list_head head;
 	struct lpfc_dmabuf *bdeBuf;
 
-	lpfc_bsg_ct_unsol_event(phba, pring, piocbq);
+	if (lpfc_bsg_ct_unsol_event(phba, pring, piocbq) == 0)
+		return;
 
 	if (unlikely(icmd->ulpStatus == IOSTAT_NEED_BUFFER)) {
 		lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
@@ -181,7 +182,8 @@
 	uint32_t size;
 
 	/* Forward abort event to any process registered to receive ct event */
-	lpfc_bsg_ct_unsol_event(phba, pring, piocbq);
+	if (lpfc_bsg_ct_unsol_event(phba, pring, piocbq) == 0)
+		return;
 
 	/* If there is no BDE associated with IOCB, there is nothing to do */
 	if (icmd->ulpBdeCount == 0)
@@ -1843,12 +1845,7 @@
 		c  = (rev & 0x0000ff00) >> 8;
 		b4 = (rev & 0x000000ff);
 
-		if (flag)
-			sprintf(fwrevision, "%d.%d%d%c%d ", b1,
-				b2, b3, c, b4);
-		else
-			sprintf(fwrevision, "%d.%d%d%c%d ", b1,
-				b2, b3, c, b4);
+		sprintf(fwrevision, "%d.%d%d%c%d", b1, b2, b3, c, b4);
 	}
 	return;
 }
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 2cc3968..08b6634 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -50,9 +50,6 @@
 				struct lpfc_nodelist *ndlp, uint8_t retry);
 static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
 				  struct lpfc_iocbq *iocb);
-static void lpfc_register_new_vport(struct lpfc_hba *phba,
-				    struct lpfc_vport *vport,
-				    struct lpfc_nodelist *ndlp);
 
 static int lpfc_max_els_tries = 3;
 
@@ -592,6 +589,15 @@
 			vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
 			spin_unlock_irq(shost->host_lock);
 		}
+		/*
+		 * If the VPI is unregistered, the driver needs to issue
+		 * INIT_VPI before re-registering.
+		 */
+		if (phba->sli_rev == LPFC_SLI_REV4) {
+			spin_lock_irq(shost->host_lock);
+			vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+			spin_unlock_irq(shost->host_lock);
+		}
 	}
 
 	if (phba->sli_rev < LPFC_SLI_REV4) {
@@ -604,10 +610,13 @@
 	} else {
 		ndlp->nlp_type |= NLP_FABRIC;
 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
-		if (vport->vpi_state & LPFC_VPI_REGISTERED) {
+		if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) &&
+			(vport->vpi_state & LPFC_VPI_REGISTERED)) {
 			lpfc_start_fdiscs(phba);
 			lpfc_do_scr_ns_plogi(phba, vport);
-		} else
+		} else if (vport->fc_flag & FC_VFI_REGISTERED)
+			lpfc_issue_init_vpi(vport);
+		else
 			lpfc_issue_reg_vfi(vport);
 	}
 	return 0;
@@ -804,6 +813,9 @@
 				 irsp->ulpTimeout);
 		goto flogifail;
 	}
+	spin_lock_irq(shost->host_lock);
+	vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
+	spin_unlock_irq(shost->host_lock);
 
 	/*
 	 * The FLogI succeeded.  Sync the data for the CPU before
@@ -2720,7 +2732,7 @@
 	if (did == FDMI_DID)
 		retry = 1;
 
-	if ((cmd == ELS_CMD_FLOGI) &&
+	if (((cmd == ELS_CMD_FLOGI) || (cmd == ELS_CMD_FDISC)) &&
 	    (phba->fc_topology != TOPOLOGY_LOOP) &&
 	    !lpfc_error_lost_link(irsp)) {
 		/* FLOGI retry policy */
@@ -4385,7 +4397,7 @@
 
 	did = Fabric_DID;
 
-	if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3))) {
+	if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1))) {
 		/* For a FLOGI we accept, then if our portname is greater
 		 * then the remote portname we initiate Nport login.
 		 */
@@ -5915,6 +5927,7 @@
 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
 	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
 	MAILBOX_t *mb = &pmb->u.mb;
+	int rc;
 
 	spin_lock_irq(shost->host_lock);
 	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
@@ -5936,6 +5949,26 @@
 			spin_unlock_irq(shost->host_lock);
 			lpfc_can_disctmo(vport);
 			break;
+		/* If reg_vpi fails with an invalid VPI status, re-init the VPI */
+		case 0x20:
+			spin_lock_irq(shost->host_lock);
+			vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+			spin_unlock_irq(shost->host_lock);
+			lpfc_init_vpi(phba, pmb, vport->vpi);
+			pmb->vport = vport;
+			pmb->mbox_cmpl = lpfc_init_vpi_cmpl;
+			rc = lpfc_sli_issue_mbox(phba, pmb,
+				MBX_NOWAIT);
+			if (rc == MBX_NOT_FINISHED) {
+				lpfc_printf_vlog(vport,
+					KERN_ERR, LOG_MBOX,
+					"2732 Failed to issue INIT_VPI"
+					" mailbox command\n");
+			} else {
+				lpfc_nlp_put(ndlp);
+				return;
+			}
+
 		default:
 			/* Try to recover from this error */
 			lpfc_mbx_unreg_vpi(vport);
@@ -5949,13 +5982,17 @@
 			break;
 		}
 	} else {
+		spin_lock_irq(shost->host_lock);
 		vport->vpi_state |= LPFC_VPI_REGISTERED;
-		if (vport == phba->pport)
+		spin_unlock_irq(shost->host_lock);
+		if (vport == phba->pport) {
 			if (phba->sli_rev < LPFC_SLI_REV4)
 				lpfc_issue_fabric_reglogin(vport);
-			else
-				lpfc_issue_reg_vfi(vport);
-		else
+			else {
+				lpfc_start_fdiscs(phba);
+				lpfc_do_scr_ns_plogi(phba, vport);
+			}
+		} else
 			lpfc_do_scr_ns_plogi(phba, vport);
 	}
 
@@ -5977,7 +6014,7 @@
  * This routine registers the @vport as a new virtual port with a HBA.
  * It is done through a registering vpi mailbox command.
  **/
-static void
+void
 lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
 			struct lpfc_nodelist *ndlp)
 {
@@ -6018,6 +6055,78 @@
 }
 
 /**
+ * lpfc_retry_pport_discovery - Start timer to retry FLOGI.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine aborts all pending discovery commands and
+ * starts a timer to retry FLOGI for the physical port
+ * discovery.
+ **/
+void
+lpfc_retry_pport_discovery(struct lpfc_hba *phba)
+{
+	struct lpfc_vport **vports;
+	struct lpfc_nodelist *ndlp;
+	struct Scsi_Host  *shost;
+	int i;
+	uint32_t link_state;
+
+	/* Treat this failure as linkdown for all vports */
+	link_state = phba->link_state;
+	lpfc_linkdown(phba);
+	phba->link_state = link_state;
+
+	vports = lpfc_create_vport_work_array(phba);
+
+	if (vports) {
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+			ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
+			if (ndlp)
+				lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
+			lpfc_els_flush_cmd(vports[i]);
+		}
+		lpfc_destroy_vport_work_array(phba, vports);
+	}
+
+	/* If the fabric requires FLOGI, re-instantiate the physical login */
+	ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
+	if (!ndlp)
+		return;
+
+
+	shost = lpfc_shost_from_vport(phba->pport);
+	mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_flag |= NLP_DELAY_TMO;
+	spin_unlock_irq(shost->host_lock);
+	ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;
+	phba->pport->port_state = LPFC_FLOGI;
+	return;
+}
+
+/**
+ * lpfc_fabric_login_reqd - Check if FLOGI required.
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to FDISC command iocb.
+ * @rspiocb: pointer to FDISC response iocb.
+ *
+ * This routine checks if a FLOGI is required for FDISC
+ * to succeed.
+ **/
+static int
+lpfc_fabric_login_reqd(struct lpfc_hba *phba,
+		struct lpfc_iocbq *cmdiocb,
+		struct lpfc_iocbq *rspiocb)
+{
+
+	if ((rspiocb->iocb.ulpStatus != IOSTAT_FABRIC_RJT) ||
+		(rspiocb->iocb.un.ulpWord[4] != RJT_LOGIN_REQUIRED))
+		return 0;
+	else
+		return 1;
+}
+
+/**
  * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command
  * @phba: pointer to lpfc hba data structure.
  * @cmdiocb: pointer to lpfc command iocb data structure.
@@ -6066,6 +6175,12 @@
 		irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID);
 
 	if (irsp->ulpStatus) {
+
+		if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) {
+			lpfc_retry_pport_discovery(phba);
+			goto out;
+		}
+
 		/* Check for retry */
 		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
 			goto out;
@@ -6076,6 +6191,7 @@
 		goto fdisc_failed;
 	}
 	spin_lock_irq(shost->host_lock);
+	vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
 	vport->fc_flag |= FC_FABRIC;
 	if (vport->phba->fc_topology == TOPOLOGY_LOOP)
 		vport->fc_flag |=  FC_PUBLIC_LOOP;
@@ -6103,10 +6219,13 @@
 		lpfc_mbx_unreg_vpi(vport);
 		spin_lock_irq(shost->host_lock);
 		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+		vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
 		spin_unlock_irq(shost->host_lock);
 	}
 
-	if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
+	if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)
+		lpfc_issue_init_vpi(vport);
+	else if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
 		lpfc_register_new_vport(phba, vport, ndlp);
 	else
 		lpfc_do_scr_ns_plogi(phba, vport);
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 2445e39..2359d0b 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -525,6 +525,8 @@
 			spin_unlock_irq(&phba->hbalock);
 			lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
 		}
+		if (phba->fcf.fcf_flag & FCF_REDISC_EVT)
+			lpfc_sli4_fcf_redisc_event_proc(phba);
 	}
 
 	vports = lpfc_create_vport_work_array(phba);
@@ -706,6 +708,8 @@
 void
 lpfc_port_link_failure(struct lpfc_vport *vport)
 {
+	lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
+
 	/* Cleanup any outstanding received buffers */
 	lpfc_cleanup_rcv_buffers(vport);
 
@@ -752,12 +756,14 @@
 	lpfc_scsi_dev_block(phba);
 
 	spin_lock_irq(&phba->hbalock);
-	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_DISCOVERED);
+	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
+	spin_unlock_irq(&phba->hbalock);
 	if (phba->link_state > LPFC_LINK_DOWN) {
 		phba->link_state = LPFC_LINK_DOWN;
+		spin_lock_irq(shost->host_lock);
 		phba->pport->fc_flag &= ~FC_LBIT;
+		spin_unlock_irq(shost->host_lock);
 	}
-	spin_unlock_irq(&phba->hbalock);
 	vports = lpfc_create_vport_work_array(phba);
 	if (vports != NULL)
 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
@@ -1023,7 +1029,7 @@
 		return;
 	}
 	spin_lock_irqsave(&phba->hbalock, flags);
-	phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE);
+	phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
 	phba->hba_flag &= ~FCF_DISC_INPROGRESS;
 	spin_unlock_irqrestore(&phba->hbalock, flags);
 	if (vport->port_state != LPFC_FLOGI)
@@ -1045,25 +1051,23 @@
 static uint32_t
 lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
 {
-	if ((fab_name[0] ==
-		bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record)) &&
-	    (fab_name[1] ==
-		bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record)) &&
-	    (fab_name[2] ==
-		bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record)) &&
-	    (fab_name[3] ==
-		bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record)) &&
-	    (fab_name[4] ==
-		bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record)) &&
-	    (fab_name[5] ==
-		bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record)) &&
-	    (fab_name[6] ==
-		bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record)) &&
-	    (fab_name[7] ==
-		bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record)))
-		return 1;
-	else
+	if (fab_name[0] != bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record))
 		return 0;
+	if (fab_name[1] != bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record))
+		return 0;
+	if (fab_name[2] != bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record))
+		return 0;
+	if (fab_name[3] != bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record))
+		return 0;
+	if (fab_name[4] != bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record))
+		return 0;
+	if (fab_name[5] != bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record))
+		return 0;
+	if (fab_name[6] != bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record))
+		return 0;
+	if (fab_name[7] != bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record))
+		return 0;
+	return 1;
 }
 
 /**
@@ -1078,30 +1082,28 @@
 static uint32_t
 lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record)
 {
-	if ((sw_name[0] ==
-		bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record)) &&
-	    (sw_name[1] ==
-		bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record)) &&
-	    (sw_name[2] ==
-		bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record)) &&
-	    (sw_name[3] ==
-		bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record)) &&
-	    (sw_name[4] ==
-		bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record)) &&
-	    (sw_name[5] ==
-		bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record)) &&
-	    (sw_name[6] ==
-		bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record)) &&
-	    (sw_name[7] ==
-		bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record)))
-		return 1;
-	else
+	if (sw_name[0] != bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record))
 		return 0;
+	if (sw_name[1] != bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record))
+		return 0;
+	if (sw_name[2] != bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record))
+		return 0;
+	if (sw_name[3] != bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record))
+		return 0;
+	if (sw_name[4] != bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record))
+		return 0;
+	if (sw_name[5] != bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record))
+		return 0;
+	if (sw_name[6] != bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record))
+		return 0;
+	if (sw_name[7] != bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record))
+		return 0;
+	return 1;
 }
 
 /**
  * lpfc_mac_addr_match - Check if the fcf mac address match.
- * @phba: pointer to lpfc hba data structure.
+ * @mac_addr: pointer to mac address.
  * @new_fcf_record: pointer to fcf record.
  *
  * This routine compare the fcf record's mac address with HBA's
@@ -1109,85 +1111,115 @@
  * returns 1 else return 0.
  **/
 static uint32_t
-lpfc_mac_addr_match(struct lpfc_hba *phba, struct fcf_record *new_fcf_record)
+lpfc_mac_addr_match(uint8_t *mac_addr, struct fcf_record *new_fcf_record)
 {
-	if ((phba->fcf.mac_addr[0] ==
-		bf_get(lpfc_fcf_record_mac_0, new_fcf_record)) &&
-	    (phba->fcf.mac_addr[1] ==
-		bf_get(lpfc_fcf_record_mac_1, new_fcf_record)) &&
-	    (phba->fcf.mac_addr[2] ==
-		bf_get(lpfc_fcf_record_mac_2, new_fcf_record)) &&
-	    (phba->fcf.mac_addr[3] ==
-		bf_get(lpfc_fcf_record_mac_3, new_fcf_record)) &&
-	    (phba->fcf.mac_addr[4] ==
-		bf_get(lpfc_fcf_record_mac_4, new_fcf_record)) &&
-	    (phba->fcf.mac_addr[5] ==
-		bf_get(lpfc_fcf_record_mac_5, new_fcf_record)))
-		return 1;
-	else
+	if (mac_addr[0] != bf_get(lpfc_fcf_record_mac_0, new_fcf_record))
 		return 0;
+	if (mac_addr[1] != bf_get(lpfc_fcf_record_mac_1, new_fcf_record))
+		return 0;
+	if (mac_addr[2] != bf_get(lpfc_fcf_record_mac_2, new_fcf_record))
+		return 0;
+	if (mac_addr[3] != bf_get(lpfc_fcf_record_mac_3, new_fcf_record))
+		return 0;
+	if (mac_addr[4] != bf_get(lpfc_fcf_record_mac_4, new_fcf_record))
+		return 0;
+	if (mac_addr[5] != bf_get(lpfc_fcf_record_mac_5, new_fcf_record))
+		return 0;
+	return 1;
+}
+
+static bool
+lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id)
+{
+	return (curr_vlan_id == new_vlan_id);
 }
 
 /**
  * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba.
- * @phba: pointer to lpfc hba data structure.
+ * @fcf_rec: pointer to driver fcf record.
  * @new_fcf_record: pointer to fcf record.
  *
  * This routine copies the FCF information from the FCF
  * record to lpfc_hba data structure.
  **/
 static void
-lpfc_copy_fcf_record(struct lpfc_hba *phba, struct fcf_record *new_fcf_record)
+lpfc_copy_fcf_record(struct lpfc_fcf_rec *fcf_rec,
+		     struct fcf_record *new_fcf_record)
 {
-	phba->fcf.fabric_name[0] =
+	/* Fabric name */
+	fcf_rec->fabric_name[0] =
 		bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record);
-	phba->fcf.fabric_name[1] =
+	fcf_rec->fabric_name[1] =
 		bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record);
-	phba->fcf.fabric_name[2] =
+	fcf_rec->fabric_name[2] =
 		bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record);
-	phba->fcf.fabric_name[3] =
+	fcf_rec->fabric_name[3] =
 		bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record);
-	phba->fcf.fabric_name[4] =
+	fcf_rec->fabric_name[4] =
 		bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record);
-	phba->fcf.fabric_name[5] =
+	fcf_rec->fabric_name[5] =
 		bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record);
-	phba->fcf.fabric_name[6] =
+	fcf_rec->fabric_name[6] =
 		bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record);
-	phba->fcf.fabric_name[7] =
+	fcf_rec->fabric_name[7] =
 		bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record);
-	phba->fcf.mac_addr[0] =
-		bf_get(lpfc_fcf_record_mac_0, new_fcf_record);
-	phba->fcf.mac_addr[1] =
-		bf_get(lpfc_fcf_record_mac_1, new_fcf_record);
-	phba->fcf.mac_addr[2] =
-		bf_get(lpfc_fcf_record_mac_2, new_fcf_record);
-	phba->fcf.mac_addr[3] =
-		bf_get(lpfc_fcf_record_mac_3, new_fcf_record);
-	phba->fcf.mac_addr[4] =
-		bf_get(lpfc_fcf_record_mac_4, new_fcf_record);
-	phba->fcf.mac_addr[5] =
-		bf_get(lpfc_fcf_record_mac_5, new_fcf_record);
-	phba->fcf.fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
-	phba->fcf.priority = new_fcf_record->fip_priority;
-	phba->fcf.switch_name[0] =
+	/* Mac address */
+	fcf_rec->mac_addr[0] = bf_get(lpfc_fcf_record_mac_0, new_fcf_record);
+	fcf_rec->mac_addr[1] = bf_get(lpfc_fcf_record_mac_1, new_fcf_record);
+	fcf_rec->mac_addr[2] = bf_get(lpfc_fcf_record_mac_2, new_fcf_record);
+	fcf_rec->mac_addr[3] = bf_get(lpfc_fcf_record_mac_3, new_fcf_record);
+	fcf_rec->mac_addr[4] = bf_get(lpfc_fcf_record_mac_4, new_fcf_record);
+	fcf_rec->mac_addr[5] = bf_get(lpfc_fcf_record_mac_5, new_fcf_record);
+	/* FCF record index */
+	fcf_rec->fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
+	/* FCF record priority */
+	fcf_rec->priority = new_fcf_record->fip_priority;
+	/* Switch name */
+	fcf_rec->switch_name[0] =
 		bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record);
-	phba->fcf.switch_name[1] =
+	fcf_rec->switch_name[1] =
 		bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record);
-	phba->fcf.switch_name[2] =
+	fcf_rec->switch_name[2] =
 		bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record);
-	phba->fcf.switch_name[3] =
+	fcf_rec->switch_name[3] =
 		bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record);
-	phba->fcf.switch_name[4] =
+	fcf_rec->switch_name[4] =
 		bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record);
-	phba->fcf.switch_name[5] =
+	fcf_rec->switch_name[5] =
 		bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record);
-	phba->fcf.switch_name[6] =
+	fcf_rec->switch_name[6] =
 		bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record);
-	phba->fcf.switch_name[7] =
+	fcf_rec->switch_name[7] =
 		bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record);
 }
 
 /**
+ * lpfc_update_fcf_record - Update driver fcf record
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_rec: pointer to driver fcf record.
+ * @new_fcf_record: pointer to hba fcf record.
+ * @addr_mode: address mode to be set to the driver fcf record.
+ * @vlan_id: vlan tag to be set to the driver fcf record.
+ * @flag: flag bits to be set to the driver fcf record.
+ *
+ * This routine updates the driver FCF record from the new HBA FCF record
+ * together with the address mode, vlan_id, and other information. This
+ * routine is called with the hbalock held.
+ **/
+static void
+__lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec,
+		       struct fcf_record *new_fcf_record, uint32_t addr_mode,
+		       uint16_t vlan_id, uint32_t flag)
+{
+	/* Copy the fields from the HBA's FCF record */
+	lpfc_copy_fcf_record(fcf_rec, new_fcf_record);
+	/* Update other fields of driver FCF record */
+	fcf_rec->addr_mode = addr_mode;
+	fcf_rec->vlan_id = vlan_id;
+	fcf_rec->flag |= (flag | RECORD_VALID);
+}
+
+/**
  * lpfc_register_fcf - Register the FCF with hba.
  * @phba: pointer to lpfc hba data structure.
  *
@@ -1212,7 +1244,7 @@
 
 	/* The FCF is already registered, start discovery */
 	if (phba->fcf.fcf_flag & FCF_REGISTERED) {
-		phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE);
+		phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
 		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
 		spin_unlock_irqrestore(&phba->hbalock, flags);
 		if (phba->pport->port_state != LPFC_FLOGI)
@@ -1250,6 +1282,7 @@
  * @new_fcf_record: pointer to fcf record.
  * @boot_flag: Indicates if this record used by boot bios.
  * @addr_mode: The address mode to be used by this FCF
+ * @vlan_id: The vlan id to be used for vlan tagging by this FCF.
  *
  * This routine compare the fcf record with connect list obtained from the
  * config region to decide if this FCF can be used for SAN discovery. It returns
@@ -1323,7 +1356,8 @@
 		return 1;
 	}
 
-	list_for_each_entry(conn_entry, &phba->fcf_conn_rec_list, list) {
+	list_for_each_entry(conn_entry,
+			    &phba->fcf_conn_rec_list, list) {
 		if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID))
 			continue;
 
@@ -1470,6 +1504,7 @@
 		 */
 		spin_lock_irq(&phba->hbalock);
 		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+		phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
 		spin_unlock_irq(&phba->hbalock);
 	}
 
@@ -1524,11 +1559,12 @@
 	uint32_t shdr_status, shdr_add_status;
 	union lpfc_sli4_cfg_shdr *shdr;
 	struct fcf_record *new_fcf_record;
-	int rc;
 	uint32_t boot_flag, addr_mode;
 	uint32_t next_fcf_index;
-	unsigned long flags;
+	struct lpfc_fcf_rec *fcf_rec = NULL;
+	unsigned long iflags;
 	uint16_t vlan_id;
+	int rc;
 
 	/* If there is pending FCoE event restart FCF table scan */
 	if (lpfc_check_pending_fcoe_event(phba, 0)) {
@@ -1583,9 +1619,8 @@
 			      sizeof(struct fcf_record));
 	bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
 
-	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record,
-				      &boot_flag, &addr_mode,
-					&vlan_id);
+	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
+				      &addr_mode, &vlan_id);
 	/*
 	 * If the fcf record does not match with connect list entries
 	 * read the next entry.
@@ -1594,90 +1629,159 @@
 		goto read_next_fcf;
 	/*
 	 * If this is not the first FCF discovery of the HBA, use last
-	 * FCF record for the discovery.
+	 * FCF record for the discovery. The condition that a rescan
+	 * matches the in-use FCF record: fabric name, switch name, mac
+	 * address, and vlan_id.
 	 */
-	spin_lock_irqsave(&phba->hbalock, flags);
+	spin_lock_irqsave(&phba->hbalock, iflags);
 	if (phba->fcf.fcf_flag & FCF_IN_USE) {
-		if (lpfc_fab_name_match(phba->fcf.fabric_name,
+		if (lpfc_fab_name_match(phba->fcf.current_rec.fabric_name,
 					new_fcf_record) &&
-		    lpfc_sw_name_match(phba->fcf.switch_name,
+		    lpfc_sw_name_match(phba->fcf.current_rec.switch_name,
 					new_fcf_record) &&
-		    lpfc_mac_addr_match(phba, new_fcf_record)) {
+		    lpfc_mac_addr_match(phba->fcf.current_rec.mac_addr,
+					new_fcf_record) &&
+		    lpfc_vlan_id_match(phba->fcf.current_rec.vlan_id,
+					vlan_id)) {
 			phba->fcf.fcf_flag |= FCF_AVAILABLE;
-			spin_unlock_irqrestore(&phba->hbalock, flags);
+			if (phba->fcf.fcf_flag & FCF_REDISC_PEND)
+				/* Stop FCF redisc wait timer if pending */
+				__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
+			else if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
+				/* If in fast failover, mark it's completed */
+				phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
+			spin_unlock_irqrestore(&phba->hbalock, iflags);
 			goto out;
 		}
-		spin_unlock_irqrestore(&phba->hbalock, flags);
-		goto read_next_fcf;
+		/*
+		 * Read the next FCF record from the HBA, searching for a
+		 * match with the in-use record, only if we are not in the
+		 * fast failover period. During fast failover, instead
+		 * determine whether the FCF record just read should be
+		 * the next failover candidate.
+		 */
+		if (!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
+			spin_unlock_irqrestore(&phba->hbalock, iflags);
+			goto read_next_fcf;
+		}
 	}
+	/*
+	 * Update the failover FCF record only during the FCF fast-failover
+	 * period; otherwise, update the current FCF record.
+	 */
+	if (phba->fcf.fcf_flag & FCF_REDISC_FOV) {
+		/* Fast FCF failover only to the same fabric name */
+		if (lpfc_fab_name_match(phba->fcf.current_rec.fabric_name,
+					new_fcf_record))
+			fcf_rec = &phba->fcf.failover_rec;
+		else
+			goto read_next_fcf;
+	} else
+		fcf_rec = &phba->fcf.current_rec;
+
 	if (phba->fcf.fcf_flag & FCF_AVAILABLE) {
 		/*
-		 * If the current FCF record does not have boot flag
-		 * set and new fcf record has boot flag set, use the
-		 * new fcf record.
+		 * If the driver FCF record does not have the boot flag
+		 * set and the new HBA FCF record does, use the new
+		 * HBA FCF record.
 		 */
-		if (boot_flag && !(phba->fcf.fcf_flag & FCF_BOOT_ENABLE)) {
-			/* Use this FCF record */
-			lpfc_copy_fcf_record(phba, new_fcf_record);
-			phba->fcf.addr_mode = addr_mode;
-			phba->fcf.fcf_flag |= FCF_BOOT_ENABLE;
-			if (vlan_id != 0xFFFF) {
-				phba->fcf.fcf_flag |= FCF_VALID_VLAN;
-				phba->fcf.vlan_id = vlan_id;
-			}
-			spin_unlock_irqrestore(&phba->hbalock, flags);
+		if (boot_flag && !(fcf_rec->flag & BOOT_ENABLE)) {
+			/* Choose this FCF record */
+			__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
+					addr_mode, vlan_id, BOOT_ENABLE);
+			spin_unlock_irqrestore(&phba->hbalock, iflags);
 			goto read_next_fcf;
 		}
 		/*
-		 * If the current FCF record has boot flag set and the
-		 * new FCF record does not have boot flag, read the next
-		 * FCF record.
+		 * If the driver FCF record has the boot flag set and
+		 * the new HBA FCF record does not, read the next FCF
+		 * record.
 		 */
-		if (!boot_flag && (phba->fcf.fcf_flag & FCF_BOOT_ENABLE)) {
-			spin_unlock_irqrestore(&phba->hbalock, flags);
+		if (!boot_flag && (fcf_rec->flag & BOOT_ENABLE)) {
+			spin_unlock_irqrestore(&phba->hbalock, iflags);
 			goto read_next_fcf;
 		}
 		/*
-		 * If there is a record with lower priority value for
-		 * the current FCF, use that record.
+		 * If the new HBA FCF record has a lower priority value
+		 * than the driver FCF record, use the new record.
 		 */
-		if (lpfc_fab_name_match(phba->fcf.fabric_name,
-					new_fcf_record) &&
-		    (new_fcf_record->fip_priority < phba->fcf.priority)) {
-			/* Use this FCF record */
-			lpfc_copy_fcf_record(phba, new_fcf_record);
-			phba->fcf.addr_mode = addr_mode;
-			if (vlan_id != 0xFFFF) {
-				phba->fcf.fcf_flag |= FCF_VALID_VLAN;
-				phba->fcf.vlan_id = vlan_id;
-			}
-			spin_unlock_irqrestore(&phba->hbalock, flags);
-			goto read_next_fcf;
+		if (lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record) &&
+		    (new_fcf_record->fip_priority < fcf_rec->priority)) {
+			/* Choose this FCF record */
+			__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
+					addr_mode, vlan_id, 0);
 		}
-		spin_unlock_irqrestore(&phba->hbalock, flags);
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
 		goto read_next_fcf;
 	}
 	/*
-	 * This is the first available FCF record, use this
-	 * record.
+	 * This is the first suitable FCF record; choose it as the
+	 * initial best-fit FCF.
 	 */
-	lpfc_copy_fcf_record(phba, new_fcf_record);
-	phba->fcf.addr_mode = addr_mode;
-	if (boot_flag)
-		phba->fcf.fcf_flag |= FCF_BOOT_ENABLE;
-	phba->fcf.fcf_flag |= FCF_AVAILABLE;
-	if (vlan_id != 0xFFFF) {
-		phba->fcf.fcf_flag |= FCF_VALID_VLAN;
-		phba->fcf.vlan_id = vlan_id;
+	if (fcf_rec) {
+		__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
+					 addr_mode, vlan_id, (boot_flag ?
+					 BOOT_ENABLE : 0));
+		phba->fcf.fcf_flag |= FCF_AVAILABLE;
 	}
-	spin_unlock_irqrestore(&phba->hbalock, flags);
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
 	goto read_next_fcf;
 
 read_next_fcf:
 	lpfc_sli4_mbox_cmd_free(phba, mboxq);
-	if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0)
-		lpfc_register_fcf(phba);
-	else
+	if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0) {
+		if (phba->fcf.fcf_flag & FCF_REDISC_FOV) {
+			/*
+			 * Case of FCF fast failover scan
+			 */
+
+			/*
+			 * No suitable FCF record was found; cancel the
+			 * in-progress FCF scan and do nothing.
+			 */
+			if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) {
+				spin_lock_irqsave(&phba->hbalock, iflags);
+				phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+				spin_unlock_irqrestore(&phba->hbalock, iflags);
+				return;
+			}
+			/*
+			 * A suitable FCF record that differs from the
+			 * in-use FCF record was found: unregister the
+			 * in-use FCF record, replace it with the new
+			 * FCF record, mark FCF fast failover completed,
+			 * and then start registering the new FCF
+			 * record.
+			 */
+
+			/* unregister the current in-use FCF record */
+			lpfc_unregister_fcf(phba);
+			/* replace in-use record with the new record */
+			memcpy(&phba->fcf.current_rec,
+			       &phba->fcf.failover_rec,
+			       sizeof(struct lpfc_fcf_rec));
+			/* mark the FCF fast failover completed */
+			spin_lock_irqsave(&phba->hbalock, iflags);
+			phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
+			spin_unlock_irqrestore(&phba->hbalock, iflags);
+			/* Register to the new FCF record */
+			lpfc_register_fcf(phba);
+		} else {
+			/*
+			 * During the transition period to fast FCF failover, do
+			 * nothing when the search reaches the end of the FCF table.
+			 */
+			if ((phba->fcf.fcf_flag & FCF_REDISC_EVT) ||
+			    (phba->fcf.fcf_flag & FCF_REDISC_PEND))
+				return;
+			/*
+			 * Otherwise, for an initial scan or a post-linkdown
+			 * rescan, register the best-fit FCF record found so
+			 * far through the scanning process.
+			 */
+			lpfc_register_fcf(phba);
+		}
+	} else
 		lpfc_sli4_read_fcf_record(phba, next_fcf_index);
 	return;
 
@@ -1695,10 +1799,13 @@
  *
  * This function handles completion of init vpi mailbox command.
  */
-static void
+void
 lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 {
 	struct lpfc_vport *vport = mboxq->vport;
+	struct lpfc_nodelist *ndlp;
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
 	if (mboxq->u.mb.mbxStatus) {
 		lpfc_printf_vlog(vport, KERN_ERR,
 				LOG_MBOX,
@@ -1708,9 +1815,23 @@
 		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
 		return;
 	}
-	spin_lock_irq(&phba->hbalock);
+	spin_lock_irq(shost->host_lock);
 	vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
-	spin_unlock_irq(&phba->hbalock);
+	spin_unlock_irq(shost->host_lock);
+
+	/* If this port is the physical port or FDISC is done, do reg_vpi */
+	if ((phba->pport == vport) || (vport->port_state == LPFC_FDISC)) {
+			ndlp = lpfc_findnode_did(vport, Fabric_DID);
+			if (!ndlp)
+				lpfc_printf_vlog(vport, KERN_ERR,
+					LOG_DISCOVERY,
+					"2731 Cannot find fabric "
+					"controller node\n");
+			else
+				lpfc_register_new_vport(phba, vport, ndlp);
+			mempool_free(mboxq, phba->mbox_mem_pool);
+			return;
+	}
 
 	if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
 		lpfc_initial_fdisc(vport);
@@ -1719,10 +1840,42 @@
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
 				 "2606 No NPIV Fabric support\n");
 	}
+	mempool_free(mboxq, phba->mbox_mem_pool);
 	return;
 }
 
 /**
+ * lpfc_issue_init_vpi - Issue init_vpi mailbox command.
+ * @vport: pointer to lpfc_vport data structure.
+ *
+ * This function issues an init_vpi mailbox command to initialize
+ * the VPI for the vport.
+ */
+void
+lpfc_issue_init_vpi(struct lpfc_vport *vport)
+{
+	LPFC_MBOXQ_t *mboxq;
+	int rc;
+
+	mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq) {
+		lpfc_printf_vlog(vport, KERN_ERR,
+			LOG_MBOX, "2607 Failed to allocate "
+			"init_vpi mailbox\n");
+		return;
+	}
+	lpfc_init_vpi(vport->phba, mboxq, vport->vpi);
+	mboxq->vport = vport;
+	mboxq->mbox_cmpl = lpfc_init_vpi_cmpl;
+	rc = lpfc_sli_issue_mbox(vport->phba, mboxq, MBX_NOWAIT);
+	if (rc == MBX_NOT_FINISHED) {
+		lpfc_printf_vlog(vport, KERN_ERR,
+			LOG_MBOX, "2608 Failed to issue init_vpi mailbox\n");
+		mempool_free(mboxq, vport->phba->mbox_mem_pool);
+	}
+}
+
+/**
  * lpfc_start_fdiscs - send fdiscs for each vports on this port.
  * @phba: pointer to lpfc hba data structure.
  *
@@ -1734,8 +1887,6 @@
 {
 	struct lpfc_vport **vports;
 	int i;
-	LPFC_MBOXQ_t *mboxq;
-	int rc;
 
 	vports = lpfc_create_vport_work_array(phba);
 	if (vports != NULL) {
@@ -1754,26 +1905,7 @@
 				continue;
 			}
 			if (vports[i]->fc_flag & FC_VPORT_NEEDS_INIT_VPI) {
-				mboxq = mempool_alloc(phba->mbox_mem_pool,
-					GFP_KERNEL);
-				if (!mboxq) {
-					lpfc_printf_vlog(vports[i], KERN_ERR,
-					LOG_MBOX, "2607 Failed to allocate "
-					"init_vpi mailbox\n");
-					continue;
-				}
-				lpfc_init_vpi(phba, mboxq, vports[i]->vpi);
-				mboxq->vport = vports[i];
-				mboxq->mbox_cmpl = lpfc_init_vpi_cmpl;
-				rc = lpfc_sli_issue_mbox(phba, mboxq,
-					MBX_NOWAIT);
-				if (rc == MBX_NOT_FINISHED) {
-					lpfc_printf_vlog(vports[i], KERN_ERR,
-					LOG_MBOX, "2608 Failed to issue "
-					"init_vpi mailbox\n");
-					mempool_free(mboxq,
-						phba->mbox_mem_pool);
-				}
+				lpfc_issue_init_vpi(vports[i]);
 				continue;
 			}
 			if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
@@ -1796,6 +1928,7 @@
 {
 	struct lpfc_dmabuf *dmabuf = mboxq->context1;
 	struct lpfc_vport *vport = mboxq->vport;
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 
 	if (mboxq->u.mb.mbxStatus) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
@@ -1813,7 +1946,11 @@
 		goto fail_free_mem;
 	}
 	/* The VPI is implicitly registered when the VFI is registered */
+	spin_lock_irq(shost->host_lock);
 	vport->vpi_state |= LPFC_VPI_REGISTERED;
+	vport->fc_flag |= FC_VFI_REGISTERED;
+	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
+	spin_unlock_irq(shost->host_lock);
 
 	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
 		lpfc_start_fdiscs(phba);
@@ -2050,8 +2187,7 @@
 			return;
 		}
 		spin_unlock_irq(&phba->hbalock);
-		rc = lpfc_sli4_read_fcf_record(phba,
-					LPFC_FCOE_FCF_GET_FIRST);
+		rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
 		if (rc)
 			goto out;
 	}
@@ -2139,10 +2275,12 @@
 	}
 
 	phba->fc_eventTag = la->eventTag;
+	spin_lock_irq(&phba->hbalock);
 	if (la->mm)
 		phba->sli.sli_flag |= LPFC_MENLO_MAINT;
 	else
 		phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
+	spin_unlock_irq(&phba->hbalock);
 
 	phba->link_events++;
 	if (la->attType == AT_LINK_UP && (!la->mm)) {
@@ -2271,10 +2409,10 @@
 				 mb->mbxStatus);
 		break;
 	}
-	spin_lock_irq(&phba->hbalock);
+	spin_lock_irq(shost->host_lock);
 	vport->vpi_state &= ~LPFC_VPI_REGISTERED;
 	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
-	spin_unlock_irq(&phba->hbalock);
+	spin_unlock_irq(shost->host_lock);
 	vport->unreg_vpi_cmpl = VPORT_OK;
 	mempool_free(pmb, phba->mbox_mem_pool);
 	/*
@@ -2332,7 +2470,10 @@
 		goto out;
 	}
 
+	spin_lock_irq(shost->host_lock);
 	vport->vpi_state |= LPFC_VPI_REGISTERED;
+	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
+	spin_unlock_irq(shost->host_lock);
 	vport->num_disc_nodes = 0;
 	/* go thru NPR list and issue ELS PLOGIs */
 	if (vport->fc_npr_cnt)
@@ -3218,6 +3359,34 @@
 	return 0;
 }
 
+/**
+ * lpfc_unreg_hba_rpis - Unregister rpis registered to the hba.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to unregister all the RPIs currently registered
+ * to the HBA.
+ **/
+void
+lpfc_unreg_hba_rpis(struct lpfc_hba *phba)
+{
+	struct lpfc_vport **vports;
+	struct lpfc_nodelist *ndlp;
+	struct Scsi_Host *shost;
+	int i;
+
+	vports = lpfc_create_vport_work_array(phba);
+	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+		shost = lpfc_shost_from_vport(vports[i]);
+		spin_lock_irq(shost->host_lock);
+		list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
+			if (ndlp->nlp_flag & NLP_RPI_VALID)
+				lpfc_unreg_rpi(vports[i], ndlp);
+		}
+		spin_unlock_irq(shost->host_lock);
+	}
+	lpfc_destroy_vport_work_array(phba, vports);
+}
+
 void
 lpfc_unreg_all_rpis(struct lpfc_vport *vport)
 {
@@ -4448,6 +4617,195 @@
 }
 
 /**
+ * lpfc_unregister_fcf_prep - Unregister fcf record preparation
+ * @phba: Pointer to hba context object.
+ *
+ * This function prepares the HBA for unregistering the currently registered
+ * FCF from the HBA. It unregisters, in order, the RPIs, VPIs, and
+ * VFIs.
+ */
+int
+lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
+{
+	LPFC_MBOXQ_t *mbox;
+	struct lpfc_vport **vports;
+	struct lpfc_nodelist *ndlp;
+	struct Scsi_Host *shost;
+	int i, rc;
+
+	/* Unregister RPIs */
+	if (lpfc_fcf_inuse(phba))
+		lpfc_unreg_hba_rpis(phba);
+
+	/* At this point, all discovery is aborted */
+	phba->pport->port_state = LPFC_VPORT_UNKNOWN;
+
+	/* Unregister VPIs */
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports && (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+			/* Stop FLOGI/FDISC retries */
+			ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
+			if (ndlp)
+				lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
+			lpfc_mbx_unreg_vpi(vports[i]);
+			shost = lpfc_shost_from_vport(vports[i]);
+			spin_lock_irq(shost->host_lock);
+			vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+			vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
+			spin_unlock_irq(shost->host_lock);
+		}
+	lpfc_destroy_vport_work_array(phba, vports);
+
+	/* Cleanup any outstanding ELS commands */
+	lpfc_els_flush_all_cmd(phba);
+
+	/* Unregister VFI */
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+				"2556 UNREG_VFI mbox allocation failed "
+				"HBA state x%x\n", phba->pport->port_state);
+		return -ENOMEM;
+	}
+
+	lpfc_unreg_vfi(mbox, phba->pport);
+	mbox->vport = phba->pport;
+	mbox->mbox_cmpl = lpfc_unregister_vfi_cmpl;
+
+	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+	if (rc == MBX_NOT_FINISHED) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+				"2557 UNREG_VFI issue mbox failed rc x%x "
+				"HBA state x%x\n",
+				rc, phba->pport->port_state);
+		mempool_free(mbox, phba->mbox_mem_pool);
+		return -EIO;
+	}
+
+	shost = lpfc_shost_from_vport(phba->pport);
+	spin_lock_irq(shost->host_lock);
+	phba->pport->fc_flag &= ~FC_VFI_REGISTERED;
+	spin_unlock_irq(shost->host_lock);
+
+	return 0;
+}
+
+/**
+ * lpfc_sli4_unregister_fcf - Unregister currently registered FCF record
+ * @phba: Pointer to hba context object.
+ *
+ * This function issues a synchronous unregister FCF mailbox command to the
+ * HBA to unregister the currently registered FCF record. The driver does
+ * not reset the driver FCF usage state flags.
+ *
+ * Return 0 if successfully issued, non-zero otherwise.
+ */
+int
+lpfc_sli4_unregister_fcf(struct lpfc_hba *phba)
+{
+	LPFC_MBOXQ_t *mbox;
+	int rc;
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+				"2551 UNREG_FCFI mbox allocation failed "
+				"HBA state x%x\n", phba->pport->port_state);
+		return -ENOMEM;
+	}
+	lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
+	mbox->vport = phba->pport;
+	mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
+	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+
+	if (rc == MBX_NOT_FINISHED) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2552 Unregister FCFI command failed rc x%x "
+				"HBA state x%x\n",
+				rc, phba->pport->port_state);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/**
+ * lpfc_unregister_fcf_rescan - Unregister currently registered fcf and rescan
+ * @phba: Pointer to hba context object.
+ *
+ * This function unregisters the currently registered FCF. It also tries to
+ * find another FCF for discovery by rescanning the HBA FCF table.
+ */
+void
+lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
+{
+	int rc;
+
+	/* Preparation for unregistering fcf */
+	rc = lpfc_unregister_fcf_prep(phba);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+				"2748 Failed to prepare for unregistering "
+				"HBA's FCF record: rc=%d\n", rc);
+		return;
+	}
+
+	/* Now, unregister FCF record and reset HBA FCF state */
+	rc = lpfc_sli4_unregister_fcf(phba);
+	if (rc)
+		return;
+	/* Reset HBA FCF states after successful unregister FCF */
+	phba->fcf.fcf_flag = 0;
+
+	/*
+	 * If driver is not unloading, check if there is any other
+	 * FCF record that can be used for discovery.
+	 */
+	if ((phba->pport->load_flag & FC_UNLOADING) ||
+	    (phba->link_state < LPFC_LINK_UP))
+		return;
+
+	rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
+
+	if (rc)
+		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+				"2553 lpfc_unregister_unused_fcf failed "
+				"to read FCF record HBA state x%x\n",
+				phba->pport->port_state);
+}
+
+/**
+ * lpfc_unregister_fcf - Unregister the currently registered fcf record
+ * @phba: Pointer to hba context object.
+ *
+ * This function just unregisters the currently registered FCF. It does not
+ * try to find another FCF for discovery.
+ */
+void
+lpfc_unregister_fcf(struct lpfc_hba *phba)
+{
+	int rc;
+
+	/* Preparation for unregistering fcf */
+	rc = lpfc_unregister_fcf_prep(phba);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+				"2749 Failed to prepare for unregistering "
+				"HBA's FCF record: rc=%d\n", rc);
+		return;
+	}
+
+	/* Now, unregister FCF record and reset HBA FCF state */
+	rc = lpfc_sli4_unregister_fcf(phba);
+	if (rc)
+		return;
+	/* Set proper HBA FCF states after successful unregister FCF */
+	spin_lock_irq(&phba->hbalock);
+	phba->fcf.fcf_flag &= ~FCF_REGISTERED;
+	spin_unlock_irq(&phba->hbalock);
+}
+
+/**
  * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected.
  * @phba: Pointer to hba context object.
  *
@@ -4458,21 +4816,14 @@
 void
 lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
 {
-	LPFC_MBOXQ_t *mbox;
-	int rc;
-	struct lpfc_vport **vports;
-	int i;
-
-	spin_lock_irq(&phba->hbalock);
 	/*
-	 * If HBA is not running in FIP mode or
-	 * If HBA does not support FCoE or
-	 * If FCF is not registered.
-	 * do nothing.
+	 * If the HBA is not running in FIP mode, does not support FCoE,
+	 * or the FCF is not registered, do nothing.
 	 */
+	spin_lock_irq(&phba->hbalock);
 	if (!(phba->hba_flag & HBA_FCOE_SUPPORT) ||
-		!(phba->fcf.fcf_flag & FCF_REGISTERED) ||
-		(!(phba->hba_flag & HBA_FIP_SUPPORT))) {
+	    !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
+	    !(phba->hba_flag & HBA_FIP_SUPPORT)) {
 		spin_unlock_irq(&phba->hbalock);
 		return;
 	}
@@ -4481,91 +4832,7 @@
 	if (lpfc_fcf_inuse(phba))
 		return;
 
-	/* At this point, all discovery is aborted */
-	phba->pport->port_state = LPFC_VPORT_UNKNOWN;
-
-	/* Unregister VPIs */
-	vports = lpfc_create_vport_work_array(phba);
-	if (vports &&
-		(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
-		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
-			lpfc_mbx_unreg_vpi(vports[i]);
-			spin_lock_irq(&phba->hbalock);
-			vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
-			vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
-			spin_unlock_irq(&phba->hbalock);
-		}
-	lpfc_destroy_vport_work_array(phba, vports);
-
-	/* Unregister VFI */
-	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
-	if (!mbox) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
-			"2556 UNREG_VFI mbox allocation failed"
-			"HBA state x%x\n",
-			phba->pport->port_state);
-		return;
-	}
-
-	lpfc_unreg_vfi(mbox, phba->pport);
-	mbox->vport = phba->pport;
-	mbox->mbox_cmpl = lpfc_unregister_vfi_cmpl;
-
-	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
-	if (rc == MBX_NOT_FINISHED) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
-			"2557 UNREG_VFI issue mbox failed rc x%x "
-			"HBA state x%x\n",
-			rc, phba->pport->port_state);
-		mempool_free(mbox, phba->mbox_mem_pool);
-		return;
-	}
-
-	/* Unregister FCF */
-	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
-	if (!mbox) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
-			"2551 UNREG_FCFI mbox allocation failed"
-			"HBA state x%x\n",
-			phba->pport->port_state);
-		return;
-	}
-
-	lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
-	mbox->vport = phba->pport;
-	mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
-	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
-
-	if (rc == MBX_NOT_FINISHED) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
-			"2552 UNREG_FCFI issue mbox failed rc x%x "
-			"HBA state x%x\n",
-			rc, phba->pport->port_state);
-		mempool_free(mbox, phba->mbox_mem_pool);
-		return;
-	}
-
-	spin_lock_irq(&phba->hbalock);
-	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_REGISTERED |
-		FCF_DISCOVERED | FCF_BOOT_ENABLE | FCF_IN_USE |
-		FCF_VALID_VLAN);
-	spin_unlock_irq(&phba->hbalock);
-
-	/*
-	 * If driver is not unloading, check if there is any other
-	 * FCF record that can be used for discovery.
-	 */
-	if ((phba->pport->load_flag & FC_UNLOADING) ||
-		(phba->link_state < LPFC_LINK_UP))
-		return;
-
-	rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
-
-	if (rc)
-		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
-			"2553 lpfc_unregister_unused_fcf failed to read FCF"
-			" record HBA state x%x\n",
-			phba->pport->port_state);
+	lpfc_unregister_fcf_rescan(phba);
 }
 
 /**
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index c9faa1d..89ff7c0 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2010 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
@@ -1346,6 +1346,9 @@
 #define MBX_HEARTBEAT       0x31
 #define MBX_WRITE_VPARMS    0x32
 #define MBX_ASYNCEVT_ENABLE 0x33
+#define MBX_READ_EVENT_LOG_STATUS 0x37
+#define MBX_READ_EVENT_LOG  0x38
+#define MBX_WRITE_EVENT_LOG 0x39
 
 #define MBX_PORT_CAPABILITIES 0x3B
 #define MBX_PORT_IOV_CONTROL 0x3C
@@ -1465,17 +1468,13 @@
 #define CMD_IOCB_LOGENTRY_CN		0x94
 #define CMD_IOCB_LOGENTRY_ASYNC_CN	0x96
 
-/* Unhandled Data Security SLI Commands */
-#define DSSCMD_IWRITE64_CR 		0xD8
-#define DSSCMD_IWRITE64_CX		0xD9
-#define DSSCMD_IREAD64_CR		0xDA
-#define DSSCMD_IREAD64_CX		0xDB
-#define DSSCMD_INVALIDATE_DEK		0xDC
-#define DSSCMD_SET_KEK			0xDD
-#define DSSCMD_GET_KEK_ID		0xDE
-#define DSSCMD_GEN_XFER			0xDF
+/* Data Security SLI Commands */
+#define DSSCMD_IWRITE64_CR		0xF8
+#define DSSCMD_IWRITE64_CX		0xF9
+#define DSSCMD_IREAD64_CR		0xFA
+#define DSSCMD_IREAD64_CX		0xFB
 
-#define CMD_MAX_IOCB_CMD        0xE6
+#define CMD_MAX_IOCB_CMD        0xFB
 #define CMD_IOCB_MASK           0xff
 
 #define MAX_MSG_DATA            28	/* max msg data in CMD_ADAPTER_MSG
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 8a2a1c5..820015f 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -52,35 +52,37 @@
 	uint32_t addr_hi;
 };
 
-#define LPFC_SLIREV_CONF_WORD	0x58
 struct lpfc_sli_intf {
 	uint32_t word0;
-#define lpfc_sli_intf_iftype_MASK 	0x00000007
-#define lpfc_sli_intf_iftype_SHIFT	0
-#define lpfc_sli_intf_iftype_WORD	word0
-#define lpfc_sli_intf_rev_MASK 		0x0000000f
-#define lpfc_sli_intf_rev_SHIFT		4
-#define lpfc_sli_intf_rev_WORD		word0
-#define LPFC_SLIREV_CONF_SLI4	4
-#define lpfc_sli_intf_family_MASK 	0x000000ff
-#define lpfc_sli_intf_family_SHIFT	8
-#define lpfc_sli_intf_family_WORD	word0
-#define lpfc_sli_intf_feat1_MASK 	0x000000ff
-#define lpfc_sli_intf_feat1_SHIFT	16
-#define lpfc_sli_intf_feat1_WORD	word0
-#define lpfc_sli_intf_feat2_MASK 	0x0000001f
-#define lpfc_sli_intf_feat2_SHIFT	24
-#define lpfc_sli_intf_feat2_WORD	word0
-#define lpfc_sli_intf_valid_MASK 	0x00000007
-#define lpfc_sli_intf_valid_SHIFT	29
-#define lpfc_sli_intf_valid_WORD	word0
+#define lpfc_sli_intf_valid_SHIFT		29
+#define lpfc_sli_intf_valid_MASK		0x00000007
+#define lpfc_sli_intf_valid_WORD		word0
 #define LPFC_SLI_INTF_VALID		6
+#define lpfc_sli_intf_featurelevel2_SHIFT	24
+#define lpfc_sli_intf_featurelevel2_MASK	0x0000001F
+#define lpfc_sli_intf_featurelevel2_WORD	word0
+#define lpfc_sli_intf_featurelevel1_SHIFT	16
+#define lpfc_sli_intf_featurelevel1_MASK	0x000000FF
+#define lpfc_sli_intf_featurelevel1_WORD	word0
+#define LPFC_SLI_INTF_FEATURELEVEL1_1	1
+#define LPFC_SLI_INTF_FEATURELEVEL1_2	2
+#define lpfc_sli_intf_sli_family_SHIFT		8
+#define lpfc_sli_intf_sli_family_MASK		0x000000FF
+#define lpfc_sli_intf_sli_family_WORD		word0
+#define LPFC_SLI_INTF_FAMILY_BE2	0
+#define LPFC_SLI_INTF_FAMILY_BE3	1
+#define lpfc_sli_intf_slirev_SHIFT		4
+#define lpfc_sli_intf_slirev_MASK		0x0000000F
+#define lpfc_sli_intf_slirev_WORD		word0
+#define LPFC_SLI_INTF_REV_SLI3		3
+#define LPFC_SLI_INTF_REV_SLI4		4
+#define lpfc_sli_intf_if_type_SHIFT		0
+#define lpfc_sli_intf_if_type_MASK		0x00000007
+#define lpfc_sli_intf_if_type_WORD		word0
+#define LPFC_SLI_INTF_IF_TYPE_0		0
+#define LPFC_SLI_INTF_IF_TYPE_1		1
 };
 
-#define LPFC_SLI4_BAR0		1
-#define LPFC_SLI4_BAR1		2
-#define LPFC_SLI4_BAR2		4
-
 #define LPFC_SLI4_MBX_EMBED	true
 #define LPFC_SLI4_MBX_NEMBED	false
 
@@ -161,6 +163,9 @@
 #define LPFC_FP_DEF_IMAX       10000
 #define LPFC_SP_DEF_IMAX       10000
 
+/* PORT_CAPABILITIES constants. */
+#define LPFC_MAX_SUPPORTED_PAGES	8
+
 struct ulp_bde64 {
 	union ULP_BDE_TUS {
 		uint32_t w;
@@ -516,7 +521,7 @@
 #define LPFC_UERR_STATUS_LO		0x00A0
 #define LPFC_UE_MASK_HI			0x00AC
 #define LPFC_UE_MASK_LO			0x00A8
-#define LPFC_SCRATCHPAD			0x0058
+#define LPFC_SLI_INTF			0x0058
 
 /* BAR0 Registers */
 #define LPFC_HST_STATE			0x00AC
@@ -576,19 +581,6 @@
 #define LPFC_POST_STAGE_ARMFW_READY			0xC000
 #define LPFC_POST_STAGE_ARMFW_UE 			0xF000
 
-#define lpfc_scratchpad_slirev_SHIFT			4
-#define lpfc_scratchpad_slirev_MASK			0xF
-#define lpfc_scratchpad_slirev_WORD			word0
-#define lpfc_scratchpad_chiptype_SHIFT			8
-#define lpfc_scratchpad_chiptype_MASK			0xFF
-#define lpfc_scratchpad_chiptype_WORD			word0
-#define lpfc_scratchpad_featurelevel1_SHIFT		16
-#define lpfc_scratchpad_featurelevel1_MASK		0xFF
-#define lpfc_scratchpad_featurelevel1_WORD		word0
-#define lpfc_scratchpad_featurelevel2_SHIFT		24
-#define lpfc_scratchpad_featurelevel2_MASK		0xFF
-#define lpfc_scratchpad_featurelevel2_WORD		word0
-
 /* BAR1 Registers */
 #define LPFC_IMR_MASK_ALL	0xFFFFFFFF
 #define LPFC_ISCR_CLEAR_ALL	0xFFFFFFFF
@@ -801,6 +793,7 @@
 #define LPFC_MBOX_OPCODE_FCOE_ADD_FCF			0x09
 #define LPFC_MBOX_OPCODE_FCOE_DELETE_FCF		0x0A
 #define LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE		0x0B
+#define LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF		0x10
 
 /* Mailbox command structures */
 struct eq_context {
@@ -1149,10 +1142,7 @@
 						this  flag !! */
 #define lpfc_sli4_sge_last_MASK		0x00000001
 #define lpfc_sli4_sge_last_WORD		word2
-	uint32_t word3;
-#define lpfc_sli4_sge_len_SHIFT		0
-#define lpfc_sli4_sge_len_MASK		0x0001FFFF
-#define lpfc_sli4_sge_len_WORD		word3
+	uint32_t sge_len;
 };
 
 struct fcf_record {
@@ -1301,6 +1291,19 @@
 #define lpfc_mbx_del_fcf_tbl_index_WORD		word10
 };
 
+struct lpfc_mbx_redisc_fcf_tbl {
+	struct mbox_header header;
+	uint32_t word10;
+#define lpfc_mbx_redisc_fcf_count_SHIFT		0
+#define lpfc_mbx_redisc_fcf_count_MASK		0x0000FFFF
+#define lpfc_mbx_redisc_fcf_count_WORD		word10
+	uint32_t resvd;
+	uint32_t word12;
+#define lpfc_mbx_redisc_fcf_index_SHIFT		0
+#define lpfc_mbx_redisc_fcf_index_MASK		0x0000FFFF
+#define lpfc_mbx_redisc_fcf_index_WORD		word12
+};
+
 struct lpfc_mbx_query_fw_cfg {
 	struct mbox_header header;
 	uint32_t config_number;
@@ -1834,6 +1837,177 @@
 #define lpfc_mbx_rq_ftr_rsp_ifip_WORD		word3
 };
 
+struct lpfc_mbx_supp_pages {
+	uint32_t word1;
+#define qs_SHIFT 				0
+#define qs_MASK					0x00000001
+#define qs_WORD					word1
+#define wr_SHIFT				1
+#define wr_MASK 				0x00000001
+#define wr_WORD					word1
+#define pf_SHIFT				8
+#define pf_MASK					0x000000ff
+#define pf_WORD					word1
+#define cpn_SHIFT				16
+#define cpn_MASK				0x000000ff
+#define cpn_WORD				word1
+	uint32_t word2;
+#define list_offset_SHIFT 			0
+#define list_offset_MASK			0x000000ff
+#define list_offset_WORD			word2
+#define next_offset_SHIFT			8
+#define next_offset_MASK			0x000000ff
+#define next_offset_WORD			word2
+#define elem_cnt_SHIFT				16
+#define elem_cnt_MASK				0x000000ff
+#define elem_cnt_WORD				word2
+	uint32_t word3;
+#define pn_0_SHIFT				24
+#define pn_0_MASK  				0x000000ff
+#define pn_0_WORD				word3
+#define pn_1_SHIFT				16
+#define pn_1_MASK				0x000000ff
+#define pn_1_WORD				word3
+#define pn_2_SHIFT				8
+#define pn_2_MASK				0x000000ff
+#define pn_2_WORD				word3
+#define pn_3_SHIFT				0
+#define pn_3_MASK				0x000000ff
+#define pn_3_WORD				word3
+	uint32_t word4;
+#define pn_4_SHIFT				24
+#define pn_4_MASK				0x000000ff
+#define pn_4_WORD				word4
+#define pn_5_SHIFT				16
+#define pn_5_MASK				0x000000ff
+#define pn_5_WORD				word4
+#define pn_6_SHIFT				8
+#define pn_6_MASK				0x000000ff
+#define pn_6_WORD				word4
+#define pn_7_SHIFT				0
+#define pn_7_MASK				0x000000ff
+#define pn_7_WORD				word4
+	uint32_t rsvd[27];
+#define LPFC_SUPP_PAGES			0
+#define LPFC_BLOCK_GUARD_PROFILES	1
+#define LPFC_SLI4_PARAMETERS		2
+};
+
+struct lpfc_mbx_sli4_params {
+	uint32_t word1;
+#define qs_SHIFT				0
+#define qs_MASK					0x00000001
+#define qs_WORD					word1
+#define wr_SHIFT				1
+#define wr_MASK					0x00000001
+#define wr_WORD					word1
+#define pf_SHIFT				8
+#define pf_MASK					0x000000ff
+#define pf_WORD					word1
+#define cpn_SHIFT				16
+#define cpn_MASK				0x000000ff
+#define cpn_WORD				word1
+	uint32_t word2;
+#define if_type_SHIFT				0
+#define if_type_MASK				0x00000007
+#define if_type_WORD				word2
+#define sli_rev_SHIFT				4
+#define sli_rev_MASK				0x0000000f
+#define sli_rev_WORD				word2
+#define sli_family_SHIFT			8
+#define sli_family_MASK				0x000000ff
+#define sli_family_WORD				word2
+#define featurelevel_1_SHIFT			16
+#define featurelevel_1_MASK			0x000000ff
+#define featurelevel_1_WORD			word2
+#define featurelevel_2_SHIFT			24
+#define featurelevel_2_MASK			0x0000001f
+#define featurelevel_2_WORD			word2
+	uint32_t word3;
+#define fcoe_SHIFT 				0
+#define fcoe_MASK				0x00000001
+#define fcoe_WORD				word3
+#define fc_SHIFT				1
+#define fc_MASK					0x00000001
+#define fc_WORD					word3
+#define nic_SHIFT				2
+#define nic_MASK				0x00000001
+#define nic_WORD				word3
+#define iscsi_SHIFT				3
+#define iscsi_MASK				0x00000001
+#define iscsi_WORD				word3
+#define rdma_SHIFT				4
+#define rdma_MASK				0x00000001
+#define rdma_WORD				word3
+	uint32_t sge_supp_len;
+	uint32_t word5;
+#define if_page_sz_SHIFT			0
+#define if_page_sz_MASK				0x0000ffff
+#define if_page_sz_WORD				word5
+#define loopbk_scope_SHIFT			24
+#define loopbk_scope_MASK			0x0000000f
+#define loopbk_scope_WORD			word5
+#define rq_db_window_SHIFT			28
+#define rq_db_window_MASK			0x0000000f
+#define rq_db_window_WORD			word5
+	uint32_t word6;
+#define eq_pages_SHIFT				0
+#define eq_pages_MASK				0x0000000f
+#define eq_pages_WORD				word6
+#define eqe_size_SHIFT				8
+#define eqe_size_MASK				0x000000ff
+#define eqe_size_WORD				word6
+	uint32_t word7;
+#define cq_pages_SHIFT				0
+#define cq_pages_MASK				0x0000000f
+#define cq_pages_WORD				word7
+#define cqe_size_SHIFT				8
+#define cqe_size_MASK				0x000000ff
+#define cqe_size_WORD				word7
+	uint32_t word8;
+#define mq_pages_SHIFT				0
+#define mq_pages_MASK				0x0000000f
+#define mq_pages_WORD				word8
+#define mqe_size_SHIFT				8
+#define mqe_size_MASK				0x000000ff
+#define mqe_size_WORD				word8
+#define mq_elem_cnt_SHIFT			16
+#define mq_elem_cnt_MASK			0x000000ff
+#define mq_elem_cnt_WORD			word8
+	uint32_t word9;
+#define wq_pages_SHIFT				0
+#define wq_pages_MASK				0x0000ffff
+#define wq_pages_WORD				word9
+#define wqe_size_SHIFT				8
+#define wqe_size_MASK				0x000000ff
+#define wqe_size_WORD				word9
+	uint32_t word10;
+#define rq_pages_SHIFT				0
+#define rq_pages_MASK				0x0000ffff
+#define rq_pages_WORD				word10
+#define rqe_size_SHIFT				8
+#define rqe_size_MASK				0x000000ff
+#define rqe_size_WORD				word10
+	uint32_t word11;
+#define hdr_pages_SHIFT				0
+#define hdr_pages_MASK				0x0000000f
+#define hdr_pages_WORD				word11
+#define hdr_size_SHIFT				8
+#define hdr_size_MASK				0x0000000f
+#define hdr_size_WORD				word11
+#define hdr_pp_align_SHIFT			16
+#define hdr_pp_align_MASK			0x0000ffff
+#define hdr_pp_align_WORD			word11
+	uint32_t word12;
+#define sgl_pages_SHIFT				0
+#define sgl_pages_MASK				0x0000000f
+#define sgl_pages_WORD				word12
+#define sgl_pp_align_SHIFT			16
+#define sgl_pp_align_MASK			0x0000ffff
+#define sgl_pp_align_WORD			word12
+	uint32_t rsvd_13_63[51];
+};
+
 /* Mailbox Completion Queue Error Messages */
 #define MB_CQE_STATUS_SUCCESS 			0x0
 #define MB_CQE_STATUS_INSUFFICIENT_PRIVILEGES	0x1
@@ -1863,6 +2037,7 @@
 		struct lpfc_mbx_read_fcf_tbl read_fcf_tbl;
 		struct lpfc_mbx_add_fcf_tbl_entry add_fcf_entry;
 		struct lpfc_mbx_del_fcf_tbl_entry del_fcf_entry;
+		struct lpfc_mbx_redisc_fcf_tbl redisc_fcf_tbl;
 		struct lpfc_mbx_reg_fcfi reg_fcfi;
 		struct lpfc_mbx_unreg_fcfi unreg_fcfi;
 		struct lpfc_mbx_mq_create mq_create;
@@ -1883,6 +2058,8 @@
 		struct lpfc_mbx_request_features req_ftrs;
 		struct lpfc_mbx_post_hdr_tmpl hdr_tmpl;
 		struct lpfc_mbx_query_fw_cfg query_fw_cfg;
+		struct lpfc_mbx_supp_pages supp_pages;
+		struct lpfc_mbx_sli4_params sli4_params;
 		struct lpfc_mbx_nop nop;
 	} un;
 };
@@ -1959,6 +2136,9 @@
 #define LPFC_ASYNC_LINK_FAULT_NONE	0x0
 #define LPFC_ASYNC_LINK_FAULT_LOCAL	0x1
 #define LPFC_ASYNC_LINK_FAULT_REMOTE	0x2
+#define lpfc_acqe_qos_link_speed_SHIFT	16
+#define lpfc_acqe_qos_link_speed_MASK	0x0000FFFF
+#define lpfc_acqe_qos_link_speed_WORD	word1
 	uint32_t event_tag;
 	uint32_t trailer;
 };
@@ -1976,6 +2156,7 @@
 #define LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL	0x2
 #define LPFC_FCOE_EVENT_TYPE_FCF_DEAD		0x3
 #define LPFC_FCOE_EVENT_TYPE_CVL		0x4
+#define LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD	0x5
 	uint32_t event_tag;
 	uint32_t trailer;
 };
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index b8eb1b6..d29ac7c 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2010 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -544,7 +544,7 @@
 			mempool_free(pmb, phba->mbox_mem_pool);
 			return -EIO;
 		}
-	} else {
+	} else if (phba->cfg_suppress_link_up == 0) {
 		lpfc_init_link(phba, pmb, phba->cfg_topology,
 			phba->cfg_link_speed);
 		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
@@ -603,6 +603,102 @@
 }
 
 /**
+ * lpfc_hba_init_link - Initialize the FC link
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine will issue the INIT_LINK mailbox command call.
+ * It is available to other drivers through the lpfc_hba data
+ * structure for use as a delayed link up mechanism with the
+ * module parameter lpfc_suppress_link_up.
+ *
+ * Return code
+ *		0 - success
+ *		Any other value - error
+ **/
+int
+lpfc_hba_init_link(struct lpfc_hba *phba)
+{
+	struct lpfc_vport *vport = phba->pport;
+	LPFC_MBOXQ_t *pmb;
+	MAILBOX_t *mb;
+	int rc;
+
+	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmb) {
+		phba->link_state = LPFC_HBA_ERROR;
+		return -ENOMEM;
+	}
+	mb = &pmb->u.mb;
+	pmb->vport = vport;
+
+	lpfc_init_link(phba, pmb, phba->cfg_topology,
+		phba->cfg_link_speed);
+	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+	lpfc_set_loopback_flag(phba);
+	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+	if (rc != MBX_SUCCESS) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"0498 Adapter failed to init, mbxCmd x%x "
+			"INIT_LINK, mbxStatus x%x\n",
+			mb->mbxCommand, mb->mbxStatus);
+		/* Clear all interrupt enable conditions */
+		writel(0, phba->HCregaddr);
+		readl(phba->HCregaddr); /* flush */
+		/* Clear all pending interrupts */
+		writel(0xffffffff, phba->HAregaddr);
+		readl(phba->HAregaddr); /* flush */
+		phba->link_state = LPFC_HBA_ERROR;
+		if (rc != MBX_BUSY)
+			mempool_free(pmb, phba->mbox_mem_pool);
+		return -EIO;
+	}
+	phba->cfg_suppress_link_up = 0;
+
+	return 0;
+}
+
+/**
+ * lpfc_hba_down_link - Bring the FC link down
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine will issue the DOWN_LINK mailbox command call.
+ * It is available to other drivers through the lpfc_hba data
+ * structure for use in stopping the link.
+ *
+ * Return code
+ *		0 - success
+ *		Any other value - error
+ **/
+int
+lpfc_hba_down_link(struct lpfc_hba *phba)
+{
+	LPFC_MBOXQ_t *pmb;
+	int rc;
+
+	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmb) {
+		phba->link_state = LPFC_HBA_ERROR;
+		return -ENOMEM;
+	}
+
+	lpfc_printf_log(phba,
+		KERN_ERR, LOG_INIT,
+		"0491 Adapter Link is disabled.\n");
+	lpfc_down_link(phba, pmb);
+	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
+		lpfc_printf_log(phba,
+		KERN_ERR, LOG_INIT,
+		"2522 Adapter failed to issue DOWN_LINK"
+		" mbox command rc 0x%x\n", rc);
+
+		mempool_free(pmb, phba->mbox_mem_pool);
+		return -EIO;
+	}
+	return 0;
+}
+
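These two helpers are wired into struct lpfc_hba as the lpfc_hba_init_link and lpfc_hba_down_link function pointers later in this patch (see lpfc_init_api_table_setup), so callers can toggle the link without caring which SLI revision is active. The fragment below is an illustrative, hypothetical caller only; it assumes the kernel driver context and is not compilable on its own.

/* Hypothetical caller sketch: bring the link up or down through the
 * function pointers that lpfc_init_api_table_setup() fills in below. */
static int example_set_link_state(struct lpfc_hba *phba, int link_up)
{
	int rc;

	if (link_up)
		rc = phba->lpfc_hba_init_link(phba);	/* issues INIT_LINK */
	else
		rc = phba->lpfc_hba_down_link(phba);	/* issues DOWN_LINK */

	/* Both routines return 0 on success, -ENOMEM/-EIO on failure. */
	return rc;
}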
+/**
  * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
  * @phba: pointer to lpfc HBA data structure.
  *
@@ -2073,6 +2169,44 @@
 }
 
 /**
+ * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
+ * caller of this routine should already hold the host lock.
+ **/
+void
+__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
+{
+	/* Clear pending FCF rediscovery wait timer */
+	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
+	/* Now, try to stop the timer */
+	del_timer(&phba->fcf.redisc_wait);
+}
+
+/**
+ * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
+ * checks, with the host lock held, whether the FCF rediscovery wait timer
+ * is pending before proceeding to disable the timer and clear the wait
+ * timer pending flag.
+ **/
+void
+lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
+{
+	spin_lock_irq(&phba->hbalock);
+	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
+		/* FCF rediscovery timer already fired or stopped */
+		spin_unlock_irq(&phba->hbalock);
+		return;
+	}
+	__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
+	spin_unlock_irq(&phba->hbalock);
+}
+
+/**
  * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
  * @phba: pointer to lpfc hba data structure.
  *
@@ -2096,6 +2230,7 @@
 		break;
 	case LPFC_PCI_DEV_OC:
 		/* Stop any OneConnect device specific driver timers */
+		lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
 		break;
 	default:
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -2228,6 +2363,7 @@
 	struct lpfc_vport *vport = phba->pport;
 	struct lpfc_nodelist  *ndlp, *next_ndlp;
 	struct lpfc_vport **vports;
+	struct Scsi_Host *shost;
 	int i;
 
 	if (vport->fc_flag & FC_OFFLINE_MODE)
@@ -2241,11 +2377,15 @@
 	vports = lpfc_create_vport_work_array(phba);
 	if (vports != NULL) {
 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
-			struct Scsi_Host *shost;
-
 			if (vports[i]->load_flag & FC_UNLOADING)
 				continue;
+			shost = lpfc_shost_from_vport(vports[i]);
+			spin_lock_irq(shost->host_lock);
 			vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
+			vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+			vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
+			spin_unlock_irq(shost->host_lock);
+
 			shost =	lpfc_shost_from_vport(vports[i]);
 			list_for_each_entry_safe(ndlp, next_ndlp,
 						 &vports[i]->fc_nodes,
@@ -2401,7 +2541,8 @@
 	shost->this_id = -1;
 	shost->max_cmd_len = 16;
 	if (phba->sli_rev == LPFC_SLI_REV4) {
-		shost->dma_boundary = LPFC_SLI4_MAX_SEGMENT_SIZE;
+		shost->dma_boundary =
+			phba->sli4_hba.pc_sli4_params.sge_supp_len;
 		shost->sg_tablesize = phba->cfg_sg_seg_cnt;
 	}
 
@@ -2650,8 +2791,6 @@
 	lpfc_stop_hba_timers(phba);
 	phba->pport->work_port_events = 0;
 	phba->sli4_hba.intr_enable = 0;
-	/* Hard clear it for now, shall have more graceful way to wait later */
-	phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
 }
 
 /**
@@ -2703,7 +2842,7 @@
 	del_fcf_record = &mboxq->u.mqe.un.del_fcf_entry;
 	bf_set(lpfc_mbx_del_fcf_tbl_count, del_fcf_record, 1);
 	bf_set(lpfc_mbx_del_fcf_tbl_index, del_fcf_record,
-	       phba->fcf.fcf_indx);
+	       phba->fcf.current_rec.fcf_indx);
 
 	if (!phba->sli4_hba.intr_enable)
 		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
@@ -2727,6 +2866,57 @@
 }
 
 /**
+ * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
+ * @phba: Pointer to hba for which this call is being executed.
+ *
+ * This routine starts the timer waiting for the FCF rediscovery to complete.
+ **/
+void
+lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
+{
+	unsigned long fcf_redisc_wait_tmo =
+		(jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
+	/* Start fcf rediscovery wait period timer */
+	mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
+	spin_lock_irq(&phba->hbalock);
+	/* Allow action to new fcf asynchronous event */
+	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
+	/* Mark the FCF rediscovery pending state */
+	phba->fcf.fcf_flag |= FCF_REDISC_PEND;
+	spin_unlock_irq(&phba->hbalock);
+}
+
+/**
+ * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
+ * @ptr: pointer to lpfc hba data structure, cast to unsigned long.
+ *
+ * This routine is invoked when the wait for FCF table rediscovery has
+ * timed out. If new FCF record(s) have been discovered during the wait
+ * period, a new FCF event is added to the FCOE async event list and the
+ * worker thread is woken up to process it from the worker thread
+ * context.
+ **/
+void
+lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
+{
+	struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
+
+	/* Don't send FCF rediscovery event if timer cancelled */
+	spin_lock_irq(&phba->hbalock);
+	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
+		spin_unlock_irq(&phba->hbalock);
+		return;
+	}
+	/* Clear FCF rediscovery timer pending flag */
+	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
+	/* FCF rediscovery event to worker thread */
+	phba->fcf.fcf_flag |= FCF_REDISC_EVT;
+	spin_unlock_irq(&phba->hbalock);
+	/* wake up worker thread */
+	lpfc_worker_wake_up(phba);
+}
+
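Taken together with the other hunks in this patch, the rediscovery path is: the timer is registered once at resource-setup time, armed by lpfc_fcf_redisc_wait_start_timer(), and on expiry the handler above converts the pending flag into an event and wakes the worker, which runs lpfc_sli4_fcf_redisc_event_proc(). The snippet below only restates that wiring as it appears elsewhere in this patch; it is kernel-context code, not standalone.

	/* One-time registration (from lpfc_sli4_driver_resource_setup) */
	init_timer(&phba->fcf.redisc_wait);
	phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
	phba->fcf.redisc_wait.data = (unsigned long)phba;

	/* Arming (lpfc_fcf_redisc_wait_start_timer) sets FCF_REDISC_PEND and
	 * calls mod_timer(); on timeout the handler above sets FCF_REDISC_EVT
	 * and calls lpfc_worker_wake_up(), and the worker thread then invokes
	 * lpfc_sli4_fcf_redisc_event_proc() to rescan the FCF table from
	 * LPFC_FCOE_FCF_GET_FIRST. */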
+/**
  * lpfc_sli4_fw_cfg_check - Read the firmware config and verify FCoE support
  * @phba: pointer to lpfc hba data structure.
  *
@@ -2978,6 +3168,8 @@
 				bf_get(lpfc_acqe_link_physical, acqe_link);
 	phba->sli4_hba.link_state.fault =
 				bf_get(lpfc_acqe_link_fault, acqe_link);
+	phba->sli4_hba.link_state.logical_speed =
+				bf_get(lpfc_acqe_qos_link_speed, acqe_link);
 
 	/* Invoke the lpfc_handle_latt mailbox command callback function */
 	lpfc_mbx_cmpl_read_la(phba, pmb);
@@ -3007,22 +3199,34 @@
 	struct lpfc_nodelist *ndlp;
 	struct Scsi_Host  *shost;
 	uint32_t link_state;
+	int active_vlink_present;
+	struct lpfc_vport **vports;
+	int i;
 
 	phba->fc_eventTag = acqe_fcoe->event_tag;
 	phba->fcoe_eventtag = acqe_fcoe->event_tag;
 	switch (event_type) {
 	case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
+	case LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD:
 		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
 			"2546 New FCF found index 0x%x tag 0x%x\n",
 			acqe_fcoe->index,
 			acqe_fcoe->event_tag);
-		/*
-		 * If the current FCF is in discovered state, or
-		 * FCF discovery is in progress do nothing.
-		 */
 		spin_lock_irq(&phba->hbalock);
-		if ((phba->fcf.fcf_flag & FCF_DISCOVERED) ||
-		   (phba->hba_flag & FCF_DISC_INPROGRESS)) {
+		if ((phba->fcf.fcf_flag & FCF_SCAN_DONE) ||
+		    (phba->hba_flag & FCF_DISC_INPROGRESS)) {
+			/*
+			 * If the current FCF is in discovered state or
+			 * FCF discovery is in progress, do nothing.
+			 */
+			spin_unlock_irq(&phba->hbalock);
+			break;
+		}
+		if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
+			/*
+			 * If fast FCF failover rescan event is pending,
+			 * do nothing.
+			 */
 			spin_unlock_irq(&phba->hbalock);
 			break;
 		}
@@ -3049,7 +3253,7 @@
 			" tag 0x%x\n", acqe_fcoe->index,
 			acqe_fcoe->event_tag);
 		/* If the event is not for currently used fcf do nothing */
-		if (phba->fcf.fcf_indx != acqe_fcoe->index)
+		if (phba->fcf.current_rec.fcf_indx != acqe_fcoe->index)
 			break;
 		/*
 		 * Currently, the driver supports only one FCF - so treat this as
@@ -3074,14 +3278,58 @@
 		if (!ndlp)
 			break;
 		shost = lpfc_shost_from_vport(vport);
+		if (phba->pport->port_state <= LPFC_FLOGI)
+			break;
+		/* If virtual link is not yet instantiated ignore CVL */
+		if (vport->port_state <= LPFC_FDISC)
+			break;
+
 		lpfc_linkdown_port(vport);
-		if (vport->port_type != LPFC_NPIV_PORT) {
+		lpfc_cleanup_pending_mbox(vport);
+		spin_lock_irq(shost->host_lock);
+		vport->fc_flag |= FC_VPORT_CVL_RCVD;
+		spin_unlock_irq(shost->host_lock);
+		active_vlink_present = 0;
+
+		vports = lpfc_create_vport_work_array(phba);
+		if (vports) {
+			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
+					i++) {
+				if ((!(vports[i]->fc_flag &
+					FC_VPORT_CVL_RCVD)) &&
+					(vports[i]->port_state > LPFC_FDISC)) {
+					active_vlink_present = 1;
+					break;
+				}
+			}
+			lpfc_destroy_vport_work_array(phba, vports);
+		}
+
+		if (active_vlink_present) {
+			/*
+			 * If there are other active VLinks present,
+			 * re-instantiate the Vlink using FDISC.
+			 */
 			mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
 			spin_lock_irq(shost->host_lock);
 			ndlp->nlp_flag |= NLP_DELAY_TMO;
 			spin_unlock_irq(shost->host_lock);
-			ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;
-			vport->port_state = LPFC_FLOGI;
+			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
+			vport->port_state = LPFC_FDISC;
+		} else {
+			/*
+			 * Otherwise, request the port to rediscover
+			 * the entire FCF table for a fast recovery
+			 * from the possible case that the current
+			 * FCF is no longer valid.
+			 */
+			rc = lpfc_sli4_redisc_fcf_table(phba);
+			if (rc)
+				/*
+				 * Last resort will be to retry on the
+				 * currently registered FCF entry.
+				 */
+				lpfc_retry_pport_discovery(phba);
 		}
 		break;
 	default:
@@ -3158,6 +3406,34 @@
 }
 
 /**
+ * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked by the worker thread to process FCF table
+ * rediscovery pending completion event.
+ **/
+void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
+{
+	int rc;
+
+	spin_lock_irq(&phba->hbalock);
+	/* Clear FCF rediscovery timeout event */
+	phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
+	/* Clear driver fast failover FCF record flag */
+	phba->fcf.failover_rec.flag = 0;
+	/* Set state for FCF fast failover */
+	phba->fcf.fcf_flag |= FCF_REDISC_FOV;
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Scan FCF table from the first entry to re-discover SAN */
+	rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
+	if (rc)
+		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+				"2747 Post FCF rediscovery read FCF record "
+				"failed 0x%x\n", rc);
+}
+
+/**
  * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
  * @phba: pointer to lpfc hba data structure.
  * @dev_grp: The HBA PCI-Device group number.
@@ -3442,8 +3718,10 @@
 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 {
 	struct lpfc_sli *psli;
-	int rc;
-	int i, hbq_count;
+	LPFC_MBOXQ_t *mboxq;
+	int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size;
+	uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
+	struct lpfc_mqe *mqe;
 
 	/* Before proceed, wait for POST done and device ready */
 	rc = lpfc_sli4_post_status_check(phba);
@@ -3472,6 +3750,11 @@
 	init_timer(&phba->eratt_poll);
 	phba->eratt_poll.function = lpfc_poll_eratt;
 	phba->eratt_poll.data = (unsigned long) phba;
+	/* FCF rediscover timer */
+	init_timer(&phba->fcf.redisc_wait);
+	phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
+	phba->fcf.redisc_wait.data = (unsigned long)phba;
+
 	/*
 	 * We need to do a READ_CONFIG mailbox command here before
 	 * calling lpfc_get_cfgparam. For VFs this will report the
@@ -3496,31 +3779,26 @@
 	 * used to create the sg_dma_buf_pool must be dynamically calculated.
 	 * 2 segments are added since the IOCB needs a command and response bde.
 	 * To ensure that the scsi sgl does not cross a 4k page boundary, only
-	 * sgl sizes of 1k, 2k, 4k, and 8k are supported.
-	 * Table of sgl sizes and seg_cnt:
-	 * sgl size, 	sg_seg_cnt	total seg
-	 * 1k		50		52
-	 * 2k		114		116
-	 * 4k		242		244
-	 * 8k		498		500
-	 * cmd(32) + rsp(160) + (52 * sizeof(sli4_sge)) = 1024
-	 * cmd(32) + rsp(160) + (116 * sizeof(sli4_sge)) = 2048
-	 * cmd(32) + rsp(160) + (244 * sizeof(sli4_sge)) = 4096
-	 * cmd(32) + rsp(160) + (500 * sizeof(sli4_sge)) = 8192
+	 * sgl sizes that are a power of 2 are used.
 	 */
-	if (phba->cfg_sg_seg_cnt <= LPFC_DEFAULT_SG_SEG_CNT)
-		phba->cfg_sg_seg_cnt = 50;
-	else if (phba->cfg_sg_seg_cnt <= 114)
-		phba->cfg_sg_seg_cnt = 114;
-	else if (phba->cfg_sg_seg_cnt <= 242)
-		phba->cfg_sg_seg_cnt = 242;
+	buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
+		    ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)));
+	/* Feature Level 1 hardware is limited to 2 pages */
+	if ((bf_get(lpfc_sli_intf_featurelevel1, &phba->sli4_hba.sli_intf) ==
+	     LPFC_SLI_INTF_FEATURELEVEL1_1))
+		max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE;
 	else
-		phba->cfg_sg_seg_cnt = 498;
-
-	phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd)
-					+ sizeof(struct fcp_rsp);
-	phba->cfg_sg_dma_buf_size +=
-		((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge));
+		max_buf_size = LPFC_SLI4_MAX_BUF_SIZE;
+	for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE;
+	     dma_buf_size < max_buf_size && buf_size > dma_buf_size;
+	     dma_buf_size = dma_buf_size << 1)
+		;
+	if (dma_buf_size == max_buf_size)
+		phba->cfg_sg_seg_cnt = (dma_buf_size -
+			sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) -
+			(2 * sizeof(struct sli4_sge))) /
+				sizeof(struct sli4_sge);
+	phba->cfg_sg_dma_buf_size = dma_buf_size;
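The loop above sizes the per-command DMA buffer by rounding the required length up to a power of two and, if the cap is hit, shrinking the scatter/gather segment count to whatever still fits. A small standalone sketch of that arithmetic follows; the EX_* sizes are placeholders, not the driver's LPFC_SLI4_*_BUF_SIZE values or real structure sizes.

#include <stdio.h>

#define EX_MIN_BUF_SIZE	1024	/* stand-in for LPFC_SLI4_MIN_BUF_SIZE  */
#define EX_MAX_BUF_SIZE	8192	/* stand-in for the feature-level max   */
#define EX_CMND_SIZE	32	/* stand-in for sizeof(struct fcp_cmnd) */
#define EX_RSP_SIZE	160	/* stand-in for sizeof(struct fcp_rsp)  */
#define EX_SGE_SIZE	16	/* stand-in for sizeof(struct sli4_sge) */

int main(void)
{
	int sg_seg_cnt = 64;	/* requested scatter/gather segments */
	int buf_size = EX_CMND_SIZE + EX_RSP_SIZE +
		       (sg_seg_cnt + 2) * EX_SGE_SIZE;
	int dma_buf_size;

	/* Round the required size up to a power of two, as the patch does */
	for (dma_buf_size = EX_MIN_BUF_SIZE;
	     dma_buf_size < EX_MAX_BUF_SIZE && buf_size > dma_buf_size;
	     dma_buf_size <<= 1)
		;

	/* If the cap was reached, recompute how many SGEs still fit */
	if (dma_buf_size == EX_MAX_BUF_SIZE)
		sg_seg_cnt = (dma_buf_size - EX_CMND_SIZE - EX_RSP_SIZE -
			      2 * EX_SGE_SIZE) / EX_SGE_SIZE;

	printf("buf_size=%d dma_buf_size=%d sg_seg_cnt=%d\n",
	       buf_size, dma_buf_size, sg_seg_cnt);
	return 0;
}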
 
 	/* Initialize buffer queue management fields */
 	hbq_count = lpfc_sli_hbq_count();
@@ -3638,6 +3916,43 @@
 		goto out_free_fcp_eq_hdl;
 	}
 
+	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
+						       GFP_KERNEL);
+	if (!mboxq) {
+		rc = -ENOMEM;
+		goto out_free_fcp_eq_hdl;
+	}
+
+	/* Get the Supported Pages. It is always available. */
+	lpfc_supported_pages(mboxq);
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+	if (unlikely(rc)) {
+		rc = -EIO;
+		mempool_free(mboxq, phba->mbox_mem_pool);
+		goto out_free_fcp_eq_hdl;
+	}
+
+	mqe = &mboxq->u.mqe;
+	memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
+	       LPFC_MAX_SUPPORTED_PAGES);
+	for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
+		switch (pn_page[i]) {
+		case LPFC_SLI4_PARAMETERS:
+			phba->sli4_hba.pc_sli4_params.supported = 1;
+			break;
+		default:
+			break;
+		}
+	}
+
+	/* Read the port's SLI4 Parameters capabilities if supported. */
+	if (phba->sli4_hba.pc_sli4_params.supported)
+		rc = lpfc_pc_sli4_params_get(phba, mboxq);
+	mempool_free(mboxq, phba->mbox_mem_pool);
+	if (rc) {
+		rc = -EIO;
+		goto out_free_fcp_eq_hdl;
+	}
 	return rc;
 
 out_free_fcp_eq_hdl:
@@ -3733,6 +4048,8 @@
 int
 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
 {
+	phba->lpfc_hba_init_link = lpfc_hba_init_link;
+	phba->lpfc_hba_down_link = lpfc_hba_down_link;
 	switch (dev_grp) {
 	case LPFC_PCI_DEV_LP:
 		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
@@ -4291,7 +4608,7 @@
 		return NULL;
 	}
 
-	mutex_init(&phba->ct_event_mutex);
+	spin_lock_init(&phba->ct_ev_lock);
 	INIT_LIST_HEAD(&phba->ct_ev_waiters);
 
 	return phba;
@@ -4641,7 +4958,7 @@
 int
 lpfc_sli4_post_status_check(struct lpfc_hba *phba)
 {
-	struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg, scratchpad;
+	struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg;
 	int i, port_error = -ENODEV;
 
 	if (!phba->sli4_hba.STAregaddr)
@@ -4677,14 +4994,21 @@
 			bf_get(lpfc_hst_state_port_status, &sta_reg));
 
 	/* Log device information */
-	scratchpad.word0 =  readl(phba->sli4_hba.SCRATCHPADregaddr);
-	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-			"2534 Device Info: ChipType=0x%x, SliRev=0x%x, "
-			"FeatureL1=0x%x, FeatureL2=0x%x\n",
-			bf_get(lpfc_scratchpad_chiptype, &scratchpad),
-			bf_get(lpfc_scratchpad_slirev, &scratchpad),
-			bf_get(lpfc_scratchpad_featurelevel1, &scratchpad),
-			bf_get(lpfc_scratchpad_featurelevel2, &scratchpad));
+	phba->sli4_hba.sli_intf.word0 = readl(phba->sli4_hba.SLIINTFregaddr);
+	if (bf_get(lpfc_sli_intf_valid,
+		   &phba->sli4_hba.sli_intf) == LPFC_SLI_INTF_VALID) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"2534 Device Info: ChipType=0x%x, SliRev=0x%x, "
+				"FeatureL1=0x%x, FeatureL2=0x%x\n",
+				bf_get(lpfc_sli_intf_sli_family,
+				       &phba->sli4_hba.sli_intf),
+				bf_get(lpfc_sli_intf_slirev,
+				       &phba->sli4_hba.sli_intf),
+				bf_get(lpfc_sli_intf_featurelevel1,
+				       &phba->sli4_hba.sli_intf),
+				bf_get(lpfc_sli_intf_featurelevel2,
+				       &phba->sli4_hba.sli_intf));
+	}
 	phba->sli4_hba.ue_mask_lo = readl(phba->sli4_hba.UEMASKLOregaddr);
 	phba->sli4_hba.ue_mask_hi = readl(phba->sli4_hba.UEMASKHIregaddr);
 	/* With uncoverable error, log the error message and return error */
@@ -4723,8 +5047,8 @@
 					LPFC_UE_MASK_LO;
 	phba->sli4_hba.UEMASKHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
 					LPFC_UE_MASK_HI;
-	phba->sli4_hba.SCRATCHPADregaddr = phba->sli4_hba.conf_regs_memmap_p +
-					LPFC_SCRATCHPAD;
+	phba->sli4_hba.SLIINTFregaddr = phba->sli4_hba.conf_regs_memmap_p +
+					LPFC_SLI_INTF;
 }
 
 /**
@@ -5999,7 +6323,7 @@
 		spin_lock_irqsave(&phba->hbalock, flags);
 		/* Mark the FCFI is no longer registered */
 		phba->fcf.fcf_flag &=
-			~(FCF_AVAILABLE | FCF_REGISTERED | FCF_DISCOVERED);
+			~(FCF_AVAILABLE | FCF_REGISTERED | FCF_SCAN_DONE);
 		spin_unlock_irqrestore(&phba->hbalock, flags);
 	}
 }
@@ -6039,16 +6363,20 @@
 
 	/* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the
 	 * number of bytes required by each mapping. They are actually
-	 * mapping to the PCI BAR regions 1, 2, and 4 by the SLI4 device.
+	 * mapping to the PCI BAR regions 0 or 1, 2, and 4 by the SLI4 device.
 	 */
-	phba->pci_bar0_map = pci_resource_start(pdev, LPFC_SLI4_BAR0);
-	bar0map_len = pci_resource_len(pdev, LPFC_SLI4_BAR0);
+	if (pci_resource_start(pdev, 0)) {
+		phba->pci_bar0_map = pci_resource_start(pdev, 0);
+		bar0map_len = pci_resource_len(pdev, 0);
+	} else {
+		phba->pci_bar0_map = pci_resource_start(pdev, 1);
+		bar0map_len = pci_resource_len(pdev, 1);
+	}
+	phba->pci_bar1_map = pci_resource_start(pdev, 2);
+	bar1map_len = pci_resource_len(pdev, 2);
 
-	phba->pci_bar1_map = pci_resource_start(pdev, LPFC_SLI4_BAR1);
-	bar1map_len = pci_resource_len(pdev, LPFC_SLI4_BAR1);
-
-	phba->pci_bar2_map = pci_resource_start(pdev, LPFC_SLI4_BAR2);
-	bar2map_len = pci_resource_len(pdev, LPFC_SLI4_BAR2);
+	phba->pci_bar2_map = pci_resource_start(pdev, 4);
+	bar2map_len = pci_resource_len(pdev, 4);
 
 	/* Map SLI4 PCI Config Space Register base to a kernel virtual addr */
 	phba->sli4_hba.conf_regs_memmap_p =
@@ -6793,6 +7121,73 @@
 	phba->pport->work_port_events = 0;
 }
 
+/**
+ * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
+ * @phba: Pointer to HBA context object.
+ * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
+ *
+ * This function is called in the SLI4 code path to read the port's
+ * sli4 capabilities.
+ *
+ * This function may be called from any context that can block-wait
+ * for the completion.  The expectation is that this routine is called
+ * typically from probe_one or from the online routine.
+ **/
+int
+lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+	int rc;
+	struct lpfc_mqe *mqe;
+	struct lpfc_pc_sli4_params *sli4_params;
+	uint32_t mbox_tmo;
+
+	rc = 0;
+	mqe = &mboxq->u.mqe;
+
+	/* Read the port's SLI4 Parameters port capabilities */
+	lpfc_sli4_params(mboxq);
+	if (!phba->sli4_hba.intr_enable)
+		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+	else {
+		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_PORT_CAPABILITIES);
+		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
+	}
+
+	if (unlikely(rc))
+		return 1;
+
+	sli4_params = &phba->sli4_hba.pc_sli4_params;
+	sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
+	sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
+	sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
+	sli4_params->featurelevel_1 = bf_get(featurelevel_1,
+					     &mqe->un.sli4_params);
+	sli4_params->featurelevel_2 = bf_get(featurelevel_2,
+					     &mqe->un.sli4_params);
+	sli4_params->proto_types = mqe->un.sli4_params.word3;
+	sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
+	sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
+	sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
+	sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
+	sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
+	sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
+	sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
+	sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
+	sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
+	sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
+	sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
+	sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
+	sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
+	sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
+	sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
+	sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
+	sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
+	sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
+	sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
+	sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
+	return rc;
+}
+
 /**
  * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
  * @pdev: pointer to PCI device
@@ -7134,6 +7529,12 @@
 	pci_set_power_state(pdev, PCI_D0);
 	pci_restore_state(pdev);
 
+	/*
+	 * The new kernel behavior of pci_restore_state() clears the device
+	 * saved_state flag, so the restored state must be saved again.
+	 */
+	pci_save_state(pdev);
+
 	if (pdev->is_busmaster)
 		pci_set_master(pdev);
 
@@ -7317,6 +7718,13 @@
 	}
 
 	pci_restore_state(pdev);
+
+	/*
+	 * The new kernel behavior of pci_restore_state() clears the device
+	 * saved_state flag, so the restored state must be saved again.
+	 */
+	pci_save_state(pdev);
+
 	if (pdev->is_busmaster)
 		pci_set_master(pdev);
 
@@ -7726,6 +8134,13 @@
 	/* Restore device state from PCI config space */
 	pci_set_power_state(pdev, PCI_D0);
 	pci_restore_state(pdev);
+
+	/*
+	 * The new kernel behavior of pci_restore_state() clears the device
+	 * saved_state flag, so the restored state must be saved again.
+	 */
+	pci_save_state(pdev);
+
 	if (pdev->is_busmaster)
 		pci_set_master(pdev);
 
@@ -7845,11 +8260,11 @@
 	int rc;
 	struct lpfc_sli_intf intf;
 
-	if (pci_read_config_dword(pdev, LPFC_SLIREV_CONF_WORD, &intf.word0))
+	if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
 		return -ENODEV;
 
 	if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
-		(bf_get(lpfc_sli_intf_rev, &intf) == LPFC_SLIREV_CONF_SLI4))
+	    (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
 		rc = lpfc_pci_probe_one_s4(pdev, pid);
 	else
 		rc = lpfc_pci_probe_one_s3(pdev, pid);
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index a9afd8b..6c4dce1 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1707,7 +1707,8 @@
 				alloc_len - sizeof(union  lpfc_sli4_cfg_shdr);
 	}
 	/* The sub-header is in DMA memory, which needs endian conversion */
-	lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr,
+	if (cfg_shdr)
+		lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr,
 			      sizeof(union  lpfc_sli4_cfg_shdr));
 
 	return alloc_len;
@@ -1747,6 +1748,65 @@
 }
 
 /**
+ * lpfc_sli4_mbx_read_fcf_record - Allocate and construct read fcf mbox cmd
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to lpfc mbox command.
+ * @fcf_index: index to fcf table.
+ *
+ * This routine allocates and constructs a non-embedded mailbox command
+ * for reading the FCF table entry referred to by @fcf_index.
+ *
+ * Return: 0 if the mailbox command is constructed successfully, -ENOMEM
+ * otherwise.
+ **/
+int
+lpfc_sli4_mbx_read_fcf_record(struct lpfc_hba *phba,
+			      struct lpfcMboxq *mboxq,
+			      uint16_t fcf_index)
+{
+	void *virt_addr;
+	dma_addr_t phys_addr;
+	uint8_t *bytep;
+	struct lpfc_mbx_sge sge;
+	uint32_t alloc_len, req_len;
+	struct lpfc_mbx_read_fcf_tbl *read_fcf;
+
+	if (!mboxq)
+		return -ENOMEM;
+
+	req_len = sizeof(struct fcf_record) +
+		  sizeof(union lpfc_sli4_cfg_shdr) + 2 * sizeof(uint32_t);
+
+	/* Set up READ_FCF SLI4_CONFIG mailbox-ioctl command */
+	alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+			LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE, req_len,
+			LPFC_SLI4_MBX_NEMBED);
+
+	if (alloc_len < req_len) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+				"0291 Allocated DMA memory size (x%x) is "
+				"less than the requested DMA memory "
+				"size (x%x)\n", alloc_len, req_len);
+		return -ENOMEM;
+	}
+
+	/* Get the first SGE entry from the non-embedded DMA memory. This
+	 * routine only uses a single SGE.
+	 */
+	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
+	phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
+	virt_addr = mboxq->sge_array->addr[0];
+	read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
+
+	/* Set up command fields */
+	bf_set(lpfc_mbx_read_fcf_tbl_indx, &read_fcf->u.request, fcf_index);
+	/* Perform necessary endian conversion */
+	bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
+	lpfc_sli_pcimem_bcopy(bytep, bytep, sizeof(uint32_t));
+
+	return 0;
+}
+
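The asynchronous caller of this helper appears in the lpfc_sli.c hunk further down in this patch: it builds the command, points mbox_cmpl at lpfc_mbx_cmpl_read_fcf_record and issues it with MBX_NOWAIT. A condensed restatement of that sequence, with error handling elided, is shown below.

	/* Condensed from the lpfc_sli.c hunk below; error paths elided */
	rc = lpfc_sli4_mbx_read_fcf_record(phba, mboxq, fcf_index);
	if (rc)
		return rc;			/* -ENOMEM on failure */
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_record;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);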
+/**
  * lpfc_request_features: Configure SLI4 REQUEST_FEATURES mailbox
  * @mboxq: pointer to lpfc mbox command.
  *
@@ -1946,13 +2006,14 @@
 	bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID);
 	bf_set(lpfc_reg_fcfi_rq_id2, reg_fcfi, REG_FCF_INVALID_QID);
 	bf_set(lpfc_reg_fcfi_rq_id3, reg_fcfi, REG_FCF_INVALID_QID);
-	bf_set(lpfc_reg_fcfi_info_index, reg_fcfi, phba->fcf.fcf_indx);
+	bf_set(lpfc_reg_fcfi_info_index, reg_fcfi,
+	       phba->fcf.current_rec.fcf_indx);
 	/* reg_fcf addr mode is bit wise inverted value of fcf addr_mode */
-	bf_set(lpfc_reg_fcfi_mam, reg_fcfi,
-		(~phba->fcf.addr_mode) & 0x3);
-	if (phba->fcf.fcf_flag & FCF_VALID_VLAN) {
+	bf_set(lpfc_reg_fcfi_mam, reg_fcfi, (~phba->fcf.addr_mode) & 0x3);
+	if (phba->fcf.current_rec.vlan_id != 0xFFFF) {
 		bf_set(lpfc_reg_fcfi_vv, reg_fcfi, 1);
-		bf_set(lpfc_reg_fcfi_vlan_tag, reg_fcfi, phba->fcf.vlan_id);
+		bf_set(lpfc_reg_fcfi_vlan_tag, reg_fcfi,
+		       phba->fcf.current_rec.vlan_id);
 	}
 }
 
@@ -1992,3 +2053,41 @@
 	bf_set(lpfc_resume_rpi_ii, resume_rpi, RESUME_INDEX_RPI);
 	resume_rpi->event_tag = ndlp->phba->fc_eventTag;
 }
+
+/**
+ * lpfc_supported_pages - Initialize the PORT_CAPABILITIES supported pages
+ *                        mailbox command.
+ * @mbox: pointer to lpfc mbox command to initialize.
+ *
+ * The PORT_CAPABILITIES supported pages mailbox command is issued to
+ * retrieve the particular feature pages supported by the port.
+ **/
+void
+lpfc_supported_pages(struct lpfcMboxq *mbox)
+{
+	struct lpfc_mbx_supp_pages *supp_pages;
+
+	memset(mbox, 0, sizeof(*mbox));
+	supp_pages = &mbox->u.mqe.un.supp_pages;
+	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES);
+	bf_set(cpn, supp_pages, LPFC_SUPP_PAGES);
+}
+
+/**
+ * lpfc_sli4_params - Initialize the PORT_CAPABILITIES SLI4 Params
+ *                    mailbox command.
+ * @mbox: pointer to lpfc mbox command to initialize.
+ *
+ * The PORT_CAPABILITIES SLI4 parameters mailbox command is issued to
+ * retrieve the particular SLI4 features supported by the port.
+ **/
+void
+lpfc_sli4_params(struct lpfcMboxq *mbox)
+{
+	struct lpfc_mbx_sli4_params *sli4_params;
+
+	memset(mbox, 0, sizeof(*mbox));
+	sli4_params = &mbox->u.mqe.un.sli4_params;
+	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES);
+	bf_set(cpn, sli4_params, LPFC_SLI4_PARAMETERS);
+}
diff --git a/drivers/scsi/lpfc/lpfc_nl.h b/drivers/scsi/lpfc/lpfc_nl.h
index d655ed3..f3cfbe2 100644
--- a/drivers/scsi/lpfc/lpfc_nl.h
+++ b/drivers/scsi/lpfc/lpfc_nl.h
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2008 Emulex.  All rights reserved.                *
+ * Copyright (C) 2010 Emulex.  All rights reserved.                *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
@@ -177,23 +177,3 @@
 	uint32_t data;
 };
 
-/* bsg definitions */
-#define LPFC_BSG_VENDOR_SET_CT_EVENT	1
-#define LPFC_BSG_VENDOR_GET_CT_EVENT	2
-
-struct set_ct_event {
-	uint32_t command;
-	uint32_t ev_req_id;
-	uint32_t ev_reg_id;
-};
-
-struct get_ct_event {
-	uint32_t command;
-	uint32_t ev_reg_id;
-	uint32_t ev_req_id;
-};
-
-struct get_ct_event_reply {
-	uint32_t immed_data;
-	uint32_t type;
-};
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 2ed6af1..d20ae6b 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -62,7 +62,7 @@
 
 int
 lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
-		 struct serv_parm * sp, uint32_t class)
+		 struct serv_parm *sp, uint32_t class, int flogi)
 {
 	volatile struct serv_parm *hsp = &vport->fc_sparam;
 	uint16_t hsp_value, ssp_value = 0;
@@ -75,49 +75,56 @@
 	 * correcting the byte values.
 	 */
 	if (sp->cls1.classValid) {
-		hsp_value = (hsp->cls1.rcvDataSizeMsb << 8) |
-				hsp->cls1.rcvDataSizeLsb;
-		ssp_value = (sp->cls1.rcvDataSizeMsb << 8) |
-				sp->cls1.rcvDataSizeLsb;
-		if (!ssp_value)
-			goto bad_service_param;
-		if (ssp_value > hsp_value) {
-			sp->cls1.rcvDataSizeLsb = hsp->cls1.rcvDataSizeLsb;
-			sp->cls1.rcvDataSizeMsb = hsp->cls1.rcvDataSizeMsb;
+		if (!flogi) {
+			hsp_value = ((hsp->cls1.rcvDataSizeMsb << 8) |
+				     hsp->cls1.rcvDataSizeLsb);
+			ssp_value = ((sp->cls1.rcvDataSizeMsb << 8) |
+				     sp->cls1.rcvDataSizeLsb);
+			if (!ssp_value)
+				goto bad_service_param;
+			if (ssp_value > hsp_value) {
+				sp->cls1.rcvDataSizeLsb =
+					hsp->cls1.rcvDataSizeLsb;
+				sp->cls1.rcvDataSizeMsb =
+					hsp->cls1.rcvDataSizeMsb;
+			}
 		}
-	} else if (class == CLASS1) {
+	} else if (class == CLASS1)
 		goto bad_service_param;
-	}
-
 	if (sp->cls2.classValid) {
-		hsp_value = (hsp->cls2.rcvDataSizeMsb << 8) |
-				hsp->cls2.rcvDataSizeLsb;
-		ssp_value = (sp->cls2.rcvDataSizeMsb << 8) |
-				sp->cls2.rcvDataSizeLsb;
-		if (!ssp_value)
-			goto bad_service_param;
-		if (ssp_value > hsp_value) {
-			sp->cls2.rcvDataSizeLsb = hsp->cls2.rcvDataSizeLsb;
-			sp->cls2.rcvDataSizeMsb = hsp->cls2.rcvDataSizeMsb;
+		if (!flogi) {
+			hsp_value = ((hsp->cls2.rcvDataSizeMsb << 8) |
+				     hsp->cls2.rcvDataSizeLsb);
+			ssp_value = ((sp->cls2.rcvDataSizeMsb << 8) |
+				     sp->cls2.rcvDataSizeLsb);
+			if (!ssp_value)
+				goto bad_service_param;
+			if (ssp_value > hsp_value) {
+				sp->cls2.rcvDataSizeLsb =
+					hsp->cls2.rcvDataSizeLsb;
+				sp->cls2.rcvDataSizeMsb =
+					hsp->cls2.rcvDataSizeMsb;
+			}
 		}
-	} else if (class == CLASS2) {
+	} else if (class == CLASS2)
 		goto bad_service_param;
-	}
-
 	if (sp->cls3.classValid) {
-		hsp_value = (hsp->cls3.rcvDataSizeMsb << 8) |
-				hsp->cls3.rcvDataSizeLsb;
-		ssp_value = (sp->cls3.rcvDataSizeMsb << 8) |
-				sp->cls3.rcvDataSizeLsb;
-		if (!ssp_value)
-			goto bad_service_param;
-		if (ssp_value > hsp_value) {
-			sp->cls3.rcvDataSizeLsb = hsp->cls3.rcvDataSizeLsb;
-			sp->cls3.rcvDataSizeMsb = hsp->cls3.rcvDataSizeMsb;
+		if (!flogi) {
+			hsp_value = ((hsp->cls3.rcvDataSizeMsb << 8) |
+				     hsp->cls3.rcvDataSizeLsb);
+			ssp_value = ((sp->cls3.rcvDataSizeMsb << 8) |
+				     sp->cls3.rcvDataSizeLsb);
+			if (!ssp_value)
+				goto bad_service_param;
+			if (ssp_value > hsp_value) {
+				sp->cls3.rcvDataSizeLsb =
+					hsp->cls3.rcvDataSizeLsb;
+				sp->cls3.rcvDataSizeMsb =
+					hsp->cls3.rcvDataSizeMsb;
+			}
 		}
-	} else if (class == CLASS3) {
+	} else if (class == CLASS3)
 		goto bad_service_param;
-	}
 
 	/*
 	 * Preserve the upper four bits of the MSB from the PLOGI response.
@@ -247,7 +254,7 @@
 	int rc;
 
 	memset(&stat, 0, sizeof (struct ls_rjt));
-	if (vport->port_state <= LPFC_FLOGI) {
+	if (vport->port_state <= LPFC_FDISC) {
 		/* Before responding to PLOGI, check for pt2pt mode.
 		 * If we are pt2pt, with an outstanding FLOGI, abort
 		 * the FLOGI and resend it first.
@@ -295,7 +302,7 @@
 			NULL);
 		return 0;
 	}
-	if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3) == 0)) {
+	if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0) == 0)) {
 		/* Reject this request because invalid parameters */
 		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
 		stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
@@ -831,7 +838,7 @@
 				 "0142 PLOGI RSP: Invalid WWN.\n");
 		goto out;
 	}
-	if (!lpfc_check_sparm(vport, ndlp, sp, CLASS3))
+	if (!lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0))
 		goto out;
 	/* PLOGI chkparm OK */
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index a246410..7f21b47 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -626,6 +626,7 @@
 		&phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
 		if (psb->cur_iocbq.sli4_xritag == xri) {
 			list_del(&psb->list);
+			psb->exch_busy = 0;
 			psb->status = IOSTAT_SUCCESS;
 			spin_unlock_irqrestore(
 				&phba->sli4_hba.abts_scsi_buf_list_lock,
@@ -688,11 +689,12 @@
 					 list);
 			if (status) {
 				/* Put this back on the abort scsi list */
-				psb->status = IOSTAT_LOCAL_REJECT;
-				psb->result = IOERR_ABORT_REQUESTED;
+				psb->exch_busy = 1;
 				rc++;
-			} else
+			} else {
+				psb->exch_busy = 0;
 				psb->status = IOSTAT_SUCCESS;
+			}
 			/* Put it back into the SCSI buffer list */
 			lpfc_release_scsi_buf_s4(phba, psb);
 		}
@@ -796,19 +798,17 @@
 		 */
 		sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
 		sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
-		bf_set(lpfc_sli4_sge_len, sgl, sizeof(struct fcp_cmnd));
 		bf_set(lpfc_sli4_sge_last, sgl, 0);
 		sgl->word2 = cpu_to_le32(sgl->word2);
-		sgl->word3 = cpu_to_le32(sgl->word3);
+		sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
 		sgl++;
 
 		/* Setup the physical region for the FCP RSP */
 		sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
 		sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
-		bf_set(lpfc_sli4_sge_len, sgl, sizeof(struct fcp_rsp));
 		bf_set(lpfc_sli4_sge_last, sgl, 1);
 		sgl->word2 = cpu_to_le32(sgl->word2);
-		sgl->word3 = cpu_to_le32(sgl->word3);
+		sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));
 
 		/*
 		 * Since the IOCB for the FCP I/O is built into this
@@ -839,11 +839,12 @@
 						psb->cur_iocbq.sli4_xritag);
 			if (status) {
 				/* Put this back on the abort scsi list */
-				psb->status = IOSTAT_LOCAL_REJECT;
-				psb->result = IOERR_ABORT_REQUESTED;
+				psb->exch_busy = 1;
 				rc++;
-			} else
+			} else {
+				psb->exch_busy = 0;
 				psb->status = IOSTAT_SUCCESS;
+			}
 			/* Put it back into the SCSI buffer list */
 			lpfc_release_scsi_buf_s4(phba, psb);
 			break;
@@ -857,11 +858,12 @@
 				 list);
 			if (status) {
 				/* Put this back on the abort scsi list */
-				psb->status = IOSTAT_LOCAL_REJECT;
-				psb->result = IOERR_ABORT_REQUESTED;
+				psb->exch_busy = 1;
 				rc++;
-			} else
+			} else {
+				psb->exch_busy = 0;
 				psb->status = IOSTAT_SUCCESS;
+			}
 			/* Put it back into the SCSI buffer list */
 			lpfc_release_scsi_buf_s4(phba, psb);
 		}
@@ -951,8 +953,7 @@
 {
 	unsigned long iflag = 0;
 
-	if (psb->status == IOSTAT_LOCAL_REJECT
-		&& psb->result == IOERR_ABORT_REQUESTED) {
+	if (psb->exch_busy) {
 		spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
 					iflag);
 		psb->pCmd = NULL;
@@ -1869,7 +1870,6 @@
 		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
 			physaddr = sg_dma_address(sgel);
 			dma_len = sg_dma_len(sgel);
-			bf_set(lpfc_sli4_sge_len, sgl, sg_dma_len(sgel));
 			sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
 			sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
 			if ((num_bde + 1) == nseg)
@@ -1878,7 +1878,7 @@
 				bf_set(lpfc_sli4_sge_last, sgl, 0);
 			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
 			sgl->word2 = cpu_to_le32(sgl->word2);
-			sgl->word3 = cpu_to_le32(sgl->word3);
+			sgl->sge_len = cpu_to_le32(dma_len);
 			dma_offset += dma_len;
 			sgl++;
 		}
@@ -2221,6 +2221,9 @@
 
 	lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
 	lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
+	/* Pick up SLI4 exchange busy status from HBA */
+	lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY;
+
 	if (pnode && NLP_CHK_NODE_ACT(pnode))
 		atomic_dec(&pnode->cmd_pending);
 
@@ -2637,6 +2640,7 @@
 	}
 	phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf;
 	phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
+	phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
 	return 0;
 }
 
@@ -2695,6 +2699,13 @@
 				 " port %s",
 				 phba->Port);
 		}
+		len = strlen(lpfcinfobuf);
+		if (phba->sli4_hba.link_state.logical_speed) {
+			snprintf(lpfcinfobuf + len,
+				 384-len,
+				 " Logical Link Speed: %d Mbps",
+				 phba->sli4_hba.link_state.logical_speed * 10);
+		}
 	}
 	return lpfcinfobuf;
 }
@@ -2990,6 +3001,7 @@
 
 	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
 	abtsiocb->fcp_wqidx = iocb->fcp_wqidx;
+	abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
 
 	if (lpfc_is_link_up(phba))
 		icmd->ulpCommand = CMD_ABORT_XRI_CN;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index 65dfc8b..5932273 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -118,6 +118,7 @@
 
 	uint32_t timeout;
 
+	uint16_t exch_busy;     /* SLI4 hba reported XB on complete WCQE */
 	uint16_t status;	/* From IOCB Word 7- ulpStatus */
 	uint32_t result;	/* From IOCB Word 4. */
 
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 589549b..35e3b96 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -580,10 +580,7 @@
 	else
 		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag);
 	if (sglq)  {
-		if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED
-			&& ((iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
-			&& (iocbq->iocb.un.ulpWord[4]
-				== IOERR_ABORT_REQUESTED))) {
+		if (iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) {
 			spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
 					iflag);
 			list_add(&sglq->list,
@@ -764,10 +761,6 @@
 	case DSSCMD_IWRITE64_CX:
 	case DSSCMD_IREAD64_CR:
 	case DSSCMD_IREAD64_CX:
-	case DSSCMD_INVALIDATE_DEK:
-	case DSSCMD_SET_KEK:
-	case DSSCMD_GET_KEK_ID:
-	case DSSCMD_GEN_XFER:
 		type = LPFC_SOL_IOCB;
 		break;
 	case CMD_ABORT_XRI_CN:
@@ -1717,6 +1710,7 @@
 	struct lpfc_dmabuf *mp;
 	uint16_t rpi, vpi;
 	int rc;
+	struct lpfc_vport  *vport = pmb->vport;
 
 	mp = (struct lpfc_dmabuf *) (pmb->context1);
 
@@ -1745,6 +1739,18 @@
 			return;
 	}
 
+	/* Unreg VPI, if the REG_VPI succeed after VLink failure */
+	if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
+		!(phba->pport->load_flag & FC_UNLOADING) &&
+		!pmb->u.mb.mbxStatus) {
+		lpfc_unreg_vpi(phba, pmb->u.mb.un.varRegVpi.vpi, pmb);
+		pmb->vport = vport;
+		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+		if (rc != MBX_NOT_FINISHED)
+			return;
+	}
+
 	if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
 		lpfc_sli4_mbox_cmd_free(phba, pmb);
 	else
@@ -2228,9 +2234,15 @@
 			 * All other are passed to the completion callback.
 			 */
 			if (pring->ringno == LPFC_ELS_RING) {
-				if (cmdiocbp->iocb_flag & LPFC_DRIVER_ABORTED) {
+				if ((phba->sli_rev < LPFC_SLI_REV4) &&
+				    (cmdiocbp->iocb_flag &
+							LPFC_DRIVER_ABORTED)) {
+					spin_lock_irqsave(&phba->hbalock,
+							  iflag);
 					cmdiocbp->iocb_flag &=
 						~LPFC_DRIVER_ABORTED;
+					spin_unlock_irqrestore(&phba->hbalock,
+							       iflag);
 					saveq->iocb.ulpStatus =
 						IOSTAT_LOCAL_REJECT;
 					saveq->iocb.un.ulpWord[4] =
@@ -2240,7 +2252,47 @@
 					 * of DMAing payload, so don't free data
 					 * buffer till after a hbeat.
 					 */
+					spin_lock_irqsave(&phba->hbalock,
+							  iflag);
 					saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
+					spin_unlock_irqrestore(&phba->hbalock,
+							       iflag);
+				}
+				if ((phba->sli_rev == LPFC_SLI_REV4) &&
+				    (saveq->iocb_flag & LPFC_EXCHANGE_BUSY)) {
+					/* Set the exchange busy flag on the
+					 * cmdiocb so the sgl (xri) will not
+					 * be released until the abort xri
+					 * is received from the hba; clear
+					 * the LPFC_DRIVER_ABORTED bit in
+					 * case it was a driver initiated
+					 * abort.
+					 */
+					spin_lock_irqsave(&phba->hbalock,
+							  iflag);
+					cmdiocbp->iocb_flag &=
+						~LPFC_DRIVER_ABORTED;
+					cmdiocbp->iocb_flag |=
+						LPFC_EXCHANGE_BUSY;
+					spin_unlock_irqrestore(&phba->hbalock,
+							       iflag);
+					cmdiocbp->iocb.ulpStatus =
+						IOSTAT_LOCAL_REJECT;
+					cmdiocbp->iocb.un.ulpWord[4] =
+						IOERR_ABORT_REQUESTED;
+					/*
+					 * For SLI4, irsiocb contains NO_XRI
+					 * in sli_xritag, it shall not affect
+					 * releasing sgl (xri) process.
+					 */
+					saveq->iocb.ulpStatus =
+						IOSTAT_LOCAL_REJECT;
+					saveq->iocb.un.ulpWord[4] =
+						IOERR_SLI_ABORTED;
+					spin_lock_irqsave(&phba->hbalock,
+							  iflag);
+					saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
+					spin_unlock_irqrestore(&phba->hbalock,
+							       iflag);
 				}
 			}
 			(cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
@@ -5687,19 +5739,19 @@
 
 		for (i = 0; i < numBdes; i++) {
 			/* Should already be byte swapped. */
-			sgl->addr_hi =  bpl->addrHigh;
-			sgl->addr_lo =  bpl->addrLow;
-			/* swap the size field back to the cpu so we
-			 * can assign it to the sgl.
-			 */
-			bde.tus.w  = le32_to_cpu(bpl->tus.w);
-			bf_set(lpfc_sli4_sge_len, sgl, bde.tus.f.bdeSize);
+			sgl->addr_hi = bpl->addrHigh;
+			sgl->addr_lo = bpl->addrLow;
+
 			if ((i+1) == numBdes)
 				bf_set(lpfc_sli4_sge_last, sgl, 1);
 			else
 				bf_set(lpfc_sli4_sge_last, sgl, 0);
 			sgl->word2 = cpu_to_le32(sgl->word2);
-			sgl->word3 = cpu_to_le32(sgl->word3);
+			/* swap the size field back to the cpu so we
+			 * can assign it to the sgl.
+			 */
+			bde.tus.w = le32_to_cpu(bpl->tus.w);
+			sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
 			bpl++;
 			sgl++;
 		}
@@ -5712,11 +5764,10 @@
 				cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
 			sgl->addr_lo =
 				cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
-			bf_set(lpfc_sli4_sge_len, sgl,
-				icmd->un.genreq64.bdl.bdeSize);
 			bf_set(lpfc_sli4_sge_last, sgl, 1);
 			sgl->word2 = cpu_to_le32(sgl->word2);
-			sgl->word3 = cpu_to_le32(sgl->word3);
+			sgl->sge_len =
+				cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
 	}
 	return sglq->sli4_xritag;
 }
@@ -5987,12 +6038,10 @@
 		else
 			bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
 		bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
-		abort_tag = iocbq->iocb.un.acxri.abortIoTag;
 		wqe->words[5] = 0;
 		bf_set(lpfc_wqe_gen_ct, &wqe->generic,
 			((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
 		abort_tag = iocbq->iocb.un.acxri.abortIoTag;
-		wqe->generic.abort_tag = abort_tag;
 		/*
 		 * The abort handler will send us CMD_ABORT_XRI_CN or
 		 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
@@ -6121,15 +6170,15 @@
 	if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
 		return IOCB_ERROR;
 
-	if (piocb->iocb_flag &  LPFC_IO_FCP) {
+	if ((piocb->iocb_flag & LPFC_IO_FCP) ||
+		(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
 		/*
 		 * For FCP command IOCB, get a new WQ index to distribute
 		 * WQE across the WQs. On the other hand, an abort IOCB
 		 * carries the same WQ index as the original command
 		 * IOCB.
 		 */
-		if ((piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
-		    (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN))
+		if (piocb->iocb_flag & LPFC_IO_FCP)
 			piocb->fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba);
 		if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx],
 				     &wqe))
@@ -7004,7 +7053,14 @@
 		    abort_iocb->iocb.ulpContext != abort_context ||
 		    (abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) == 0)
 			spin_unlock_irq(&phba->hbalock);
-		else {
+		else if (phba->sli_rev < LPFC_SLI_REV4) {
+			/*
+			 * Leave the SLI4 aborted command on the txcmplq
+			 * list; the command complete WCQE's XB bit will
+			 * tell whether the SGL (XRI) can be released
+			 * immediately or must go to the aborted SGL list
+			 * until the subsequent abort XRI arrives from the
+			 * HBA.
+			 */
 			list_del_init(&abort_iocb->list);
 			pring->txcmplq_cnt--;
 			spin_unlock_irq(&phba->hbalock);
@@ -7013,11 +7069,13 @@
 			 * payload, so don't free data buffer till after
 			 * a hbeat.
 			 */
+			spin_lock_irq(&phba->hbalock);
 			abort_iocb->iocb_flag |= LPFC_DELAY_MEM_FREE;
-
 			abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
+			spin_unlock_irq(&phba->hbalock);
+
 			abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
-			abort_iocb->iocb.un.ulpWord[4] = IOERR_SLI_ABORTED;
+			abort_iocb->iocb.un.ulpWord[4] = IOERR_ABORT_REQUESTED;
 			(abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb);
 		}
 	}
@@ -7106,7 +7164,7 @@
 		return 0;
 
 	/* This signals the response to set the correct status
-	 * before calling the completion handler.
+	 * before calling the completion handler
 	 */
 	cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
 
@@ -7124,6 +7182,8 @@
 
 	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
 	abtsiocbp->fcp_wqidx = cmdiocb->fcp_wqidx;
+	if (cmdiocb->iocb_flag & LPFC_IO_FCP)
+		abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
 
 	if (phba->link_state >= LPFC_LINK_UP)
 		iabt->ulpCommand = CMD_ABORT_XRI_CN;
@@ -7330,6 +7390,8 @@
 
 		/* ABTS WQE must go to the same WQ as the WQE to be aborted */
 		abtsiocb->fcp_wqidx = iocbq->fcp_wqidx;
+		if (iocbq->iocb_flag & LPFC_IO_FCP)
+			abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
 
 		if (lpfc_is_link_up(phba))
 			abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
@@ -8359,11 +8421,24 @@
 	}
 }
 
+/**
+ * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
+ * @phba: pointer to lpfc hba data structure
+ * @pIocbIn: pointer to the rspiocbq
+ * @pIocbOut: pointer to the cmdiocbq
+ * @wcqe: pointer to the complete wcqe
+ *
+ * This routine transfers the fields of a command iocbq to a response iocbq
+ * by copying all the IOCB fields from command iocbq and transferring the
+ * completion status information from the complete wcqe.
+ **/
 static void
-lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn,
+lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
+			      struct lpfc_iocbq *pIocbIn,
 			      struct lpfc_iocbq *pIocbOut,
 			      struct lpfc_wcqe_complete *wcqe)
 {
+	unsigned long iflags;
 	size_t offset = offsetof(struct lpfc_iocbq, iocb);
 
 	memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
@@ -8377,8 +8452,17 @@
 					wcqe->total_data_placed;
 		else
 			pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
-	else
+	else {
 		pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
+		pIocbIn->iocb.un.genreq64.bdl.bdeSize = wcqe->total_data_placed;
+	}
+
+	/* Pick up HBA exchange busy condition */
+	if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
+		spin_lock_irqsave(&phba->hbalock, iflags);
+		pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
+	}
 }
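
Illustrative sketch (not part of the patch): a completion path can now test the
exchange-busy hint carried over from the WCQE before releasing the XRI/SGL; the
variable names below are assumptions.

	/* rspiocbq is the response iocbq filled in by the transfer routine above */
	exch_busy = (rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY) ? 1 : 0;
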
 
 /**
@@ -8419,7 +8503,7 @@
 	}
 
 	/* Fake the irspiocbq and copy necessary response information */
-	lpfc_sli4_iocb_param_transfer(irspiocbq, cmdiocbq, wcqe);
+	lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
 
 	return irspiocbq;
 }
@@ -8849,8 +8933,7 @@
 	int ecount = 0;
 	uint16_t cqid;
 
-	if (bf_get(lpfc_eqe_major_code, eqe) != 0 ||
-	    bf_get(lpfc_eqe_minor_code, eqe) != 0) {
+	if (bf_get(lpfc_eqe_major_code, eqe) != 0) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 				"0359 Not a valid slow-path completion "
 				"event: majorcode=x%x, minorcode=x%x\n",
@@ -8976,7 +9059,7 @@
 	}
 
 	/* Fake the irspiocb and copy necessary response information */
-	lpfc_sli4_iocb_param_transfer(&irspiocbq, cmdiocbq, wcqe);
+	lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
 
 	/* Pass the cmd_iocb and the rsp state to the upper layer */
 	(cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
@@ -9082,8 +9165,7 @@
 	uint16_t cqid;
 	int ecount = 0;
 
-	if (unlikely(bf_get(lpfc_eqe_major_code, eqe) != 0) ||
-	    unlikely(bf_get(lpfc_eqe_minor_code, eqe) != 0)) {
+	if (unlikely(bf_get(lpfc_eqe_major_code, eqe) != 0)) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 				"0366 Not a valid fast-path completion "
 				"event: majorcode=x%x, minorcode=x%x\n",
@@ -11871,12 +11953,6 @@
 {
 	int rc = 0, error;
 	LPFC_MBOXQ_t *mboxq;
-	void *virt_addr;
-	dma_addr_t phys_addr;
-	uint8_t *bytep;
-	struct lpfc_mbx_sge sge;
-	uint32_t alloc_len, req_len;
-	struct lpfc_mbx_read_fcf_tbl *read_fcf;
 
 	phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
 	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -11887,43 +11963,19 @@
 		error = -ENOMEM;
 		goto fail_fcfscan;
 	}
-
-	req_len = sizeof(struct fcf_record) +
-		  sizeof(union lpfc_sli4_cfg_shdr) + 2 * sizeof(uint32_t);
-
-	/* Set up READ_FCF SLI4_CONFIG mailbox-ioctl command */
-	alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
-			 LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE, req_len,
-			 LPFC_SLI4_MBX_NEMBED);
-
-	if (alloc_len < req_len) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"0291 Allocated DMA memory size (x%x) is "
-				"less than the requested DMA memory "
-				"size (x%x)\n", alloc_len, req_len);
-		error = -ENOMEM;
+	/* Construct the read FCF record mailbox command */
+	rc = lpfc_sli4_mbx_read_fcf_record(phba, mboxq, fcf_index);
+	if (rc) {
+		error = -EINVAL;
 		goto fail_fcfscan;
 	}
-
-	/* Get the first SGE entry from the non-embedded DMA memory. This
-	 * routine only uses a single SGE.
-	 */
-	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
-	phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
-	virt_addr = mboxq->sge_array->addr[0];
-	read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
-
-	/* Set up command fields */
-	bf_set(lpfc_mbx_read_fcf_tbl_indx, &read_fcf->u.request, fcf_index);
-	/* Perform necessary endian conversion */
-	bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
-	lpfc_sli_pcimem_bcopy(bytep, bytep, sizeof(uint32_t));
+	/* Issue the mailbox command asynchronously */
 	mboxq->vport = phba->pport;
 	mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_record;
 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
-	if (rc == MBX_NOT_FINISHED) {
+	if (rc == MBX_NOT_FINISHED)
 		error = -EIO;
-	} else {
+	else {
 		spin_lock_irq(&phba->hbalock);
 		phba->hba_flag |= FCF_DISC_INPROGRESS;
 		spin_unlock_irq(&phba->hbalock);
@@ -11942,6 +11994,90 @@
 }
 
 /**
+ * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
+ * @phba: pointer to lpfc hba data structure.
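+ * @mbox: pointer to the mailbox command that completed.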
+ *
+ * This routine is the completion routine for the rediscover FCF table mailbox
+ * command. If the mailbox command returned failure, it will try to stop the
+ * FCF rediscover wait timer.
+ **/
+void
+lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
+{
+	struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
+	uint32_t shdr_status, shdr_add_status;
+
+	redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
+
+	shdr_status = bf_get(lpfc_mbox_hdr_status,
+			     &redisc_fcf->header.cfg_shdr.response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
+			     &redisc_fcf->header.cfg_shdr.response);
+	if (shdr_status || shdr_add_status) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2746 Requesting for FCF rediscovery failed "
+				"status x%x add_status x%x\n",
+				shdr_status, shdr_add_status);
+		/*
+		 * Request failed; as a last resort, re-try the
+		 * currently registered FCF entry
+		 */
+		lpfc_retry_pport_discovery(phba);
+	} else
+		/*
+		 * Start the FCF rediscovery wait timer for the pending
+		 * FCF before rescanning the FCF record table.
+		 */
+		lpfc_fcf_redisc_wait_start_timer(phba);
+
+	mempool_free(mbox, phba->mbox_mem_pool);
+}
+
+/**
+ * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to request for rediscovery of the entire FCF table
+ * by the port.
+ **/
+int
+lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
+{
+	LPFC_MBOXQ_t *mbox;
+	struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
+	int rc, length;
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2745 Failed to allocate mbox for "
+				"requesting FCF rediscover.\n");
+		return -ENOMEM;
+	}
+
+	length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+			 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
+			 length, LPFC_SLI4_MBX_EMBED);
+
+	redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
+	/* Set count to 0 for invalidating the entire FCF database */
+	bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
+
+	/* Issue the mailbox command asynchronously */
+	mbox->vport = phba->pport;
+	mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
+	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+
+	if (rc == MBX_NOT_FINISHED) {
+		mempool_free(mbox, phba->mbox_mem_pool);
+		return -EIO;
+	}
+	return 0;
+}
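
Illustrative sketch (not part of the patch): a caller might drive the new
rediscovery request roughly as follows; the fallback mirrors the completion
handler above.

	rc = lpfc_sli4_redisc_fcf_table(phba);
	if (rc)
		/* request could not be issued; retry the currently registered FCF */
		lpfc_retry_pport_discovery(phba);
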
+
+/**
  * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
  * @phba: pointer to lpfc hba data structure.
  *
@@ -12069,3 +12205,48 @@
 	kfree(rgn23_data);
 	return;
 }
+
+/**
+ * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
+ * @vport: pointer to vport data structure.
+ *
+ * This function iterates through the mailbox queue and cleans up all
+ * REG_LOGIN and REG_VPI mailbox commands associated with the vport. It
+ * is called when the driver wants to restart discovery of the vport due
+ * to a Clear Virtual Link event.
+ **/
+void
+lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
+{
+	struct lpfc_hba *phba = vport->phba;
+	LPFC_MBOXQ_t *mb, *nextmb;
+	struct lpfc_dmabuf *mp;
+
+	spin_lock_irq(&phba->hbalock);
+	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
+		if (mb->vport != vport)
+			continue;
+
+		if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
+			(mb->u.mb.mbxCommand != MBX_REG_VPI))
+			continue;
+
+		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
+			mp = (struct lpfc_dmabuf *) (mb->context1);
+			if (mp) {
+				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
+				kfree(mp);
+			}
+		}
+		list_del(&mb->list);
+		mempool_free(mb, phba->mbox_mem_pool);
+	}
+	mb = phba->sli.mbox_active;
+	if (mb && (mb->vport == vport)) {
+		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
+			(mb->u.mb.mbxCommand == MBX_REG_VPI))
+			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+	}
+	spin_unlock_irq(&phba->hbalock);
+}
+
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index ba38de3..dfcf543 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -53,17 +53,19 @@
 
 	IOCB_t iocb;		/* IOCB cmd */
 	uint8_t retry;		/* retry counter for IOCB cmd - if needed */
-	uint8_t iocb_flag;
+	uint16_t iocb_flag;
 #define LPFC_IO_LIBDFC		1	/* libdfc iocb */
 #define LPFC_IO_WAKE		2	/* High Priority Queue signal flag */
 #define LPFC_IO_FCP		4	/* FCP command -- iocbq in scsi_buf */
 #define LPFC_DRIVER_ABORTED	8	/* driver aborted this request */
 #define LPFC_IO_FABRIC		0x10	/* Iocb send using fabric scheduler */
 #define LPFC_DELAY_MEM_FREE	0x20    /* Defer free'ing of FC data */
-#define LPFC_FIP_ELS_ID_MASK	0xc0	/* ELS_ID range 0-3 */
-#define LPFC_FIP_ELS_ID_SHIFT	6
+#define LPFC_EXCHANGE_BUSY	0x40    /* SLI4 hba reported XB in response */
+#define LPFC_USE_FCPWQIDX	0x80    /* Submit to specified FCPWQ index */
 
-	uint8_t abort_count;
+#define LPFC_FIP_ELS_ID_MASK	0xc000	/* ELS_ID range 0-3, non-shifted mask */
+#define LPFC_FIP_ELS_ID_SHIFT	14
+
 	uint8_t rsvd2;
 	uint32_t drvrTimeout;	/* driver timeout in seconds */
 	uint32_t fcp_wqidx;	/* index to FCP work queue */
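
Illustrative sketch (not part of the patch): with iocb_flag widened to 16 bits,
the FIP ELS_ID value now occupies bits 14-15; encoding and decoding it with the
new mask/shift would look roughly like this (elsiocb and els_id are assumed names).

	/* store a 2-bit ELS_ID value in the upper bits of iocb_flag */
	elsiocb->iocb_flag |= (els_id << LPFC_FIP_ELS_ID_SHIFT) & LPFC_FIP_ELS_ID_MASK;
	/* recover it later */
	els_id = (elsiocb->iocb_flag & LPFC_FIP_ELS_ID_MASK) >> LPFC_FIP_ELS_ID_SHIFT;
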
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 44e5f57..86308836 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -22,6 +22,10 @@
 #define LPFC_RELEASE_NOTIFICATION_INTERVAL	32
 #define LPFC_GET_QE_REL_INT			32
 #define LPFC_RPI_LOW_WATER_MARK			10
+
+/* Amount of time in milliseconds to wait for FCF rediscovery to complete */
+#define LPFC_FCF_REDISCOVER_WAIT_TMO		2000 /* msec */
+
 /* Number of SGL entries can be posted in a 4KB nonembedded mbox command */
 #define LPFC_NEMBED_MBOX_SGL_CNT		254
 
@@ -126,24 +130,36 @@
 	uint8_t status;
 	uint8_t physical;
 	uint8_t fault;
+	uint16_t logical_speed;
+};
+
+struct lpfc_fcf_rec {
+	uint8_t  fabric_name[8];
+	uint8_t  switch_name[8];
+	uint8_t  mac_addr[6];
+	uint16_t fcf_indx;
+	uint32_t priority;
+	uint16_t vlan_id;
+	uint32_t addr_mode;
+	uint32_t flag;
+#define BOOT_ENABLE	0x01
+#define RECORD_VALID	0x02
 };
 
 struct lpfc_fcf {
-	uint8_t	 fabric_name[8];
-	uint8_t	 switch_name[8];
-	uint8_t  mac_addr[6];
-	uint16_t fcf_indx;
 	uint16_t fcfi;
 	uint32_t fcf_flag;
 #define FCF_AVAILABLE	0x01 /* FCF available for discovery */
 #define FCF_REGISTERED	0x02 /* FCF registered with FW */
-#define FCF_DISCOVERED	0x04 /* FCF discovery started  */
-#define FCF_BOOT_ENABLE 0x08 /* Boot bios use this FCF */
-#define FCF_IN_USE	0x10 /* Atleast one discovery completed */
-#define FCF_VALID_VLAN	0x20 /* Use the vlan id specified */
-	uint32_t priority;
+#define FCF_SCAN_DONE	0x04 /* FCF table scan done */
+#define FCF_IN_USE	0x08 /* At least one discovery completed */
+#define FCF_REDISC_PEND	0x10 /* FCF rediscovery pending */
+#define FCF_REDISC_EVT	0x20 /* FCF rediscovery event to worker thread */
+#define FCF_REDISC_FOV	0x40 /* Post FCF rediscovery fast failover */
 	uint32_t addr_mode;
-	uint16_t vlan_id;
+	struct lpfc_fcf_rec current_rec;
+	struct lpfc_fcf_rec failover_rec;
+	struct timer_list redisc_wait;
 };
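
Illustrative sketch (not part of the patch): since LPFC_FCF_REDISCOVER_WAIT_TMO
is in milliseconds, arming the new redisc_wait timer would look roughly like the
call below, assuming the hba structure embeds this lpfc_fcf as phba->fcf.

	mod_timer(&phba->fcf.redisc_wait,
		  jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
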
 
 #define LPFC_REGION23_SIGNATURE "RG23"
@@ -248,7 +264,10 @@
 #define SLI4_CT_VFI 2
 #define SLI4_CT_FCFI 3
 
-#define LPFC_SLI4_MAX_SEGMENT_SIZE 0x10000
+#define LPFC_SLI4_FL1_MAX_SEGMENT_SIZE	0x10000
+#define LPFC_SLI4_FL1_MAX_BUF_SIZE	0x2000
+#define LPFC_SLI4_MIN_BUF_SIZE		0x400
+#define LPFC_SLI4_MAX_BUF_SIZE		0x20000
 
 /*
  * SLI4 specific data structures
@@ -282,6 +301,42 @@
 	struct lpfc_hba *phba;
 };
 
+/* Port Capabilities for SLI4 Parameters */
+struct lpfc_pc_sli4_params {
+	uint32_t supported;
+	uint32_t if_type;
+	uint32_t sli_rev;
+	uint32_t sli_family;
+	uint32_t featurelevel_1;
+	uint32_t featurelevel_2;
+	uint32_t proto_types;
+#define LPFC_SLI4_PROTO_FCOE	0x0000001
+#define LPFC_SLI4_PROTO_FC	0x0000002
+#define LPFC_SLI4_PROTO_NIC	0x0000004
+#define LPFC_SLI4_PROTO_ISCSI	0x0000008
+#define LPFC_SLI4_PROTO_RDMA	0x0000010
+	uint32_t sge_supp_len;
+	uint32_t if_page_sz;
+	uint32_t rq_db_window;
+	uint32_t loopbk_scope;
+	uint32_t eq_pages_max;
+	uint32_t eqe_size;
+	uint32_t cq_pages_max;
+	uint32_t cqe_size;
+	uint32_t mq_pages_max;
+	uint32_t mqe_size;
+	uint32_t mq_elem_cnt;
+	uint32_t wq_pages_max;
+	uint32_t wqe_size;
+	uint32_t rq_pages_max;
+	uint32_t rqe_size;
+	uint32_t hdr_pages_max;
+	uint32_t hdr_size;
+	uint32_t hdr_pp_align;
+	uint32_t sgl_pages_max;
+	uint32_t sgl_pp_align;
+};
+
 /* SLI4 HBA data structure entries */
 struct lpfc_sli4_hba {
 	void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
@@ -295,7 +350,7 @@
 	void __iomem *UERRHIregaddr; /* Address to UERR_STATUS_HI register */
 	void __iomem *UEMASKLOregaddr; /* Address to UE_MASK_LO register */
 	void __iomem *UEMASKHIregaddr; /* Address to UE_MASK_HI register */
-	void __iomem *SCRATCHPADregaddr; /* Address to scratchpad register */
+	void __iomem *SLIINTFregaddr; /* Address to SLI_INTF register */
 	/* BAR1 FCoE function CSR register memory map */
 	void __iomem *STAregaddr;    /* Address to HST_STATE register */
 	void __iomem *ISRregaddr;    /* Address to HST_ISR register */
@@ -310,6 +365,8 @@
 
 	uint32_t ue_mask_lo;
 	uint32_t ue_mask_hi;
+	struct lpfc_register sli_intf;
+	struct lpfc_pc_sli4_params pc_sli4_params;
 	struct msix_entry *msix_entries;
 	uint32_t cfg_eqn;
 	struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */
@@ -406,6 +463,8 @@
 void lpfc_sli4_mbx_sge_set(struct lpfcMboxq *, uint32_t, dma_addr_t, uint32_t);
 void lpfc_sli4_mbx_sge_get(struct lpfcMboxq *, uint32_t,
 			   struct lpfc_mbx_sge *);
+int lpfc_sli4_mbx_read_fcf_record(struct lpfc_hba *, struct lpfcMboxq *,
+				  uint16_t);
 
 void lpfc_sli4_hba_reset(struct lpfc_hba *);
 struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
@@ -448,6 +507,7 @@
 void lpfc_sli4_free_rpi(struct lpfc_hba *, int);
 void lpfc_sli4_remove_rpis(struct lpfc_hba *);
 void lpfc_sli4_async_event_proc(struct lpfc_hba *);
+void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *);
 int lpfc_sli4_resume_rpi(struct lpfc_nodelist *);
 void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *);
 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 792f722..ac276aa 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2010 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
@@ -18,7 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "8.3.7"
+#define LPFC_DRIVER_VERSION "8.3.9"
 #define LPFC_DRIVER_NAME		"lpfc"
 #define LPFC_SP_DRIVER_HANDLER_NAME	"lpfc:sp"
 #define LPFC_FP_DRIVER_HANDLER_NAME	"lpfc:fp"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index e3c7fa6..dc86e87 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -389,7 +389,7 @@
 	 * by the port.
 	 */
 	if ((phba->sli_rev == LPFC_SLI_REV4) &&
-	    (pport->vpi_state & LPFC_VPI_REGISTERED)) {
+		(pport->fc_flag & FC_VFI_REGISTERED)) {
 		rc = lpfc_sli4_init_vpi(phba, vpi);
 		if (rc) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
@@ -505,6 +505,7 @@
 	struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
 	struct lpfc_hba   *phba = vport->phba;
 	struct lpfc_nodelist *ndlp = NULL;
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 
 	if ((phba->link_state < LPFC_LINK_UP) ||
 	    (phba->fc_topology == TOPOLOGY_LOOP)) {
@@ -512,10 +513,10 @@
 		return VPORT_OK;
 	}
 
-	spin_lock_irq(&phba->hbalock);
+	spin_lock_irq(shost->host_lock);
 	vport->load_flag |= FC_LOADING;
 	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
-	spin_unlock_irq(&phba->hbalock);
+	spin_unlock_irq(shost->host_lock);
 
 	/* Use the Physical nodes Fabric NDLP to determine if the link is
 	 * up and ready to FDISC.
diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c
index c24e86f..dd808ae 100644
--- a/drivers/scsi/mac_esp.c
+++ b/drivers/scsi/mac_esp.c
@@ -22,7 +22,6 @@
 
 #include <asm/irq.h>
 #include <asm/dma.h>
-
 #include <asm/macints.h>
 #include <asm/macintosh.h>
 
@@ -279,24 +278,27 @@
  * Programmed IO routines follow.
  */
 
-static inline int mac_esp_wait_for_fifo(struct esp *esp)
+static inline unsigned int mac_esp_wait_for_fifo(struct esp *esp)
 {
 	int i = 500000;
 
 	do {
-		if (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES)
-			return 0;
+		unsigned int fbytes = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
+
+		if (fbytes)
+			return fbytes;
 
 		udelay(2);
 	} while (--i);
 
 	printk(KERN_ERR PFX "FIFO is empty (sreg %02x)\n",
 	       esp_read8(ESP_STATUS));
-	return 1;
+	return 0;
 }
 
 static inline int mac_esp_wait_for_intr(struct esp *esp)
 {
+	struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
 	int i = 500000;
 
 	do {
@@ -308,6 +310,7 @@
 	} while (--i);
 
 	printk(KERN_ERR PFX "IRQ timeout (sreg %02x)\n", esp->sreg);
+	mep->error = 1;
 	return 1;
 }
 
@@ -347,11 +350,10 @@
 static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
 				 u32 dma_count, int write, u8 cmd)
 {
-	unsigned long flags;
 	struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
 	u8 *fifo = esp->regs + ESP_FDATA * 16;
 
-	local_irq_save(flags);
+	disable_irq(esp->host->irq);
 
 	cmd &= ~ESP_CMD_DMA;
 	mep->error = 0;
@@ -359,11 +361,35 @@
 	if (write) {
 		scsi_esp_cmd(esp, cmd);
 
-		if (!mac_esp_wait_for_intr(esp)) {
-			if (mac_esp_wait_for_fifo(esp))
-				esp_count = 0;
-		} else {
-			esp_count = 0;
+		while (1) {
+			unsigned int n;
+
+			n = mac_esp_wait_for_fifo(esp);
+			if (!n)
+				break;
+
+			if (n > esp_count)
+				n = esp_count;
+			esp_count -= n;
+
+			MAC_ESP_PIO_LOOP("%2@,%0@+", n);
+
+			if (!esp_count)
+				break;
+
+			if (mac_esp_wait_for_intr(esp))
+				break;
+
+			if (((esp->sreg & ESP_STAT_PMASK) != ESP_DIP) &&
+			    ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP))
+				break;
+
+			esp->ireg = esp_read8(ESP_INTRPT);
+			if ((esp->ireg & (ESP_INTR_DC | ESP_INTR_BSERV)) !=
+			    ESP_INTR_BSERV)
+				break;
+
+			scsi_esp_cmd(esp, ESP_CMD_TI);
 		}
 	} else {
 		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
@@ -374,47 +400,24 @@
 			MAC_ESP_PIO_LOOP("%0@+,%2@", esp_count);
 
 		scsi_esp_cmd(esp, cmd);
-	}
 
-	while (esp_count) {
-		unsigned int n;
+		while (esp_count) {
+			unsigned int n;
 
-		if (mac_esp_wait_for_intr(esp)) {
-			mep->error = 1;
-			break;
-		}
-
-		if (esp->sreg & ESP_STAT_SPAM) {
-			printk(KERN_ERR PFX "gross error\n");
-			mep->error = 1;
-			break;
-		}
-
-		n = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
-
-		if (write) {
-			if (n > esp_count)
-				n = esp_count;
-			esp_count -= n;
-
-			MAC_ESP_PIO_LOOP("%2@,%0@+", n);
-
-			if ((esp->sreg & ESP_STAT_PMASK) == ESP_STATP)
+			if (mac_esp_wait_for_intr(esp))
 				break;
 
-			if (esp_count) {
-				esp->ireg = esp_read8(ESP_INTRPT);
-				if (esp->ireg & ESP_INTR_DC)
-					break;
+			if (((esp->sreg & ESP_STAT_PMASK) != ESP_DOP) &&
+			    ((esp->sreg & ESP_STAT_PMASK) != ESP_MOP))
+				break;
 
-				scsi_esp_cmd(esp, ESP_CMD_TI);
-			}
-		} else {
 			esp->ireg = esp_read8(ESP_INTRPT);
-			if (esp->ireg & ESP_INTR_DC)
+			if ((esp->ireg & (ESP_INTR_DC | ESP_INTR_BSERV)) !=
+			    ESP_INTR_BSERV)
 				break;
 
-			n = MAC_ESP_FIFO_SIZE - n;
+			n = MAC_ESP_FIFO_SIZE -
+			    (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES);
 			if (n > esp_count)
 				n = esp_count;
 
@@ -429,7 +432,7 @@
 		}
 	}
 
-	local_irq_restore(flags);
+	enable_irq(esp->host->irq);
 }
 
 static int mac_esp_irq_pending(struct esp *esp)
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index d9b8ca5..409648f 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -10,7 +10,7 @@
  *	   2 of the License, or (at your option) any later version.
  *
  * FILE		: megaraid_sas.c
- * Version     : v00.00.04.12-rc1
+ * Version     : v00.00.04.17.1-rc1
  *
  * Authors:
  *	(email-id : megaraidlinux@lsi.com)
@@ -843,6 +843,7 @@
 	pthru->lun = scp->device->lun;
 	pthru->cdb_len = scp->cmd_len;
 	pthru->timeout = 0;
+	pthru->pad_0 = 0;
 	pthru->flags = flags;
 	pthru->data_xfer_len = scsi_bufflen(scp);
 
@@ -874,6 +875,12 @@
 		pthru->sge_count = megasas_make_sgl32(instance, scp,
 						      &pthru->sgl);
 
+	if (pthru->sge_count > instance->max_num_sge) {
+		printk(KERN_ERR "megasas: DCDB too many SGEs NUM=%x\n",
+			pthru->sge_count);
+		return 0;
+	}
+
 	/*
 	 * Sense info specific
 	 */
@@ -1000,6 +1007,12 @@
 	} else
 		ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);
 
+	if (ldio->sge_count > instance->max_num_sge) {
+		printk(KERN_ERR "megasas: build_ld_io: sge_count = %x\n",
+			ldio->sge_count);
+		return 0;
+	}
+
 	/*
 	 * Sense info specific
 	 */
@@ -2250,6 +2263,7 @@
 	dcmd->sge_count = 1;
 	dcmd->flags = MFI_FRAME_DIR_READ;
 	dcmd->timeout = 0;
+	dcmd->pad_0 = 0;
 	dcmd->data_xfer_len = MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST);
 	dcmd->opcode = MR_DCMD_PD_LIST_QUERY;
 	dcmd->sgl.sge32[0].phys_addr = ci_h;
@@ -2294,6 +2308,86 @@
 	return ret;
 }
 
+/**
+ * megasas_get_ld_list -	Returns FW's ld_list structure
+ * @instance:				Adapter soft state
+ *
+ * Issues an internal command (DCMD) to get the FW's logical drive (LD)
+ * list structure.  This information is mainly used to find out which
+ * LDs are currently exported by the FW.
+ */
+static int
+megasas_get_ld_list(struct megasas_instance *instance)
+{
+	int ret = 0, ld_index = 0, ids = 0;
+	struct megasas_cmd *cmd;
+	struct megasas_dcmd_frame *dcmd;
+	struct MR_LD_LIST *ci;
+	dma_addr_t ci_h = 0;
+
+	cmd = megasas_get_cmd(instance);
+
+	if (!cmd) {
+		printk(KERN_DEBUG "megasas_get_ld_list: Failed to get cmd\n");
+		return -ENOMEM;
+	}
+
+	dcmd = &cmd->frame->dcmd;
+
+	ci = pci_alloc_consistent(instance->pdev,
+				sizeof(struct MR_LD_LIST),
+				&ci_h);
+
+	if (!ci) {
+		printk(KERN_DEBUG "Failed to alloc mem in get_ld_list\n");
+		megasas_return_cmd(instance, cmd);
+		return -ENOMEM;
+	}
+
+	memset(ci, 0, sizeof(*ci));
+	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+	dcmd->cmd = MFI_CMD_DCMD;
+	dcmd->cmd_status = 0xFF;
+	dcmd->sge_count = 1;
+	dcmd->flags = MFI_FRAME_DIR_READ;
+	dcmd->timeout = 0;
+	dcmd->data_xfer_len = sizeof(struct MR_LD_LIST);
+	dcmd->opcode = MR_DCMD_LD_GET_LIST;
+	dcmd->sgl.sge32[0].phys_addr = ci_h;
+	dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST);
+	dcmd->pad_0  = 0;
+
+	if (!megasas_issue_polled(instance, cmd)) {
+		ret = 0;
+	} else {
+		ret = -1;
+	}
+
+	/* the following code updates the instance's LD target id list */
+
+	if ((ret == 0) && (ci->ldCount < MAX_LOGICAL_DRIVES)) {
+		memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
+
+		for (ld_index = 0; ld_index < ci->ldCount; ld_index++) {
+			if (ci->ldList[ld_index].state != 0) {
+				ids = ci->ldList[ld_index].ref.targetId;
+				instance->ld_ids[ids] =
+					ci->ldList[ld_index].ref.targetId;
+			}
+		}
+	}
+
+	pci_free_consistent(instance->pdev,
+				sizeof(struct MR_LD_LIST),
+				ci,
+				ci_h);
+
+	megasas_return_cmd(instance, cmd);
+	return ret;
+}
+
 /**
  * megasas_get_controller_info -	Returns FW's controller structure
  * @instance:				Adapter soft state
@@ -2339,6 +2433,7 @@
 	dcmd->sge_count = 1;
 	dcmd->flags = MFI_FRAME_DIR_READ;
 	dcmd->timeout = 0;
+	dcmd->pad_0 = 0;
 	dcmd->data_xfer_len = sizeof(struct megasas_ctrl_info);
 	dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
 	dcmd->sgl.sge32[0].phys_addr = ci_h;
@@ -2590,6 +2685,9 @@
 		(MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
 	megasas_get_pd_list(instance);
 
+	memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
+	megasas_get_ld_list(instance);
+
 	ctrl_info = kmalloc(sizeof(struct megasas_ctrl_info), GFP_KERNEL);
 
 	/*
@@ -2714,6 +2812,7 @@
 	dcmd->sge_count = 1;
 	dcmd->flags = MFI_FRAME_DIR_READ;
 	dcmd->timeout = 0;
+	dcmd->pad_0 = 0;
 	dcmd->data_xfer_len = sizeof(struct megasas_evt_log_info);
 	dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO;
 	dcmd->sgl.sge32[0].phys_addr = el_info_h;
@@ -2828,6 +2927,7 @@
 	dcmd->sge_count = 1;
 	dcmd->flags = MFI_FRAME_DIR_READ;
 	dcmd->timeout = 0;
+	dcmd->pad_0 = 0;
 	dcmd->data_xfer_len = sizeof(struct megasas_evt_detail);
 	dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
 	dcmd->mbox.w[0] = seq_num;
@@ -3166,6 +3266,7 @@
 	dcmd->sge_count = 0;
 	dcmd->flags = MFI_FRAME_DIR_NONE;
 	dcmd->timeout = 0;
+	dcmd->pad_0 = 0;
 	dcmd->data_xfer_len = 0;
 	dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
 	dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
@@ -3205,6 +3306,7 @@
 	dcmd->sge_count = 0;
 	dcmd->flags = MFI_FRAME_DIR_NONE;
 	dcmd->timeout = 0;
+	dcmd->pad_0 = 0;
 	dcmd->data_xfer_len = 0;
 	dcmd->opcode = opcode;
 
@@ -3984,6 +4086,7 @@
 	struct  Scsi_Host *host;
 	struct  scsi_device *sdev1;
 	u16     pd_index = 0;
+	u16	ld_index = 0;
 	int     i, j, doscan = 0;
 	u32 seq_num;
 	int error;
@@ -3999,8 +4102,124 @@
 
 		switch (instance->evt_detail->code) {
 		case MR_EVT_PD_INSERTED:
+			if (megasas_get_pd_list(instance) == 0) {
+			for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
+				for (j = 0;
+				j < MEGASAS_MAX_DEV_PER_CHANNEL;
+				j++) {
+
+				pd_index =
+				(i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
+
+				sdev1 =
+				scsi_device_lookup(host, i, j, 0);
+
+				if (instance->pd_list[pd_index].driveState
+						== MR_PD_STATE_SYSTEM) {
+						if (!sdev1) {
+						scsi_add_device(host, i, j, 0);
+						}
+
+					if (sdev1)
+						scsi_device_put(sdev1);
+					}
+				}
+			}
+			}
+			doscan = 0;
+			break;
+
 		case MR_EVT_PD_REMOVED:
+			if (megasas_get_pd_list(instance) == 0) {
+			for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
+				for (j = 0;
+				j < MEGASAS_MAX_DEV_PER_CHANNEL;
+				j++) {
+
+				pd_index =
+				(i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
+
+				sdev1 =
+				scsi_device_lookup(host, i, j, 0);
+
+				if (instance->pd_list[pd_index].driveState
+					== MR_PD_STATE_SYSTEM) {
+					if (sdev1) {
+						scsi_device_put(sdev1);
+					}
+				} else {
+					if (sdev1) {
+						scsi_remove_device(sdev1);
+						scsi_device_put(sdev1);
+					}
+				}
+				}
+			}
+			}
+			doscan = 0;
+			break;
+
+		case MR_EVT_LD_OFFLINE:
+		case MR_EVT_LD_DELETED:
+			megasas_get_ld_list(instance);
+			for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
+				for (j = 0;
+				j < MEGASAS_MAX_DEV_PER_CHANNEL;
+				j++) {
+
+				ld_index =
+				(i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
+
+				sdev1 = scsi_device_lookup(host,
+					i + MEGASAS_MAX_LD_CHANNELS,
+					j,
+					0);
+
+				if (instance->ld_ids[ld_index] != 0xff) {
+					if (sdev1) {
+						scsi_device_put(sdev1);
+					}
+				} else {
+					if (sdev1) {
+						scsi_remove_device(sdev1);
+						scsi_device_put(sdev1);
+					}
+				}
+				}
+			}
+			doscan = 0;
+			break;
+		case MR_EVT_LD_CREATED:
+			megasas_get_ld_list(instance);
+			for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
+				for (j = 0;
+					j < MEGASAS_MAX_DEV_PER_CHANNEL;
+					j++) {
+					ld_index =
+					(i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
+
+					sdev1 = scsi_device_lookup(host,
+						i+MEGASAS_MAX_LD_CHANNELS,
+						j, 0);
+
+					if (instance->ld_ids[ld_index] !=
+								0xff) {
+						if (!sdev1) {
+							scsi_add_device(host,
+								i + MEGASAS_MAX_LD_CHANNELS,
+								j, 0);
+						}
+					}
+					if (sdev1) {
+						scsi_device_put(sdev1);
+					}
+				}
+			}
+			doscan = 0;
+			break;
 		case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
+		case MR_EVT_FOREIGN_CFG_IMPORTED:
 			doscan = 1;
 			break;
 		default:
@@ -4035,6 +4254,31 @@
 				}
 			}
 		}
+
+		megasas_get_ld_list(instance);
+		for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
+			for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
+				ld_index =
+				(i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
+
+				sdev1 = scsi_device_lookup(host,
+					i+MEGASAS_MAX_LD_CHANNELS, j, 0);
+				if (instance->ld_ids[ld_index] != 0xff) {
+					if (!sdev1) {
+						scsi_add_device(host,
+								i + MEGASAS_MAX_LD_CHANNELS,
+								j, 0);
+					} else {
+						scsi_device_put(sdev1);
+					}
+				} else {
+					if (sdev1) {
+						scsi_remove_device(sdev1);
+						scsi_device_put(sdev1);
+					}
+				}
+			}
+		}
 	}
 
 	if ( instance->aen_cmd != NULL ) {
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 72b28e4..9d8b6bf 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -18,9 +18,9 @@
 /*
  * MegaRAID SAS Driver meta data
  */
-#define MEGASAS_VERSION				"00.00.04.12-rc1"
-#define MEGASAS_RELDATE				"Sep. 17, 2009"
-#define MEGASAS_EXT_VERSION			"Thu Sep. 17 11:41:51 PST 2009"
+#define MEGASAS_VERSION			"00.00.04.17.1-rc1"
+#define MEGASAS_RELDATE			"Oct. 29, 2009"
+#define MEGASAS_EXT_VERSION		"Thu. Oct. 29, 11:41:51 PST 2009"
 
 /*
  * Device IDs
@@ -117,6 +117,7 @@
 #define MFI_CMD_STP				0x08
 
 #define MR_DCMD_CTRL_GET_INFO			0x01010000
+#define MR_DCMD_LD_GET_LIST			0x03010000
 
 #define MR_DCMD_CTRL_CACHE_FLUSH		0x01101000
 #define MR_FLUSH_CTRL_CACHE			0x01
@@ -349,6 +350,32 @@
 	u8             driveState;
 } __packed;
 
+/*
+ * defines the logical drive reference structure
+ */
+union  MR_LD_REF {
+	struct {
+		u8      targetId;
+		u8      reserved;
+		u16     seqNum;
+	};
+	u32     ref;
+} __packed;
+
+/*
+ * defines the logical drive list structure
+ */
+struct MR_LD_LIST {
+	u32     ldCount;
+	u32     reserved;
+	struct {
+		union MR_LD_REF   ref;
+		u8          state;
+		u8          reserved[3];
+		u64         size;
+	} ldList[MAX_LOGICAL_DRIVES];
+} __packed;
+
 /*
  * SAS controller properties
  */
@@ -637,6 +664,8 @@
 #define MEGASAS_MAX_LD				64
 #define MEGASAS_MAX_PD                          (MEGASAS_MAX_PD_CHANNELS * \
 						MEGASAS_MAX_DEV_PER_CHANNEL)
+#define MEGASAS_MAX_LD_IDS			(MEGASAS_MAX_LD_CHANNELS * \
+						MEGASAS_MAX_DEV_PER_CHANNEL)
 
 #define MEGASAS_DBG_LVL				1
 
@@ -1187,6 +1216,7 @@
 	struct megasas_register_set __iomem *reg_set;
 
 	struct megasas_pd_list          pd_list[MEGASAS_MAX_PD];
+	u8     ld_ids[MEGASAS_MAX_LD_IDS];
 	s8 init_id;
 
 	u16 max_num_sge;
diff --git a/drivers/scsi/mpt2sas/Kconfig b/drivers/scsi/mpt2sas/Kconfig
index 70c4c24..ba8e128 100644
--- a/drivers/scsi/mpt2sas/Kconfig
+++ b/drivers/scsi/mpt2sas/Kconfig
@@ -44,6 +44,7 @@
 	tristate "LSI MPT Fusion SAS 2.0 Device Driver"
 	depends on PCI && SCSI
 	select SCSI_SAS_ATTRS
+	select RAID_ATTRS
 	---help---
 	This driver supports PCI-Express SAS 6Gb/s Host Adapters.
 
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2.h b/drivers/scsi/mpt2sas/mpi/mpi2.h
index 9141681..9958d84 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2.h
@@ -8,7 +8,7 @@
  *                  scatter/gather formats.
  *  Creation Date:  June 21, 2006
  *
- *  mpi2.h Version:  02.00.13
+ *  mpi2.h Version:  02.00.14
  *
  *  Version History
  *  ---------------
@@ -53,6 +53,10 @@
  *                      bytes reserved.
  *                      Added RAID Accelerator functionality.
  *  07-30-09  02.00.13  Bumped MPI2_HEADER_VERSION_UNIT.
+ *  10-28-09  02.00.14  Bumped MPI2_HEADER_VERSION_UNIT.
+ *                      Added MSI-x index mask and shift for Reply Post Host
+ *                      Index register.
+ *                      Added function code for Host Based Discovery Action.
  *  --------------------------------------------------------------------------
  */
 
@@ -78,7 +82,7 @@
 #define MPI2_VERSION_02_00                  (0x0200)
 
 /* versioning for this MPI header set */
-#define MPI2_HEADER_VERSION_UNIT            (0x0D)
+#define MPI2_HEADER_VERSION_UNIT            (0x0E)
 #define MPI2_HEADER_VERSION_DEV             (0x00)
 #define MPI2_HEADER_VERSION_UNIT_MASK       (0xFF00)
 #define MPI2_HEADER_VERSION_UNIT_SHIFT      (8)
@@ -232,9 +236,12 @@
 #define MPI2_REPLY_FREE_HOST_INDEX_OFFSET       (0x00000048)
 
 /*
- * Offset for the Reply Descriptor Post Queue
+ * Defines for the Reply Descriptor Post Queue
  */
 #define MPI2_REPLY_POST_HOST_INDEX_OFFSET       (0x0000006C)
+#define MPI2_REPLY_POST_HOST_INDEX_MASK         (0x00FFFFFF)
+#define MPI2_RPHI_MSIX_INDEX_MASK               (0xFF000000)
+#define MPI2_RPHI_MSIX_INDEX_SHIFT              (24)
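
Illustrative sketch (not part of the patch): the new mask/shift pair lets a
driver fold an MSI-X index into its Reply Post Host Index doorbell update; the
register and variable names below are assumptions.

	u32 rphi = (reply_post_index & MPI2_REPLY_POST_HOST_INDEX_MASK) |
		   ((u32)msix_index << MPI2_RPHI_MSIX_INDEX_SHIFT);
	writel(rphi, &ioc->chip->ReplyPostHostIndex);
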
 
 /*
  * Defines for the HCBSize and address
@@ -497,12 +504,13 @@
 #define MPI2_FUNCTION_TARGET_CMD_BUF_BASE_POST      (0x24) /* Target Command Buffer Post Base */
 #define MPI2_FUNCTION_TARGET_CMD_BUF_LIST_POST      (0x25) /* Target Command Buffer Post List */
 #define MPI2_FUNCTION_RAID_ACCELERATOR              (0x2C) /* RAID Accelerator*/
+/* Host Based Discovery Action */
+#define MPI2_FUNCTION_HOST_BASED_DISCOVERY_ACTION   (0x2F)
 
 
 
 /* Doorbell functions */
 #define MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET        (0x40)
-/* #define MPI2_FUNCTION_IO_UNIT_RESET                 (0x41) */
 #define MPI2_FUNCTION_HANDSHAKE                     (0x42)
 
 
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
index 1611c57..cf0ac9f 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
@@ -6,7 +6,7 @@
  *          Title:  MPI Configuration messages and pages
  *  Creation Date:  November 10, 2006
  *
- *    mpi2_cnfg.h Version:  02.00.12
+ *    mpi2_cnfg.h Version:  02.00.13
  *
  *  Version History
  *  ---------------
@@ -107,6 +107,8 @@
  *                      to SAS Device Page 0 Flags field.
  *                      Added PhyInfo defines for power condition.
  *                      Added Ethernet configuration pages.
+ *  10-28-09  02.00.13  Added MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY.
+ *                      Added SAS PHY Page 4 structure and defines.
  *  --------------------------------------------------------------------------
  */
 
@@ -712,6 +714,7 @@
 #define MPI2_IOUNITPAGE1_PAGEVERSION                    (0x04)
 
 /* IO Unit Page 1 Flags defines */
+#define MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY    (0x00000800)
 #define MPI2_IOUNITPAGE1_MASK_SATA_WRITE_CACHE          (0x00000600)
 #define MPI2_IOUNITPAGE1_ENABLE_SATA_WRITE_CACHE        (0x00000000)
 #define MPI2_IOUNITPAGE1_DISABLE_SATA_WRITE_CACHE       (0x00000200)
@@ -2291,6 +2294,26 @@
 #define MPI2_SASPHY3_PAGEVERSION            (0x00)
 
 
+/* SAS PHY Page 4 */
+
+typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_4 {
+    MPI2_CONFIG_EXTENDED_PAGE_HEADER    Header;                     /* 0x00 */
+    U16                                 Reserved1;                  /* 0x08 */
+    U8                                  Reserved2;                  /* 0x0A */
+    U8                                  Flags;                      /* 0x0B */
+    U8                                  InitialFrame[28];           /* 0x0C */
+} MPI2_CONFIG_PAGE_SAS_PHY_4, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_PHY_4,
+  Mpi2SasPhyPage4_t, MPI2_POINTER pMpi2SasPhyPage4_t;
+
+#define MPI2_SASPHY4_PAGEVERSION            (0x00)
+
+/* values for the Flags field */
+#define MPI2_SASPHY4_FLAGS_FRAME_VALID        (0x02)
+#define MPI2_SASPHY4_FLAGS_SATA_FRAME         (0x01)
+
+
+
+
 /****************************************************************************
 *   SAS Port Config Pages
 ****************************************************************************/
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_history.txt b/drivers/scsi/mpt2sas/mpi/mpi2_history.txt
index 65fcaa3..c4adf76 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_history.txt
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_history.txt
@@ -5,23 +5,24 @@
  Copyright (c) 2000-2009 LSI Corporation.
 
  ---------------------------------------
- Header Set Release Version:    02.00.12
- Header Set Release Date:       05-06-09
+ Header Set Release Version:    02.00.14
+ Header Set Release Date:       10-28-09
  ---------------------------------------
 
  Filename               Current version     Prior version
  ----------             ---------------     -------------
- mpi2.h                 02.00.12            02.00.11
- mpi2_cnfg.h            02.00.11            02.00.10
- mpi2_init.h            02.00.07            02.00.06
- mpi2_ioc.h             02.00.11            02.00.10
- mpi2_raid.h            02.00.03            02.00.03
- mpi2_sas.h             02.00.02            02.00.02
+ mpi2.h                 02.00.14            02.00.13
+ mpi2_cnfg.h            02.00.13            02.00.12
+ mpi2_init.h            02.00.08            02.00.07
+ mpi2_ioc.h             02.00.13            02.00.12
+ mpi2_raid.h            02.00.04            02.00.04
+ mpi2_sas.h             02.00.03            02.00.02
  mpi2_targ.h            02.00.03            02.00.03
- mpi2_tool.h            02.00.03            02.00.02
+ mpi2_tool.h            02.00.04            02.00.04
  mpi2_type.h            02.00.00            02.00.00
- mpi2_ra.h              02.00.00
- mpi2_history.txt       02.00.11            02.00.12
+ mpi2_ra.h              02.00.00            02.00.00
+ mpi2_hbd.h             02.00.00
+ mpi2_history.txt       02.00.14            02.00.13
 
 
  *  Date      Version   Description
@@ -65,6 +66,11 @@
  *                      MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR and made those
  *                      bytes reserved.
  *                      Added RAID Accelerator functionality.
+ *  07-30-09  02.00.13  Bumped MPI2_HEADER_VERSION_UNIT.
+ *  10-28-09  02.00.14  Bumped MPI2_HEADER_VERSION_UNIT.
+ *                      Added MSI-x index mask and shift for Reply Post Host
+ *                      Index register.
+ *                      Added function code for Host Based Discovery Action.
  *  --------------------------------------------------------------------------
 
 mpi2_cnfg.h
@@ -155,6 +161,15 @@
  *                      Added expander reduced functionality data to SAS
  *                      Expander Page 0.
  *                      Added SAS PHY Page 2 and SAS PHY Page 3.
+ *  07-30-09  02.00.12  Added IO Unit Page 7.
+ *                      Added new device ids.
+ *                      Added SAS IO Unit Page 5.
+ *                      Added partial and slumber power management capable flags
+ *                      to SAS Device Page 0 Flags field.
+ *                      Added PhyInfo defines for power condition.
+ *                      Added Ethernet configuration pages.
+ *  10-28-09  02.00.13  Added MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY.
+ *                      Added SAS PHY Page 4 structure and defines.
  *  --------------------------------------------------------------------------
 
 mpi2_init.h
@@ -172,6 +187,10 @@
  *                      Query Asynchronous Event.
  *                      Defined two new bits in the SlotStatus field of the SCSI
  *                      Enclosure Processor Request and Reply.
+ *  10-28-09  02.00.08  Added defines for decoding the ResponseInfo bytes for
+ *                      both SCSI IO Error Reply and SCSI Task Management Reply.
+ *                      Added ResponseInfo field to MPI2_SCSI_TASK_MANAGE_REPLY.
+ *                      Added MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG define.
  *  --------------------------------------------------------------------------
 
 mpi2_ioc.h
@@ -246,6 +265,20 @@
  *                      Added two new reason codes for SAS Device Status Change
  *                      Event.
  *                      Added new event: SAS PHY Counter.
+ *  07-30-09  02.00.12  Added GPIO Interrupt event define and structure.
+ *                      Added MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define.
+ *                      Added new product id family for 2208.
+ *  10-28-09  02.00.13  Added HostMSIxVectors field to MPI2_IOC_INIT_REQUEST.
+ *                      Added MaxMSIxVectors field to MPI2_IOC_FACTS_REPLY.
+ *                      Added MinDevHandle field to MPI2_IOC_FACTS_REPLY.
+ *                      Added MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY.
+ *                      Added MPI2_EVENT_HOST_BASED_DISCOVERY_PHY define.
+ *                      Added MPI2_EVENT_SAS_TOPO_ES_NO_EXPANDER define.
+ *                      Added Host Based Discovery Phy Event data.
+ *                      Added defines for ProductID Product field
+ *                      (MPI2_FW_HEADER_PID_).
+ *                      Modified values for SAS ProductID Family
+ *                      (MPI2_FW_HEADER_PID_FAMILY_).
  *  --------------------------------------------------------------------------
 
 mpi2_raid.h
@@ -256,6 +289,8 @@
  *  05-21-08  02.00.03  Added MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS so that
  *                      the PhysDisk array in MPI2_RAID_VOLUME_CREATION_STRUCT
  *                      can be sized by the build environment.
+ *  07-30-09  02.00.04  Added proper define for the Use Default Settings bit of
+ *                      VolumeCreationFlags and marked the old one as obsolete.
  *  --------------------------------------------------------------------------
 
 mpi2_sas.h
@@ -264,6 +299,8 @@
  *                      Control Request.
  *  10-02-08  02.00.02  Added Set IOC Parameter Operation to SAS IO Unit Control
  *                      Request.
+ *  10-28-09  02.00.03  Changed the type of SGL in MPI2_SATA_PASSTHROUGH_REQUEST
+ *                      to MPI2_SGE_IO_UNION since it supports chained SGLs.
  *  --------------------------------------------------------------------------
 
 mpi2_targ.h
@@ -283,6 +320,10 @@
  *                      structures and defines.
  *  02-29-08  02.00.02  Modified various names to make them 32-character unique.
  *  05-06-09  02.00.03  Added ISTWI Read Write Tool and Diagnostic CLI Tool.
+ *  07-30-09  02.00.04  Added ExtendedType field to DiagnosticBufferPost request
+ *                      and reply messages.
+ *                      Added MPI2_DIAG_BUF_TYPE_EXTENDED.
+ *                      Incremented MPI2_DIAG_BUF_TYPE_COUNT.
  *  --------------------------------------------------------------------------
 
 mpi2_type.h
@@ -293,20 +334,26 @@
  *  05-06-09  02.00.00  Initial version.
  *  --------------------------------------------------------------------------
 
+mpi2_hbd.h
+ *  10-28-09  02.00.00  Initial version.
+ *  --------------------------------------------------------------------------
+
+
 mpi2_history.txt         Parts list history
 
-Filename     02.00.12
-----------   --------
-mpi2.h       02.00.12
-mpi2_cnfg.h  02.00.11
-mpi2_init.h  02.00.07
-mpi2_ioc.h   02.00.11
-mpi2_raid.h  02.00.03
-mpi2_sas.h   02.00.02
-mpi2_targ.h  02.00.03
-mpi2_tool.h  02.00.03
-mpi2_type.h  02.00.00
-mpi2_ra.h    02.00.00
+Filename     02.00.14  02.00.13  02.00.12
+----------   --------  --------  --------
+mpi2.h       02.00.14  02.00.13  02.00.12
+mpi2_cnfg.h  02.00.13  02.00.12  02.00.11
+mpi2_init.h  02.00.08  02.00.07  02.00.07
+mpi2_ioc.h   02.00.13  02.00.12  02.00.11
+mpi2_raid.h  02.00.04  02.00.04  02.00.03
+mpi2_sas.h   02.00.03  02.00.02  02.00.02
+mpi2_targ.h  02.00.03  02.00.03  02.00.03
+mpi2_tool.h  02.00.04  02.00.04  02.00.03
+mpi2_type.h  02.00.00  02.00.00  02.00.00
+mpi2_ra.h    02.00.00  02.00.00  02.00.00
+mpi2_hbd.h   02.00.00
 
 Filename     02.00.11  02.00.10  02.00.09  02.00.08  02.00.07  02.00.06
 ----------   --------  --------  --------  --------  --------  --------
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_init.h b/drivers/scsi/mpt2sas/mpi/mpi2_init.h
index 563e56d..6541945 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_init.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_init.h
@@ -6,7 +6,7 @@
  *          Title:  MPI SCSI initiator mode messages and structures
  *  Creation Date:  June 23, 2006
  *
- *    mpi2_init.h Version:  02.00.07
+ *    mpi2_init.h Version:  02.00.08
  *
  *  Version History
  *  ---------------
@@ -27,6 +27,10 @@
  *                      Query Asynchronous Event.
  *                      Defined two new bits in the SlotStatus field of the SCSI
  *                      Enclosure Processor Request and Reply.
+ *  10-28-09  02.00.08  Added defines for decoding the ResponseInfo bytes for
+ *                      both SCSI IO Error Reply and SCSI Task Management Reply.
+ *                      Added ResponseInfo field to MPI2_SCSI_TASK_MANAGE_REPLY.
+ *                      Added MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG define.
  *  --------------------------------------------------------------------------
  */
 
@@ -254,6 +258,11 @@
 #define MPI2_SCSI_STATE_AUTOSENSE_FAILED        (0x02)
 #define MPI2_SCSI_STATE_AUTOSENSE_VALID         (0x01)
 
+/* masks and shifts for the ResponseInfo field */
+
+#define MPI2_SCSI_RI_MASK_REASONCODE            (0x000000FF)
+#define MPI2_SCSI_RI_SHIFT_REASONCODE           (0)
+
 #define MPI2_SCSI_TASKTAG_UNKNOWN               (0xFFFF)
 
 
@@ -327,6 +336,7 @@
     U16                     IOCStatus;                      /* 0x0E */
     U32                     IOCLogInfo;                     /* 0x10 */
     U32                     TerminationCount;               /* 0x14 */
+    U32                     ResponseInfo;                   /* 0x18 */
 } MPI2_SCSI_TASK_MANAGE_REPLY,
   MPI2_POINTER PTR_MPI2_SCSI_TASK_MANAGE_REPLY,
   Mpi2SCSITaskManagementReply_t, MPI2_POINTER pMpi2SCSIManagementReply_t;
@@ -339,8 +349,20 @@
 #define MPI2_SCSITASKMGMT_RSP_TM_FAILED                 (0x05)
 #define MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED              (0x08)
 #define MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN            (0x09)
+#define MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG         (0x0A)
 #define MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC          (0x80)
 
+/* masks and shifts for the ResponseInfo field */
+
+#define MPI2_SCSITASKMGMT_RI_MASK_REASONCODE            (0x000000FF)
+#define MPI2_SCSITASKMGMT_RI_SHIFT_REASONCODE           (0)
+#define MPI2_SCSITASKMGMT_RI_MASK_ARI2                  (0x0000FF00)
+#define MPI2_SCSITASKMGMT_RI_SHIFT_ARI2                 (8)
+#define MPI2_SCSITASKMGMT_RI_MASK_ARI1                  (0x00FF0000)
+#define MPI2_SCSITASKMGMT_RI_SHIFT_ARI1                 (16)
+#define MPI2_SCSITASKMGMT_RI_MASK_ARI0                  (0xFF000000)
+#define MPI2_SCSITASKMGMT_RI_SHIFT_ARI0                 (24)
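
Illustrative sketch (not part of the patch): decoding the reason code from the
new ResponseInfo field of a task management reply; mpi_reply is an assumed name.

	u32 resp_info = le32_to_cpu(mpi_reply->ResponseInfo);
	u8 resp_code = (resp_info & MPI2_SCSITASKMGMT_RI_MASK_REASONCODE) >>
		       MPI2_SCSITASKMGMT_RI_SHIFT_REASONCODE;
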
+
 
 /****************************************************************************
 *  SCSI Enclosure Processor messages
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
index ea51ce8..7549384 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
@@ -6,7 +6,7 @@
  *          Title:  MPI IOC, Port, Event, FW Download, and FW Upload messages
  *  Creation Date:  October 11, 2006
  *
- *  mpi2_ioc.h Version:  02.00.12
+ *  mpi2_ioc.h Version:  02.00.13
  *
  *  Version History
  *  ---------------
@@ -87,6 +87,17 @@
  *  07-30-09  02.00.12  Added GPIO Interrupt event define and structure.
  *                      Added MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define.
  *                      Added new product id family for 2208.
+ *  10-28-09  02.00.13  Added HostMSIxVectors field to MPI2_IOC_INIT_REQUEST.
+ *                      Added MaxMSIxVectors field to MPI2_IOC_FACTS_REPLY.
+ *                      Added MinDevHandle field to MPI2_IOC_FACTS_REPLY.
+ *                      Added MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY.
+ *                      Added MPI2_EVENT_HOST_BASED_DISCOVERY_PHY define.
+ *                      Added MPI2_EVENT_SAS_TOPO_ES_NO_EXPANDER define.
+ *                      Added Host Based Discovery Phy Event data.
+ *                      Added defines for ProductID Product field
+ *                      (MPI2_FW_HEADER_PID_).
+ *                      Modified values for SAS ProductID Family
+ *                      (MPI2_FW_HEADER_PID_FAMILY_).
  *  --------------------------------------------------------------------------
  */
 
@@ -119,8 +130,10 @@
     U16                     MsgVersion;                     /* 0x0C */
     U16                     HeaderVersion;                  /* 0x0E */
     U32                     Reserved5;                      /* 0x10 */
-    U32                     Reserved6;                      /* 0x14 */
-    U16                     Reserved7;                      /* 0x18 */
+    U16                     Reserved6;                      /* 0x14 */
+    U8                      Reserved7;                      /* 0x16 */
+    U8                      HostMSIxVectors;                /* 0x17 */
+    U16                     Reserved8;                      /* 0x18 */
     U16                     SystemRequestFrameSize;         /* 0x1A */
     U16                     ReplyDescriptorPostQueueDepth;  /* 0x1C */
     U16                     ReplyFreeQueueDepth;            /* 0x1E */
@@ -215,7 +228,7 @@
     U8                      MaxChainDepth;                  /* 0x14 */
     U8                      WhoInit;                        /* 0x15 */
     U8                      NumberOfPorts;                  /* 0x16 */
-    U8                      Reserved2;                      /* 0x17 */
+    U8                      MaxMSIxVectors;                 /* 0x17 */
     U16                     RequestCredit;                  /* 0x18 */
     U16                     ProductID;                      /* 0x1A */
     U32                     IOCCapabilities;                /* 0x1C */
@@ -233,7 +246,8 @@
     U8                      MaxVolumes;                     /* 0x37 */
     U16                     MaxDevHandle;                   /* 0x38 */
     U16                     MaxPersistentEntries;           /* 0x3A */
-    U32                     Reserved4;                      /* 0x3C */
+    U16                     MinDevHandle;                   /* 0x3C */
+    U16                     Reserved4;                      /* 0x3E */
 } MPI2_IOC_FACTS_REPLY, MPI2_POINTER PTR_MPI2_IOC_FACTS_REPLY,
   Mpi2IOCFactsReply_t, MPI2_POINTER pMpi2IOCFactsReply_t;
 
@@ -269,6 +283,7 @@
 /* ProductID field uses MPI2_FW_HEADER_PID_ */
 
 /* IOCCapabilities */
+#define MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY   (0x00010000)
 #define MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX            (0x00008000)
 #define MPI2_IOCFACTS_CAPABILITY_RAID_ACCELERATOR       (0x00004000)
 #define MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY           (0x00002000)
@@ -453,6 +468,7 @@
 #define MPI2_EVENT_LOG_ENTRY_ADDED                  (0x0021)
 #define MPI2_EVENT_SAS_PHY_COUNTER                  (0x0022)
 #define MPI2_EVENT_GPIO_INTERRUPT                   (0x0023)
+#define MPI2_EVENT_HOST_BASED_DISCOVERY_PHY         (0x0024)
 
 
 /* Log Entry Added Event data */
@@ -793,6 +809,7 @@
   MPI2_POINTER pMpi2EventDataSasTopologyChangeList_t;
 
 /* values for the ExpStatus field */
+#define MPI2_EVENT_SAS_TOPO_ES_NO_EXPANDER                  (0x00)
 #define MPI2_EVENT_SAS_TOPO_ES_ADDED                        (0x01)
 #define MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING               (0x02)
 #define MPI2_EVENT_SAS_TOPO_ES_RESPONDING                   (0x03)
@@ -878,6 +895,44 @@
  * */
 
 
+/* Host Based Discovery Phy Event data */
+
+typedef struct _MPI2_EVENT_HBD_PHY_SAS {
+    U8          Flags;                      /* 0x00 */
+    U8          NegotiatedLinkRate;         /* 0x01 */
+    U8          PhyNum;                     /* 0x02 */
+    U8          PhysicalPort;               /* 0x03 */
+    U32         Reserved1;                  /* 0x04 */
+    U8          InitialFrame[28];           /* 0x08 */
+} MPI2_EVENT_HBD_PHY_SAS, MPI2_POINTER PTR_MPI2_EVENT_HBD_PHY_SAS,
+  Mpi2EventHbdPhySas_t, MPI2_POINTER pMpi2EventHbdPhySas_t;
+
+/* values for the Flags field */
+#define MPI2_EVENT_HBD_SAS_FLAGS_FRAME_VALID        (0x02)
+#define MPI2_EVENT_HBD_SAS_FLAGS_SATA_FRAME         (0x01)
+
+/* use MPI2_SAS_NEG_LINK_RATE_ defines from mpi2_cnfg.h for
+ * the NegotiatedLinkRate field */
+
+typedef union _MPI2_EVENT_HBD_DESCRIPTOR {
+    MPI2_EVENT_HBD_PHY_SAS      Sas;
+} MPI2_EVENT_HBD_DESCRIPTOR, MPI2_POINTER PTR_MPI2_EVENT_HBD_DESCRIPTOR,
+  Mpi2EventHbdDescriptor_t, MPI2_POINTER pMpi2EventHbdDescriptor_t;
+
+typedef struct _MPI2_EVENT_DATA_HBD_PHY {
+    U8                          DescriptorType;     /* 0x00 */
+    U8                          Reserved1;          /* 0x01 */
+    U16                         Reserved2;          /* 0x02 */
+    U32                         Reserved3;          /* 0x04 */
+    MPI2_EVENT_HBD_DESCRIPTOR   Descriptor;         /* 0x08 */
+} MPI2_EVENT_DATA_HBD_PHY, MPI2_POINTER PTR_MPI2_EVENT_DATA_HBD_PHY,
+  Mpi2EventDataHbdPhy_t, MPI2_POINTER pMpi2EventDataMpi2EventDataHbdPhy_t;
+
+/* values for the DescriptorType field */
+#define MPI2_EVENT_HBD_DT_SAS               (0x01)
+
+
+
 /****************************************************************************
 *  EventAck message
 ****************************************************************************/
@@ -1126,13 +1181,17 @@
 #define MPI2_FW_HEADER_PID_TYPE_MASK            (0xF000)
 #define MPI2_FW_HEADER_PID_TYPE_SAS             (0x2000)
 
-#define MPI2_FW_HEADER_PID_PROD_MASK            (0x0F00)
-#define MPI2_FW_HEADER_PID_PROD_A               (0x0000)
+#define MPI2_FW_HEADER_PID_PROD_MASK                    (0x0F00)
+#define MPI2_FW_HEADER_PID_PROD_A                       (0x0000)
+#define MPI2_FW_HEADER_PID_PROD_TARGET_INITIATOR_SCSI   (0x0200)
+#define MPI2_FW_HEADER_PID_PROD_IR_SCSI                 (0x0700)
+
 
 #define MPI2_FW_HEADER_PID_FAMILY_MASK          (0x00FF)
 /* SAS */
-#define MPI2_FW_HEADER_PID_FAMILY_2108_SAS      (0x0010)
-#define MPI2_FW_HEADER_PID_FAMILY_2208_SAS      (0x0011)
+#define MPI2_FW_HEADER_PID_FAMILY_2108_SAS      (0x0013)
+#define MPI2_FW_HEADER_PID_FAMILY_2208_SAS      (0x0014)
 
 /* use MPI2_IOCFACTS_PROTOCOL_ defines for ProtocolFlags field */
 
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_sas.h b/drivers/scsi/mpt2sas/mpi/mpi2_sas.h
index 8a42b13..2d8aeed 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_sas.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_sas.h
@@ -6,7 +6,7 @@
  *          Title:  MPI Serial Attached SCSI structures and definitions
  *  Creation Date:  February 9, 2007
  *
- *  mpi2.h Version:  02.00.02
+ *  mpi2_sas.h Version:  02.00.03
  *
  *  Version History
  *  ---------------
@@ -18,6 +18,8 @@
  *                      Control Request.
  *  10-02-08  02.00.02  Added Set IOC Parameter Operation to SAS IO Unit Control
  *                      Request.
+ *  10-28-09  02.00.03  Changed the type of SGL in MPI2_SATA_PASSTHROUGH_REQUEST
+ *                      to MPI2_SGE_IO_UNION since it supports chained SGLs.
  *  --------------------------------------------------------------------------
  */
 
@@ -160,7 +162,7 @@
     U32                     Reserved4;          /* 0x14 */
     U32                     DataLength;         /* 0x18 */
     U8                      CommandFIS[20];     /* 0x1C */
-    MPI2_SIMPLE_SGE_UNION   SGL;                /* 0x20 */
+    MPI2_SGE_IO_UNION       SGL;                /* 0x20 */
 } MPI2_SATA_PASSTHROUGH_REQUEST, MPI2_POINTER PTR_MPI2_SATA_PASSTHROUGH_REQUEST,
   Mpi2SataPassthroughRequest_t, MPI2_POINTER pMpi2SataPassthroughRequest_t;
 
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 89d0240..88e6eeb 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -107,8 +107,7 @@
 	if (ret)
 		return ret;
 
-	printk(KERN_INFO "setting logging_level(0x%08x)\n",
-				mpt2sas_fwfault_debug);
+	printk(KERN_INFO "setting fwfault_debug(%d)\n", mpt2sas_fwfault_debug);
 	list_for_each_entry(ioc, &mpt2sas_ioc_list, list)
 		ioc->fwfault_debug = mpt2sas_fwfault_debug;
 	return 0;
@@ -1222,6 +1221,8 @@
 	u32 memap_sz;
 	u32 pio_sz;
 	int i, r = 0;
+	u64 pio_chip = 0;
+	u64 chip_phys = 0;
 
 	dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n",
 	    ioc->name, __func__));
@@ -1255,12 +1256,13 @@
 		if (pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO) {
 			if (pio_sz)
 				continue;
-			ioc->pio_chip = pci_resource_start(pdev, i);
+			pio_chip = (u64)pci_resource_start(pdev, i);
 			pio_sz = pci_resource_len(pdev, i);
 		} else {
 			if (memap_sz)
 				continue;
 			ioc->chip_phys = pci_resource_start(pdev, i);
+			chip_phys = (u64)ioc->chip_phys;
 			memap_sz = pci_resource_len(pdev, i);
 			ioc->chip = ioremap(ioc->chip_phys, memap_sz);
 			if (ioc->chip == NULL) {
@@ -1280,10 +1282,10 @@
 	printk(MPT2SAS_INFO_FMT "%s: IRQ %d\n",
 	    ioc->name,  ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
 	    "IO-APIC enabled"), ioc->pci_irq);
-	printk(MPT2SAS_INFO_FMT "iomem(0x%lx), mapped(0x%p), size(%d)\n",
-	    ioc->name, ioc->chip_phys, ioc->chip, memap_sz);
-	printk(MPT2SAS_INFO_FMT "ioport(0x%lx), size(%d)\n",
-	    ioc->name, ioc->pio_chip, pio_sz);
+	printk(MPT2SAS_INFO_FMT "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
+	    ioc->name, (unsigned long long)chip_phys, ioc->chip, memap_sz);
+	printk(MPT2SAS_INFO_FMT "ioport(0x%016llx), size(%d)\n",
+	    ioc->name, (unsigned long long)pio_chip, pio_sz);
 
 	return 0;
 
@@ -3573,6 +3575,8 @@
 
 	init_waitqueue_head(&ioc->reset_wq);
 
+	ioc->fwfault_debug = mpt2sas_fwfault_debug;
+
 	/* base internal command bits */
 	mutex_init(&ioc->base_cmds.mutex);
 	ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
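The mpt2sas_base changes above switch chip_phys to resource_size_t, whose width varies with the architecture, which is why the addresses are widened with an explicit (unsigned long long) cast and printed with a fixed %016llx format. A minimal userspace sketch of the same idiom; the typedef and BAR value below are stand-ins, not kernel definitions:

#include <stdio.h>
#include <stdint.h>

/* resource_size_t is phys_addr_t in the kernel and may be 32 or 64 bits
 * wide; uint64_t here just stands in for the 64-bit case. */
typedef uint64_t resource_size_t;

int main(void)
{
	resource_size_t chip_phys = 0xfbdfc000;	/* hypothetical BAR address */

	/* widen explicitly so one format string works on every arch */
	printf("iomem(0x%016llx)\n", (unsigned long long)chip_phys);
	return 0;
}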
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index bb4f146..e18b054 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -69,10 +69,10 @@
 #define MPT2SAS_DRIVER_NAME		"mpt2sas"
 #define MPT2SAS_AUTHOR	"LSI Corporation <DL-MPTFusionLinux@lsi.com>"
 #define MPT2SAS_DESCRIPTION	"LSI MPT Fusion SAS 2.0 Device Driver"
-#define MPT2SAS_DRIVER_VERSION		"03.100.03.00"
-#define MPT2SAS_MAJOR_VERSION		03
+#define MPT2SAS_DRIVER_VERSION		"04.100.01.00"
+#define MPT2SAS_MAJOR_VERSION		04
 #define MPT2SAS_MINOR_VERSION		100
-#define MPT2SAS_BUILD_VERSION		03
+#define MPT2SAS_BUILD_VERSION		01
 #define MPT2SAS_RELEASE_VERSION		00
 
 /*
@@ -323,6 +323,7 @@
  * @device_info: bitfield provides detailed info about the hidden components
  * @num_pds: number of hidden raid components
  * @responding: used in _scsih_raid_device_mark_responding
+ * @percent_complete: resync percent complete
  */
 struct _raid_device {
 	struct list_head list;
@@ -336,6 +337,7 @@
 	u32	device_info;
 	u8	num_pds;
 	u8	responding;
+	u8	percent_complete;
 };
 
 /**
@@ -464,7 +466,6 @@
  * @pdev: pci pdev object
  * @chip: memory mapped register space
  * @chip_phys: physical addrss prior to mapping
- * @pio_chip: I/O mapped register space
  * @logging_level: see mpt2sas_debug.h
  * @fwfault_debug: debuging FW timeouts
  * @ir_firmware: IR firmware present
@@ -587,8 +588,7 @@
 	char		tmp_string[MPT_STRING_LENGTH];
 	struct pci_dev	*pdev;
 	Mpi2SystemInterfaceRegs_t __iomem *chip;
-	unsigned long	chip_phys;
-	unsigned long	pio_chip;
+	resource_size_t	chip_phys;
 	int		logging_level;
 	int		fwfault_debug;
 	u8		ir_firmware;
@@ -853,6 +853,8 @@
     *mpi_reply, Mpi2IOUnitPage1_t *config_page);
 int mpt2sas_config_get_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
     *mpi_reply, Mpi2SasIOUnitPage1_t *config_page, u16 sz);
+int mpt2sas_config_set_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc,
+    Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage1_t *config_page, u16 sz);
 int mpt2sas_config_get_ioc_pg8(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
     *mpi_reply, Mpi2IOCPage8_t *config_page);
 int mpt2sas_config_get_expander_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
diff --git a/drivers/scsi/mpt2sas/mpt2sas_config.c b/drivers/scsi/mpt2sas/mpt2sas_config.c
index 594a389..411c27d 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_config.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_config.c
@@ -324,7 +324,9 @@
 		if (r != 0)
 			goto out;
 		if (mpi_request->Action ==
-		    MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT) {
+		    MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT ||
+		    mpi_request->Action ==
+		    MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM) {
 			ioc->base_add_sg_single(&mpi_request->PageBufferSGE,
 			    MPT2_CONFIG_COMMON_WRITE_SGLFLAGS | mem.sz,
 			    mem.page_dma);
@@ -882,7 +884,7 @@
 }
 
 /**
- * mpt2sas_config_get_sas_iounit_pg1 - obtain sas iounit page 0
+ * mpt2sas_config_get_sas_iounit_pg1 - obtain sas iounit page 1
  * @ioc: per adapter object
  * @mpi_reply: reply mf payload returned from firmware
  * @config_page: contents of the config page
@@ -907,7 +909,7 @@
 	mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
 	mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
 	mpi_request.Header.PageNumber = 1;
-	mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE0_PAGEVERSION;
+	mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE1_PAGEVERSION;
 	mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
 	r = _config_request(ioc, &mpi_request, mpi_reply,
 	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
@@ -922,6 +924,49 @@
 }
 
 /**
+ * mpt2sas_config_set_sas_iounit_pg1 - send sas iounit page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @sz: size of buffer passed in config_page
+ * Context: sleep.
+ *
+ * The calling function should call config_get_number_hba_phys prior to
+ * this function, so that enough memory is allocated for config_page.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt2sas_config_set_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+    *mpi_reply, Mpi2SasIOUnitPage1_t *config_page, u16 sz)
+{
+	Mpi2ConfigRequest_t mpi_request;
+	int r;
+
+	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+	mpi_request.Function = MPI2_FUNCTION_CONFIG;
+	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+	mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+	mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
+	mpi_request.Header.PageNumber = 1;
+	mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE1_PAGEVERSION;
+	mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
+	r = _config_request(ioc, &mpi_request, mpi_reply,
+	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+	if (r)
+		goto out;
+
+	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT;
+	_config_request(ioc, &mpi_request, mpi_reply,
+	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz);
+	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM;
+	r = _config_request(ioc, &mpi_request, mpi_reply,
+	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz);
+ out:
+	return r;
+}
+
+/**
  * mpt2sas_config_get_expander_pg0 - obtain expander page 0
  * @ioc: per adapter object
  * @mpi_reply: reply mf payload returned from firmware
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
index 84a124f..fa9bf83 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -891,6 +891,7 @@
 
  issue_host_reset:
 	if (issue_reset) {
+		ret = -ENODATA;
 		if ((mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
 		    mpi_request->Function ==
 		    MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
@@ -2202,14 +2203,10 @@
 	karg.data_out_size = karg32.data_out_size;
 	karg.max_sense_bytes = karg32.max_sense_bytes;
 	karg.data_sge_offset = karg32.data_sge_offset;
-	memcpy(&karg.reply_frame_buf_ptr, &karg32.reply_frame_buf_ptr,
-	    sizeof(uint32_t));
-	memcpy(&karg.data_in_buf_ptr, &karg32.data_in_buf_ptr,
-	    sizeof(uint32_t));
-	memcpy(&karg.data_out_buf_ptr, &karg32.data_out_buf_ptr,
-	    sizeof(uint32_t));
-	memcpy(&karg.sense_data_ptr, &karg32.sense_data_ptr,
-	    sizeof(uint32_t));
+	karg.reply_frame_buf_ptr = compat_ptr(karg32.reply_frame_buf_ptr);
+	karg.data_in_buf_ptr = compat_ptr(karg32.data_in_buf_ptr);
+	karg.data_out_buf_ptr = compat_ptr(karg32.data_out_buf_ptr);
+	karg.sense_data_ptr = compat_ptr(karg32.sense_data_ptr);
 	state = (file->f_flags & O_NONBLOCK) ? NON_BLOCKING : BLOCKING;
 	return _ctl_do_mpt_command(ioc, karg, &uarg->mf, state);
 }
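The compat ioctl hunk above stops memcpy()ing 32-bit pointer values into 64-bit fields and uses compat_ptr() instead, which widens a compat_uptr_t into a proper void __user * (including any arch-specific quirks). A hedged sketch of the general pattern; the foo_args structures and foo_compat_copy() helper are made up for illustration, not the driver's real ioctl layout:

#include <linux/compat.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct foo_args32 {			/* 32-bit userland layout */
	compat_uptr_t buf;		/* user pointer, 32 bits wide */
	u32 len;
};

struct foo_args {			/* native kernel layout */
	void __user *buf;
	u32 len;
};

static int foo_compat_copy(struct foo_args *karg,
			   const struct foo_args32 __user *uarg)
{
	struct foo_args32 karg32;

	if (copy_from_user(&karg32, uarg, sizeof(karg32)))
		return -EFAULT;

	/* compat_ptr() does the widening instead of a raw 4-byte memcpy */
	karg->buf = compat_ptr(karg32.buf);
	karg->len = karg32.len;
	return 0;
}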
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index efabea1..c7ec3f1 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -52,6 +52,7 @@
 #include <linux/delay.h>
 #include <linux/pci.h>
 #include <linux/interrupt.h>
+#include <linux/raid_class.h>
 
 #include "mpt2sas_base.h"
 
@@ -133,6 +134,9 @@
 	void			*event_data;
 };
 
+/* raid transport support */
+static struct raid_template *mpt2sas_raid_template;
+
 /**
  * struct _scsi_io_transfer - scsi io transfer
  * @handle: sas device handle (assigned by firmware)
@@ -1305,7 +1309,6 @@
 	struct MPT2SAS_DEVICE *sas_device_priv_data;
 	struct scsi_target *starget;
 	struct _raid_device *raid_device;
-	struct _sas_device *sas_device;
 	unsigned long flags;
 
 	sas_device_priv_data = kzalloc(sizeof(struct scsi_device), GFP_KERNEL);
@@ -1332,21 +1335,8 @@
 		if (raid_device)
 			raid_device->sdev = sdev; /* raid is single lun */
 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
-	} else {
-		/* set TLR bit for SSP devices */
-		if (!(ioc->facts.IOCCapabilities &
-		     MPI2_IOCFACTS_CAPABILITY_TLR))
-			goto out;
-		spin_lock_irqsave(&ioc->sas_device_lock, flags);
-		sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
-		   sas_device_priv_data->sas_target->sas_address);
-		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-		if (sas_device && sas_device->device_info &
-		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)
-			sas_device_priv_data->flags |= MPT_DEVICE_TLR_ON;
 	}
 
- out:
 	return 0;
 }
 
@@ -1419,6 +1409,140 @@
 }
 
 /**
+ * _scsih_is_raid - return boolean indicating device is raid volume
+ * @dev: the device struct object
+ */
+static int
+_scsih_is_raid(struct device *dev)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+
+	return (sdev->channel == RAID_CHANNEL) ? 1 : 0;
+}
+
+/**
+ * _scsih_get_resync - get raid volume resync percent complete
+ * @dev: the device struct object
+ */
+static void
+_scsih_get_resync(struct device *dev)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
+	static struct _raid_device *raid_device;
+	unsigned long flags;
+	Mpi2RaidVolPage0_t vol_pg0;
+	Mpi2ConfigReply_t mpi_reply;
+	u32 volume_status_flags;
+	u8 percent_complete = 0;
+
+	spin_lock_irqsave(&ioc->raid_device_lock, flags);
+	raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
+	    sdev->channel);
+	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+
+	if (!raid_device)
+		goto out;
+
+	if (mpt2sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
+	     MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle,
+	     sizeof(Mpi2RaidVolPage0_t))) {
+		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+		    ioc->name, __FILE__, __LINE__, __func__);
+		goto out;
+	}
+
+	volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags);
+	if (volume_status_flags & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS)
+		percent_complete = raid_device->percent_complete;
+ out:
+	raid_set_resync(mpt2sas_raid_template, dev, percent_complete);
+}
+
+/**
+ * _scsih_get_state - get raid volume state
+ * @dev: the device struct object
+ */
+static void
+_scsih_get_state(struct device *dev)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
+	static struct _raid_device *raid_device;
+	unsigned long flags;
+	Mpi2RaidVolPage0_t vol_pg0;
+	Mpi2ConfigReply_t mpi_reply;
+	u32 volstate;
+	enum raid_state state = RAID_STATE_UNKNOWN;
+
+	spin_lock_irqsave(&ioc->raid_device_lock, flags);
+	raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
+	    sdev->channel);
+	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+
+	if (!raid_device)
+		goto out;
+
+	if (mpt2sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
+	     MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle,
+	     sizeof(Mpi2RaidVolPage0_t))) {
+		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+		    ioc->name, __FILE__, __LINE__, __func__);
+		goto out;
+	}
+
+	volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags);
+	if (volstate & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) {
+		state = RAID_STATE_RESYNCING;
+		goto out;
+	}
+
+	switch (vol_pg0.VolumeState) {
+	case MPI2_RAID_VOL_STATE_OPTIMAL:
+	case MPI2_RAID_VOL_STATE_ONLINE:
+		state = RAID_STATE_ACTIVE;
+		break;
+	case  MPI2_RAID_VOL_STATE_DEGRADED:
+		state = RAID_STATE_DEGRADED;
+		break;
+	case MPI2_RAID_VOL_STATE_FAILED:
+	case MPI2_RAID_VOL_STATE_MISSING:
+		state = RAID_STATE_OFFLINE;
+		break;
+	}
+ out:
+	raid_set_state(mpt2sas_raid_template, dev, state);
+}
+
+/**
+ * _scsih_set_level - set raid level
+ * @sdev: scsi device struct
+ * @raid_device: raid_device object
+ */
+static void
+_scsih_set_level(struct scsi_device *sdev, struct _raid_device *raid_device)
+{
+	enum raid_level level = RAID_LEVEL_UNKNOWN;
+
+	switch (raid_device->volume_type) {
+	case MPI2_RAID_VOL_TYPE_RAID0:
+		level = RAID_LEVEL_0;
+		break;
+	case MPI2_RAID_VOL_TYPE_RAID10:
+		level = RAID_LEVEL_10;
+		break;
+	case MPI2_RAID_VOL_TYPE_RAID1E:
+		level = RAID_LEVEL_1E;
+		break;
+	case MPI2_RAID_VOL_TYPE_RAID1:
+		level = RAID_LEVEL_1;
+		break;
+	}
+
+	raid_set_level(mpt2sas_raid_template, &sdev->sdev_gendev, level);
+}
+
+/**
  * _scsih_get_volume_capabilities - volume capabilities
  * @ioc: per adapter object
  * @sas_device: the raid_device object
@@ -1479,6 +1603,32 @@
 }
 
 /**
+ * _scsih_enable_tlr - setting TLR flags
+ * @ioc: per adapter object
+ * @sdev: scsi device struct
+ *
+ * Enable Transaction Layer Retries for tape devices when
+ * VPD page 0x90 is present.
+ *
+ */
+static void
+_scsih_enable_tlr(struct MPT2SAS_ADAPTER *ioc, struct scsi_device *sdev)
+{
+	/* only for TAPE */
+	if (sdev->type != TYPE_TAPE)
+		return;
+
+	if (!(ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR))
+		return;
+
+	sas_enable_tlr(sdev);
+	sdev_printk(KERN_INFO, sdev, "TLR %s\n",
+	    sas_is_tlr_enabled(sdev) ? "Enabled" : "Disabled");
+	return;
+
+}
+
+/**
  * _scsih_slave_configure - device configure routine.
  * @sdev: scsi device struct
  *
@@ -1574,6 +1724,8 @@
 		    (unsigned long long)raid_device->wwid,
 		    raid_device->num_pds, ds);
 		_scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT);
+		/* raid transport support */
+		_scsih_set_level(sdev, raid_device);
 		return 0;
 	}
 
@@ -1621,8 +1773,10 @@
 
 	_scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT);
 
-	if (ssp_target)
+	if (ssp_target) {
 		sas_read_port_mode_page(sdev);
+		_scsih_enable_tlr(ioc, sdev);
+	}
 	return 0;
 }
 
@@ -2908,8 +3062,9 @@
 
 	} else
 		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
-
-	if ((sas_device_priv_data->flags & MPT_DEVICE_TLR_ON))
+	/* Make sure Device is not raid volume */
+	if (!_scsih_is_raid(&scmd->device->sdev_gendev) &&
+	    sas_is_tlr_enabled(scmd->device))
 		mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;
 
 	smid = mpt2sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
@@ -3298,10 +3453,12 @@
 		    le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF;
 	if (!sas_device_priv_data->tlr_snoop_check) {
 		sas_device_priv_data->tlr_snoop_check++;
-		if ((sas_device_priv_data->flags & MPT_DEVICE_TLR_ON) &&
-		    response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME)
-			sas_device_priv_data->flags &=
-			    ~MPT_DEVICE_TLR_ON;
+		if (!_scsih_is_raid(&scmd->device->sdev_gendev) &&
+		    sas_is_tlr_enabled(scmd->device) &&
+		    response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) {
+			sas_disable_tlr(scmd->device);
+			sdev_printk(KERN_INFO, scmd->device, "TLR disabled\n");
+		}
 	}
 
 	xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
@@ -5170,11 +5327,33 @@
 _scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc,
     struct fw_event_work *fw_event)
 {
+	Mpi2EventDataIrOperationStatus_t *event_data = fw_event->event_data;
+	static struct _raid_device *raid_device;
+	unsigned long flags;
+	u16 handle;
+
 #ifdef CONFIG_SCSI_MPT2SAS_LOGGING
 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
 		_scsih_sas_ir_operation_status_event_debug(ioc,
-		     fw_event->event_data);
+		     event_data);
 #endif
+
+	/* code added for raid transport support */
+	if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) {
+
+		handle = le16_to_cpu(event_data->VolDevHandle);
+
+		spin_lock_irqsave(&ioc->raid_device_lock, flags);
+		raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
+		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+
+		if (!raid_device)
+			return;
+
+		if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC)
+			raid_device->percent_complete =
+			    event_data->PercentComplete;
+	}
 }
 
 /**
@@ -5998,6 +6177,8 @@
 	struct _sas_port *mpt2sas_port;
 	struct _sas_device *sas_device;
 	struct _sas_node *expander_sibling;
+	struct _raid_device *raid_device, *next;
+	struct MPT2SAS_TARGET *sas_target_priv_data;
 	struct workqueue_struct	*wq;
 	unsigned long flags;
 
@@ -6011,6 +6192,21 @@
 	if (wq)
 		destroy_workqueue(wq);
 
+	/* release all the volumes */
+	list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
+	    list) {
+		if (raid_device->starget) {
+			sas_target_priv_data =
+			    raid_device->starget->hostdata;
+			sas_target_priv_data->deleted = 1;
+			scsi_remove_target(&raid_device->starget->dev);
+		}
+		printk(MPT2SAS_INFO_FMT "removing handle(0x%04x), wwid"
+		    "(0x%016llx)\n", ioc->name,  raid_device->handle,
+		    (unsigned long long) raid_device->wwid);
+		_scsih_raid_device_remove(ioc, raid_device);
+	}
+
 	/* free ports attached to the sas_host */
  retry_again:
 	list_for_each_entry(mpt2sas_port,
@@ -6373,6 +6569,13 @@
 #endif
 };
 
+/* raid transport support */
+static struct raid_function_template mpt2sas_raid_functions = {
+	.cookie		= &scsih_driver_template,
+	.is_raid	= _scsih_is_raid,
+	.get_resync	= _scsih_get_resync,
+	.get_state	= _scsih_get_state,
+};
 
 /**
  * _scsih_init - main entry point for this driver.
@@ -6392,6 +6595,12 @@
 	    sas_attach_transport(&mpt2sas_transport_functions);
 	if (!mpt2sas_transport_template)
 		return -ENODEV;
+	/* raid transport support */
+	mpt2sas_raid_template = raid_class_attach(&mpt2sas_raid_functions);
+	if (!mpt2sas_raid_template) {
+		sas_release_transport(mpt2sas_transport_template);
+		return -ENODEV;
+	}
 
 	mpt2sas_base_initialize_callback_handler();
 
@@ -6426,8 +6635,11 @@
 	mpt2sas_ctl_init();
 
 	error = pci_register_driver(&scsih_driver);
-	if (error)
+	if (error) {
+		/* raid transport support */
+		raid_class_release(mpt2sas_raid_template);
 		sas_release_transport(mpt2sas_transport_template);
+	}
 
 	return error;
 }
@@ -6445,7 +6657,8 @@
 
 	pci_unregister_driver(&scsih_driver);
 
-	sas_release_transport(mpt2sas_transport_template);
+	mpt2sas_ctl_exit();
+
 	mpt2sas_base_release_callback_handler(scsi_io_cb_idx);
 	mpt2sas_base_release_callback_handler(tm_cb_idx);
 	mpt2sas_base_release_callback_handler(base_cb_idx);
@@ -6457,7 +6670,10 @@
 	mpt2sas_base_release_callback_handler(tm_tr_cb_idx);
 	mpt2sas_base_release_callback_handler(tm_sas_control_cb_idx);
 
-	mpt2sas_ctl_exit();
+	/* raid transport support */
+	raid_class_release(mpt2sas_raid_template);
+	sas_release_transport(mpt2sas_transport_template);
+
 }
 
 module_init(_scsih_init);
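The scsih changes above wire the driver into the generic RAID transport class: a raid_function_template is attached at module init, the per-volume callbacks report level/state/resync, and the template is released again on exit in reverse order of registration, as the reworked _scsih_exit() shows. A condensed sketch of that lifecycle under the same API; the my_* names are placeholders, and a real driver would pass its scsi_host_template as the cookie:

#include <linux/module.h>
#include <linux/device.h>
#include <linux/raid_class.h>

/* Placeholder callbacks standing in for _scsih_is_raid() and friends. */
static int my_is_raid(struct device *dev)	{ return 0; }
static void my_get_resync(struct device *dev)	{ }
static void my_get_state(struct device *dev)	{ }

static struct raid_template *my_raid_template;

static struct raid_function_template my_raid_functions = {
	.cookie		= NULL,		/* normally &scsih_driver_template */
	.is_raid	= my_is_raid,
	.get_resync	= my_get_resync,
	.get_state	= my_get_state,
};

static int __init my_init(void)
{
	my_raid_template = raid_class_attach(&my_raid_functions);
	if (!my_raid_template)
		return -ENODEV;
	/* register transports / the PCI driver next; on failure, release
	 * the template before returning the error */
	return 0;
}

static void __exit my_exit(void)
{
	/* tear down in reverse order of my_init() */
	raid_class_release(my_raid_template);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");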
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index 3a82872..789f9ee 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -855,6 +855,17 @@
 	return shost_priv(shost);
 }
 
+static struct _sas_phy *
+_transport_find_local_phy(struct MPT2SAS_ADAPTER *ioc, struct sas_phy *phy)
+{
+	int i;
+
+	for (i = 0; i < ioc->sas_hba.num_phys; i++)
+		if (ioc->sas_hba.phy[i].phy == phy)
+			return(&ioc->sas_hba.phy[i]);
+	return NULL;
+}
+
 /**
  * _transport_get_linkerrors -
  * @phy: The sas phy object
@@ -870,14 +881,8 @@
 	struct _sas_phy *mpt2sas_phy;
 	Mpi2ConfigReply_t mpi_reply;
 	Mpi2SasPhyPage1_t phy_pg1;
-	int i;
 
-	for (i = 0, mpt2sas_phy = NULL; i < ioc->sas_hba.num_phys &&
-	    !mpt2sas_phy; i++) {
-		if (ioc->sas_hba.phy[i].phy != phy)
-			continue;
-		mpt2sas_phy = &ioc->sas_hba.phy[i];
-	}
+	mpt2sas_phy = _transport_find_local_phy(ioc, phy);
 
 	if (!mpt2sas_phy) /* this phy not on sas_host */
 		return -EINVAL;
@@ -971,14 +976,8 @@
 	struct _sas_phy *mpt2sas_phy;
 	Mpi2SasIoUnitControlReply_t mpi_reply;
 	Mpi2SasIoUnitControlRequest_t mpi_request;
-	int i;
 
-	for (i = 0, mpt2sas_phy = NULL; i < ioc->sas_hba.num_phys &&
-	    !mpt2sas_phy; i++) {
-		if (ioc->sas_hba.phy[i].phy != phy)
-			continue;
-		mpt2sas_phy = &ioc->sas_hba.phy[i];
-	}
+	mpt2sas_phy = _transport_find_local_phy(ioc, phy);
 
 	if (!mpt2sas_phy) /* this phy not on sas_host */
 		return -EINVAL;
@@ -1006,6 +1005,173 @@
 }
 
 /**
+ * _transport_phy_enable - enable/disable phys
+ * @phy: The sas phy object
+ * @enable: enable phy when true
+ *
+ * Only supports sas_host direct attached phys.
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_transport_phy_enable(struct sas_phy *phy, int enable)
+{
+	struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy);
+	struct _sas_phy *mpt2sas_phy;
+	Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
+	Mpi2ConfigReply_t mpi_reply;
+	u16 ioc_status;
+	u16 sz;
+	int rc = 0;
+
+	mpt2sas_phy = _transport_find_local_phy(ioc, phy);
+
+	if (!mpt2sas_phy) /* this phy not on sas_host */
+		return -EINVAL;
+
+	/* sas_iounit page 1 */
+	sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
+	    sizeof(Mpi2SasIOUnit1PhyData_t));
+	sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
+	if (!sas_iounit_pg1) {
+		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+		    ioc->name, __FILE__, __LINE__, __func__);
+		rc = -ENOMEM;
+		goto out;
+	}
+	if ((mpt2sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
+	    sas_iounit_pg1, sz))) {
+		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+		    ioc->name, __FILE__, __LINE__, __func__);
+		rc = -ENXIO;
+		goto out;
+	}
+	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+	    MPI2_IOCSTATUS_MASK;
+	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+		    ioc->name, __FILE__, __LINE__, __func__);
+		rc = -EIO;
+		goto out;
+	}
+
+	if (enable)
+		sas_iounit_pg1->PhyData[mpt2sas_phy->phy_id].PhyFlags
+		    &= ~MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE;
+	else
+		sas_iounit_pg1->PhyData[mpt2sas_phy->phy_id].PhyFlags
+		    |= MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE;
+
+	mpt2sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1, sz);
+
+ out:
+	kfree(sas_iounit_pg1);
+	return rc;
+}
+
+/**
+ * _transport_phy_speed - set phy min/max link rates
+ * @phy: The sas phy object
+ * @rates: rates defined in sas_phy_linkrates
+ *
+ * Only supports sas_host direct attached phys.
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_transport_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
+{
+	struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy);
+	struct _sas_phy *mpt2sas_phy;
+	Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
+	Mpi2SasPhyPage0_t phy_pg0;
+	Mpi2ConfigReply_t mpi_reply;
+	u16 ioc_status;
+	u16 sz;
+	int i;
+	int rc = 0;
+
+	mpt2sas_phy = _transport_find_local_phy(ioc, phy);
+
+	if (!mpt2sas_phy) /* this phy not on sas_host */
+		return -EINVAL;
+
+	if (!rates->minimum_linkrate)
+		rates->minimum_linkrate = phy->minimum_linkrate;
+	else if (rates->minimum_linkrate < phy->minimum_linkrate_hw)
+		rates->minimum_linkrate = phy->minimum_linkrate_hw;
+
+	if (!rates->maximum_linkrate)
+		rates->maximum_linkrate = phy->maximum_linkrate;
+	else if (rates->maximum_linkrate > phy->maximum_linkrate_hw)
+		rates->maximum_linkrate = phy->maximum_linkrate_hw;
+
+	/* sas_iounit page 1 */
+	sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
+	    sizeof(Mpi2SasIOUnit1PhyData_t));
+	sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
+	if (!sas_iounit_pg1) {
+		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+		    ioc->name, __FILE__, __LINE__, __func__);
+		rc = -ENOMEM;
+		goto out;
+	}
+	if ((mpt2sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
+	    sas_iounit_pg1, sz))) {
+		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+		    ioc->name, __FILE__, __LINE__, __func__);
+		rc = -ENXIO;
+		goto out;
+	}
+	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+	    MPI2_IOCSTATUS_MASK;
+	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+		    ioc->name, __FILE__, __LINE__, __func__);
+		rc = -EIO;
+		goto out;
+	}
+
+	for (i = 0; i < ioc->sas_hba.num_phys; i++) {
+		if (mpt2sas_phy->phy_id != i) {
+			sas_iounit_pg1->PhyData[i].MaxMinLinkRate =
+			    (ioc->sas_hba.phy[i].phy->minimum_linkrate +
+			    (ioc->sas_hba.phy[i].phy->maximum_linkrate << 4));
+		} else {
+			sas_iounit_pg1->PhyData[i].MaxMinLinkRate =
+			    (rates->minimum_linkrate +
+			    (rates->maximum_linkrate << 4));
+		}
+	}
+
+	if (mpt2sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
+	    sz)) {
+		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+		    ioc->name, __FILE__, __LINE__, __func__);
+		rc = -ENXIO;
+		goto out;
+	}
+
+	/* link reset */
+	_transport_phy_reset(phy, 0);
+
+	/* read phy page 0, then update the rates in the sas transport phy */
+	if (!mpt2sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
+	    mpt2sas_phy->phy_id)) {
+		phy->minimum_linkrate = _transport_convert_phy_link_rate(
+		    phy_pg0.ProgrammedLinkRate & MPI2_SAS_PRATE_MIN_RATE_MASK);
+		phy->maximum_linkrate = _transport_convert_phy_link_rate(
+		    phy_pg0.ProgrammedLinkRate >> 4);
+		phy->negotiated_linkrate = _transport_convert_phy_link_rate(
+		    phy_pg0.NegotiatedLinkRate &
+		    MPI2_SAS_NEG_LINK_RATE_MASK_PHYSICAL);
+	}
+
+ out:
+	kfree(sas_iounit_pg1);
+	return rc;
+}
+
+
+/**
  * _transport_smp_handler - transport portal for smp passthru
  * @shost: shost object
  * @rphy: sas transport rphy object
@@ -1207,6 +1373,8 @@
 	.get_enclosure_identifier = _transport_get_enclosure_identifier,
 	.get_bay_identifier	= _transport_get_bay_identifier,
 	.phy_reset		= _transport_phy_reset,
+	.phy_enable		= _transport_phy_enable,
+	.set_phy_speed		= _transport_phy_speed,
 	.smp_handler		= _transport_smp_handler,
 };
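Both _transport_phy_enable() and _transport_phy_speed() above size the SAS IO unit page 1 buffer as a fixed header plus one PhyData element per HBA phy before calling the new get/set config helpers. The same offsetof() idiom, reduced to a self-contained userspace sketch; the structures below are simplified stand-ins, not the real Mpi2SasIOUnitPage1_t layout:

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <stdint.h>

struct phy_data {			/* stand-in for Mpi2SasIOUnit1PhyData_t */
	uint8_t  flags;
	uint8_t  max_min_link_rate;
	uint16_t reserved;
};

struct sas_iounit_pg1 {			/* stand-in for Mpi2SasIOUnitPage1_t */
	uint16_t control_flags;
	uint16_t reserved;
	struct phy_data phy[];		/* one entry per phy, count known at run time */
};

int main(void)
{
	unsigned int num_phys = 8;
	size_t sz = offsetof(struct sas_iounit_pg1, phy) +
		    num_phys * sizeof(struct phy_data);
	struct sas_iounit_pg1 *pg = calloc(1, sz);

	if (!pg)
		return 1;
	printf("allocating %zu bytes for %u phys\n", sz, num_phys);
	pg->phy[3].flags |= 0x08;	/* e.g. set a per-phy disable bit */
	free(pg);
	return 0;
}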
 
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index c2f1032..f80c1da8 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -654,7 +654,7 @@
 	}
 	chip = &pm8001_chips[ent->driver_data];
 	SHOST_TO_SAS_HA(shost) =
-		kcalloc(1, sizeof(struct sas_ha_struct), GFP_KERNEL);
+		kzalloc(sizeof(struct sas_ha_struct), GFP_KERNEL);
 	if (!SHOST_TO_SAS_HA(shost)) {
 		rc = -ENOMEM;
 		goto err_out_free_host;
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 8371d91..49ac414 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -1640,8 +1640,10 @@
 	uint16_t mb[MAILBOX_REGISTER_COUNT], i;
 	int err;
 
+	spin_unlock_irq(ha->host->host_lock);
 	err = request_firmware(&fw, ql1280_board_tbl[ha->devnum].fwname,
 			       &ha->pdev->dev);
+	spin_lock_irq(ha->host->host_lock);
 	if (err) {
 		printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
 		       ql1280_board_tbl[ha->devnum].fwname, err);
@@ -1699,8 +1701,10 @@
 		return -ENOMEM;
 #endif
 
+	spin_unlock_irq(ha->host->host_lock);
 	err = request_firmware(&fw, ql1280_board_tbl[ha->devnum].fwname,
 			       &ha->pdev->dev);
+	spin_lock_irq(ha->host->host_lock);
 	if (err) {
 		printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
 		       ql1280_board_tbl[ha->devnum].fwname, err);
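request_firmware() waits for user space and can sleep, so it must not be called while holding host_lock with interrupts disabled; the two hunks above drop and retake the lock around the call. A hedged sketch of the general shape of that pattern; struct my_adapter and its fields are placeholders for the driver's per-host data:

#include <linux/firmware.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <scsi/scsi_host.h>

struct my_adapter {			/* placeholder for the per-host struct */
	struct Scsi_Host *host;
	struct pci_dev *pdev;
	const char *fwname;
};

/* Called with ha->host->host_lock held and interrupts disabled. */
static int my_load_firmware(struct my_adapter *ha)
{
	const struct firmware *fw;
	int err;

	spin_unlock_irq(ha->host->host_lock);	/* request_firmware() may sleep */
	err = request_firmware(&fw, ha->fwname, &ha->pdev->dev);
	spin_lock_irq(ha->host->host_lock);
	if (err)
		return err;

	/* ... download fw->data / fw->size to the board ... */
	release_firmware(fw);
	return 0;
}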
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 3a9f5b2..90d1e06 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -11,7 +11,9 @@
 #include <linux/delay.h>
 
 static int qla24xx_vport_disable(struct fc_vport *, bool);
-
+static int qla84xx_reset(scsi_qla_host_t *, struct msg_echo_lb *, struct fc_bsg_job *);
+int qla84xx_reset_chip(scsi_qla_host_t *, uint16_t, uint16_t *);
+static int qla84xx_mgmt_cmd(scsi_qla_host_t *, struct msg_echo_lb *, struct fc_bsg_job *);
 /* SYSFS attributes --------------------------------------------------------- */
 
 static ssize_t
@@ -1168,6 +1170,28 @@
 }
 
 static ssize_t
+qla24xx_84xx_fw_version_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	int rval = QLA_SUCCESS;
+	uint16_t status[2] = {0, 0};
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	struct qla_hw_data *ha = vha->hw;
+
+	if (IS_QLA84XX(ha) && ha->cs84xx) {
+		if (ha->cs84xx->op_fw_version == 0)
+			rval = qla84xx_verify_chip(vha, status);
+
+		if ((rval == QLA_SUCCESS) &&
+		    (status[0] == 0))
+			return snprintf(buf, PAGE_SIZE, "%u\n",
+			    (uint32_t)ha->cs84xx->op_fw_version);
+	}
+
+	return snprintf(buf, PAGE_SIZE, "\n");
+}
+
+static ssize_t
 qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
     char *buf)
 {
@@ -1281,6 +1305,8 @@
 		   qla2x00_optrom_fcode_version_show, NULL);
 static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show,
 		   NULL);
+static DEVICE_ATTR(84xx_fw_version, S_IRUGO, qla24xx_84xx_fw_version_show,
+		   NULL);
 static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show,
 		   NULL);
 static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);
@@ -1310,6 +1336,7 @@
 	&dev_attr_optrom_efi_version,
 	&dev_attr_optrom_fcode_version,
 	&dev_attr_optrom_fw_version,
+	&dev_attr_84xx_fw_version,
 	&dev_attr_total_isp_aborts,
 	&dev_attr_mpi_version,
 	&dev_attr_phy_version,
@@ -1504,8 +1531,6 @@
 		fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
 			fcport->loop_id, fcport->d_id.b.domain,
 			fcport->d_id.b.area, fcport->d_id.b.al_pa);
-
-	qla2x00_abort_fcport_cmds(fcport);
 }
 
 static int
@@ -1795,6 +1820,581 @@
 	return 0;
 }
 
+/* BSG support for ELS/CT pass through */
+inline srb_t *
+qla2x00_get_ctx_bsg_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size)
+{
+	srb_t *sp;
+	struct qla_hw_data *ha = vha->hw;
+	struct srb_bsg_ctx *ctx;
+
+	sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
+	if (!sp)
+		goto done;
+	ctx = kzalloc(size, GFP_KERNEL);
+	if (!ctx) {
+		mempool_free(sp, ha->srb_mempool);
+		goto done;
+	}
+
+	memset(sp, 0, sizeof(*sp));
+	sp->fcport = fcport;
+	sp->ctx = ctx;
+done:
+	return sp;
+}
+
+static int
+qla2x00_process_els(struct fc_bsg_job *bsg_job)
+{
+	struct fc_rport *rport;
+	fc_port_t *fcport;
+	struct Scsi_Host *host;
+	scsi_qla_host_t *vha;
+	struct qla_hw_data *ha;
+	srb_t *sp;
+	const char *type;
+	int req_sg_cnt, rsp_sg_cnt;
+	int rval =  (DRIVER_ERROR << 16);
+	uint16_t nextlid = 0;
+	struct srb_bsg *els;
+
+	/*  Multiple SG's are not supported for ELS requests */
+        if (bsg_job->request_payload.sg_cnt > 1 ||
+		bsg_job->reply_payload.sg_cnt > 1) {
+		DEBUG2(printk(KERN_INFO
+		    "multiple SG's are not supported for ELS requests"
+		    " [request_sg_cnt: %x reply_sg_cnt: %x]\n",
+		    bsg_job->request_payload.sg_cnt,
+		    bsg_job->reply_payload.sg_cnt));
+		rval = -EPERM;
+		goto done;
+        }
+
+	/* ELS request for rport */
+	if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
+		rport = bsg_job->rport;
+		fcport = *(fc_port_t **) rport->dd_data;
+		host = rport_to_shost(rport);
+		vha = shost_priv(host);
+		ha = vha->hw;
+		type = "FC_BSG_RPT_ELS";
+
+		/* make sure the rport is logged in,
+		 * if not perform fabric login
+		 */
+		if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
+			DEBUG2(qla_printk(KERN_WARNING, ha,
+			    "failed to login port %06X for ELS passthru\n",
+			    fcport->d_id.b24));
+			rval = -EIO;
+			goto done;
+		}
+	} else {
+		host = bsg_job->shost;
+		vha = shost_priv(host);
+		ha = vha->hw;
+		type = "FC_BSG_HST_ELS_NOLOGIN";
+
+		/* Allocate a dummy fcport structure, since the functions
+		 * preparing the IOCB and mailbox command retrieve port
+		 * specific information from the fcport structure. For host
+		 * based ELS commands no fcport structure is allocated.
+		 */
+		fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
+		if (!fcport) {
+			rval = -ENOMEM;
+			goto done;
+		}
+
+		/* Initialize all required  fields of fcport */
+		fcport->vha = vha;
+		fcport->vp_idx = vha->vp_idx;
+		fcport->d_id.b.al_pa =
+		    bsg_job->request->rqst_data.h_els.port_id[0];
+		fcport->d_id.b.area =
+		    bsg_job->request->rqst_data.h_els.port_id[1];
+		fcport->d_id.b.domain =
+		    bsg_job->request->rqst_data.h_els.port_id[2];
+		fcport->loop_id =
+		    (fcport->d_id.b.al_pa == 0xFD) ?
+		    NPH_FABRIC_CONTROLLER : NPH_F_PORT;
+	}
+
+	if (!vha->flags.online) {
+		DEBUG2(qla_printk(KERN_WARNING, ha,
+		    "host not online\n"));
+		rval = -EIO;
+		goto done;
+	}
+
+        req_sg_cnt =
+	    dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+        if (!req_sg_cnt) {
+		rval = -ENOMEM;
+		goto done_free_fcport;
+	}
+        rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
+	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+        if (!rsp_sg_cnt) {
+		rval = -ENOMEM;
+                goto done_free_fcport;
+	}
+
+	if ((req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
+	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt))
+	{
+		DEBUG2(printk(KERN_INFO
+		    "dma mapping resulted in different sg counts "
+		    "[request_sg_cnt: %x dma_request_sg_cnt: %x "
+		    "reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
+		    bsg_job->request_payload.sg_cnt, req_sg_cnt,
+		    bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
+		rval = -EAGAIN;
+                goto done_unmap_sg;
+	}
+
+	/* Alloc SRB structure */
+	sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_bsg));
+	if (!sp) {
+		rval = -ENOMEM;
+                goto done_unmap_sg;
+	}
+
+	els = sp->ctx;
+	els->ctx.type =
+	    (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
+	    SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
+	els->bsg_job = bsg_job;
+
+	DEBUG2(qla_printk(KERN_INFO, ha,
+	    "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
+	    "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
+	    bsg_job->request->rqst_data.h_els.command_code,
+	    fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
+	    fcport->d_id.b.al_pa));
+
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS) {
+		kfree(sp->ctx);
+		mempool_free(sp, ha->srb_mempool);
+		rval = -EIO;
+		goto done_unmap_sg;
+	}
+	return rval;
+
+done_unmap_sg:
+	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
+		bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+	goto done_free_fcport;
+
+done_free_fcport:
+	if (bsg_job->request->msgcode == FC_BSG_HST_ELS_NOLOGIN)
+		kfree(fcport);
+done:
+	return rval;
+}
+
+static int
+qla2x00_process_ct(struct fc_bsg_job *bsg_job)
+{
+	srb_t *sp;
+	struct Scsi_Host *host = bsg_job->shost;
+	scsi_qla_host_t *vha = shost_priv(host);
+	struct qla_hw_data *ha = vha->hw;
+	int rval = (DRIVER_ERROR << 16);
+	int req_sg_cnt, rsp_sg_cnt;
+	uint16_t loop_id;
+	struct fc_port *fcport;
+	char  *type = "FC_BSG_HST_CT";
+	struct srb_bsg *ct;
+
+	/* pass through is supported only for ISP 4Gb or higher */
+        if (!IS_FWI2_CAPABLE(ha)) {
+		DEBUG2(qla_printk(KERN_INFO, ha,
+		    "scsi(%ld):Firmware is not capable to support FC "
+		    "CT pass thru\n", vha->host_no));
+		rval = -EPERM;
+                goto done;
+	}
+
+        req_sg_cnt =
+	    dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+        if (!req_sg_cnt) {
+		rval = -ENOMEM;
+		goto done;
+	}
+
+        rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
+            bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+        if (!rsp_sg_cnt) {
+		rval = -ENOMEM;
+                goto done;
+	}
+
+	if ((req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
+		(rsp_sg_cnt != bsg_job->reply_payload.sg_cnt))
+	{
+		DEBUG2(qla_printk(KERN_WARNING, ha,
+		    "dma mapping resulted in different sg counts "
+		    "[request_sg_cnt: %x dma_request_sg_cnt: %x "
+		    "reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
+		    bsg_job->request_payload.sg_cnt, req_sg_cnt,
+		    bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
+		rval = -EAGAIN;
+                goto done_unmap_sg;
+	}
+
+	if (!vha->flags.online) {
+		DEBUG2(qla_printk(KERN_WARNING, ha,
+		    "host not online\n"));
+		rval = -EIO;
+                goto done_unmap_sg;
+	}
+
+	loop_id =
+	    (bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
+	    >> 24;
+	switch (loop_id) {
+		case 0xFC:
+			loop_id = cpu_to_le16(NPH_SNS);
+			break;
+		case 0xFA:
+			loop_id = vha->mgmt_svr_loop_id;
+			break;
+		default:
+			DEBUG2(qla_printk(KERN_INFO, ha,
+			    "Unknown loop id: %x\n", loop_id));
+			rval = -EINVAL;
+			goto done_unmap_sg;
+	}
+
+	/* Allocate a dummy fcport structure, since the functions preparing
+	 * the IOCB and mailbox command retrieve port specific information
+	 * from the fcport structure. For host based ELS commands there is
+	 * no fcport structure allocated.
+	 */
+	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
+	if (!fcport)
+	{
+		rval = -ENOMEM;
+		goto  done_unmap_sg;
+	}
+
+	/* Initialize all required  fields of fcport */
+	fcport->vha = vha;
+	fcport->vp_idx = vha->vp_idx;
+	fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
+	fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
+	fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
+	fcport->loop_id = loop_id;
+
+	/* Alloc SRB structure */
+	sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_bsg));
+	if (!sp) {
+		rval = -ENOMEM;
+		goto done_free_fcport;
+	}
+
+	ct = sp->ctx;
+	ct->ctx.type = SRB_CT_CMD;
+	ct->bsg_job = bsg_job;
+
+	DEBUG2(qla_printk(KERN_INFO, ha,
+	    "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
+	    "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
+	    (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
+	    fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
+	    fcport->d_id.b.al_pa));
+
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS) {
+		kfree(sp->ctx);
+		mempool_free(sp, ha->srb_mempool);
+		rval = -EIO;
+		goto done_free_fcport;
+	}
+	return rval;
+
+done_free_fcport:
+	kfree(fcport);
+done_unmap_sg:
+	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
+	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+done:
+	return rval;
+}
+
+static int
+qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
+{
+	struct Scsi_Host *host = bsg_job->shost;
+	scsi_qla_host_t *vha = shost_priv(host);
+	struct qla_hw_data *ha = vha->hw;
+	int rval;
+	uint8_t command_sent;
+	uint32_t vendor_cmd;
+	char *type;
+	struct msg_echo_lb elreq;
+	uint16_t response[MAILBOX_REGISTER_COUNT];
+	uint8_t* fw_sts_ptr;
+	uint8_t *req_data;
+	dma_addr_t req_data_dma;
+	uint32_t req_data_len;
+	uint8_t *rsp_data;
+	dma_addr_t rsp_data_dma;
+	uint32_t rsp_data_len;
+
+	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
+	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
+	    test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
+		rval = -EBUSY;
+		goto done;
+	}
+
+	if (!vha->flags.online) {
+		DEBUG2(qla_printk(KERN_WARNING, ha,
+		    "host not online\n"));
+		rval = -EIO;
+                goto done;
+	}
+
+        elreq.req_sg_cnt =
+	    dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+        if (!elreq.req_sg_cnt) {
+		rval = -ENOMEM;
+		goto done;
+	}
+        elreq.rsp_sg_cnt =
+	    dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
+	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+        if (!elreq.rsp_sg_cnt) {
+		rval = -ENOMEM;
+                goto done;
+	}
+
+	if ((elreq.req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
+	    (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt))
+	{
+		DEBUG2(printk(KERN_INFO
+		    "dma mapping resulted in different sg counts "
+		    "[request_sg_cnt: %x dma_request_sg_cnt: %x "
+		    "reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
+		    bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
+		    bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt));
+		rval = -EAGAIN;
+                goto done_unmap_sg;
+	}
+	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
+	req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
+	    &req_data_dma, GFP_KERNEL);
+
+	rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
+	    &rsp_data_dma, GFP_KERNEL);
+
+	/* Copy the request buffer in req_data now */
+	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+	    bsg_job->request_payload.sg_cnt, req_data,
+	    req_data_len);
+
+	elreq.send_dma = req_data_dma;
+	elreq.rcv_dma = rsp_data_dma;
+	elreq.transfer_size = req_data_len;
+
+	/* Vendor cmd: loopback or ECHO diagnostic
+	 * Options:
+	 *	Loopback: either internal or external loopback
+	 *	ECHO: ECHO ELS or vendor specific FC4 link data
+	 */
+	vendor_cmd = bsg_job->request->rqst_data.h_vendor.vendor_cmd[0];
+	elreq.options =
+	    *(((uint32_t *)bsg_job->request->rqst_data.h_vendor.vendor_cmd)
+	    + 1);
+
+	switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
+	case QL_VND_LOOPBACK:
+		if (ha->current_topology != ISP_CFG_F) {
+			type = "FC_BSG_HST_VENDOR_LOOPBACK";
+
+			DEBUG2(qla_printk(KERN_INFO, ha,
+				"scsi(%ld) bsg rqst type: %s vendor rqst type: %x options: %x.\n",
+				vha->host_no, type, vendor_cmd, elreq.options));
+
+			command_sent = INT_DEF_LB_LOOPBACK_CMD;
+			rval = qla2x00_loopback_test(vha, &elreq, response);
+			if (IS_QLA81XX(ha)) {
+				if (response[0] == MBS_COMMAND_ERROR && response[1] == MBS_LB_RESET) {
+					DEBUG2(printk(KERN_ERR "%s(%ld): ABORTing "
+						"ISP\n", __func__, vha->host_no));
+					set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+					qla2xxx_wake_dpc(vha);
+				 }
+			}
+		} else {
+			type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
+			DEBUG2(qla_printk(KERN_INFO, ha,
+				"scsi(%ld) bsg rqst type: %s vendor rqst type: %x options: %x.\n",
+				vha->host_no, type, vendor_cmd, elreq.options));
+
+			command_sent = INT_DEF_LB_ECHO_CMD;
+			rval = qla2x00_echo_test(vha, &elreq, response);
+		}
+		break;
+	case QLA84_RESET:
+		if (!IS_QLA84XX(vha->hw)) {
+			rval = -EINVAL;
+			DEBUG16(printk(
+				"%s(%ld): 8xxx exiting.\n",
+				__func__, vha->host_no));
+			return rval;
+		}
+		rval = qla84xx_reset(vha, &elreq, bsg_job);
+		break;
+	case QLA84_MGMT_CMD:
+		if (!IS_QLA84XX(vha->hw)) {
+			rval = -EINVAL;
+			DEBUG16(printk(
+				"%s(%ld): 8xxx exiting.\n",
+				__func__, vha->host_no));
+			return rval;
+		}
+		rval = qla84xx_mgmt_cmd(vha, &elreq, bsg_job);
+		break;
+	default:
+		rval = -ENOSYS;
+	}
+
+	if (rval != QLA_SUCCESS) {
+		DEBUG2(qla_printk(KERN_WARNING, ha,
+			"scsi(%ld) Vendor request %s failed\n", vha->host_no, type));
+		rval = 0;
+		bsg_job->reply->result = (DID_ERROR << 16);
+		bsg_job->reply->reply_payload_rcv_len = 0;
+		fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
+		memcpy( fw_sts_ptr, response, sizeof(response));
+		fw_sts_ptr += sizeof(response);
+                *fw_sts_ptr = command_sent;
+	} else {
+		DEBUG2(qla_printk(KERN_WARNING, ha,
+			"scsi(%ld) Vendor request %s completed\n", vha->host_no, type));
+		rval = bsg_job->reply->result = 0;
+		bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(response) + sizeof(uint8_t);
+		bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
+		fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
+		memcpy(fw_sts_ptr, response, sizeof(response));
+		fw_sts_ptr += sizeof(response);
+		*fw_sts_ptr = command_sent;
+		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+		bsg_job->reply_payload.sg_cnt, rsp_data,
+		rsp_data_len);
+	}
+	bsg_job->job_done(bsg_job);
+
+done_unmap_sg:
+
+	if(req_data)
+		dma_free_coherent(&ha->pdev->dev, req_data_len,
+			req_data, req_data_dma);
+	dma_unmap_sg(&ha->pdev->dev,
+	    bsg_job->request_payload.sg_list,
+	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+	dma_unmap_sg(&ha->pdev->dev,
+	    bsg_job->reply_payload.sg_list,
+	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+
+done:
+        return rval;
+}
+
+static int
+qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
+{
+        int ret = -EINVAL;
+
+        switch (bsg_job->request->msgcode) {
+		case FC_BSG_RPT_ELS:
+		case FC_BSG_HST_ELS_NOLOGIN:
+			ret = qla2x00_process_els(bsg_job);
+			break;
+		case FC_BSG_HST_CT:
+			ret = qla2x00_process_ct(bsg_job);
+			break;
+		case FC_BSG_HST_VENDOR:
+			ret = qla2x00_process_vendor_specific(bsg_job);
+			break;
+		case FC_BSG_HST_ADD_RPORT:
+		case FC_BSG_HST_DEL_RPORT:
+		case FC_BSG_RPT_CT:
+		default:
+			DEBUG2(printk("qla2xxx: unsupported BSG request\n"));
+			break;
+        }
+	return ret;
+}
+
+static int
+qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
+{
+        scsi_qla_host_t *vha = shost_priv(bsg_job->shost);
+        struct qla_hw_data *ha = vha->hw;
+        srb_t *sp;
+        int cnt, que;
+        unsigned long flags;
+        struct req_que *req;
+	struct srb_bsg *sp_bsg;
+
+	/* find the bsg job from the active list of commands */
+        spin_lock_irqsave(&ha->hardware_lock, flags);
+	for (que = 0; que < ha->max_req_queues; que++) {
+		req = ha->req_q_map[que];
+		if (!req)
+			continue;
+
+		for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++ ) {
+			sp = req->outstanding_cmds[cnt];
+
+			if (sp) {
+				sp_bsg = (struct srb_bsg*)sp->ctx;
+
+				if (((sp_bsg->ctx.type == SRB_CT_CMD) ||
+				    (sp_bsg->ctx.type == SRB_ELS_CMD_RPT)
+				    || ( sp_bsg->ctx.type == SRB_ELS_CMD_HST)) &&
+				    (sp_bsg->bsg_job == bsg_job)) {
+					if (ha->isp_ops->abort_command(sp)) {
+						DEBUG2(qla_printk(KERN_INFO, ha,
+						"scsi(%ld): mbx abort_command failed\n", vha->host_no));
+						bsg_job->req->errors = bsg_job->reply->result = -EIO;
+					} else {
+						DEBUG2(qla_printk(KERN_INFO, ha,
+						"scsi(%ld): mbx abort_command success\n", vha->host_no));
+						bsg_job->req->errors = bsg_job->reply->result = 0;
+					}
+					goto done;
+				}
+			}
+		}
+	}
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	DEBUG2(qla_printk(KERN_INFO, ha,
+		"scsi(%ld) SRB not found to abort\n", vha->host_no));
+	bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
+	return 0;
+
+done:
+	if (bsg_job->request->msgcode == FC_BSG_HST_CT)
+		kfree(sp->fcport);
+	kfree(sp->ctx);
+	mempool_free(sp, ha->srb_mempool);
+	return 0;
+}
+
 struct fc_function_template qla2xxx_transport_functions = {
 
 	.show_host_node_name = 1,
@@ -1838,6 +2438,8 @@
 	.vport_create = qla24xx_vport_create,
 	.vport_disable = qla24xx_vport_disable,
 	.vport_delete = qla24xx_vport_delete,
+	.bsg_request = qla24xx_bsg_request,
+	.bsg_timeout = qla24xx_bsg_timeout,
 };
 
 struct fc_function_template qla2xxx_transport_vport_functions = {
@@ -1878,6 +2480,8 @@
 	.dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
 	.terminate_rport_io = qla2x00_terminate_rport_io,
 	.get_fc_host_stats = qla2x00_get_fc_host_stats,
+	.bsg_request = qla24xx_bsg_request,
+	.bsg_timeout = qla24xx_bsg_timeout,
 };
 
 void
@@ -1906,3 +2510,125 @@
 		speed = FC_PORTSPEED_1GBIT;
 	fc_host_supported_speeds(vha->host) = speed;
 }
+static int
+qla84xx_reset(scsi_qla_host_t *ha, struct msg_echo_lb *mreq, struct fc_bsg_job *bsg_job)
+{
+	int             ret = 0;
+	int             cmd;
+	uint16_t        cmd_status;
+
+	DEBUG16(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+
+	cmd = (*((bsg_job->request->rqst_data.h_vendor.vendor_cmd) + 2))
+			== A84_RESET_FLAG_ENABLE_DIAG_FW ?
+				A84_ISSUE_RESET_DIAG_FW : A84_ISSUE_RESET_OP_FW;
+	ret = qla84xx_reset_chip(ha, cmd == A84_ISSUE_RESET_DIAG_FW,
+	&cmd_status);
+	return ret;
+}
+
+static int
+qla84xx_mgmt_cmd(scsi_qla_host_t *ha, struct msg_echo_lb *mreq, struct fc_bsg_job *bsg_job)
+{
+	struct access_chip_84xx *mn;
+	dma_addr_t mn_dma, mgmt_dma;
+	void *mgmt_b = NULL;
+	int ret = 0;
+	int rsp_hdr_len, len = 0;
+	struct qla84_msg_mgmt *ql84_mgmt;
+
+	ql84_mgmt = (struct qla84_msg_mgmt *) vmalloc(sizeof(struct qla84_msg_mgmt));
+	ql84_mgmt->cmd =
+		*((uint16_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 2));
+	ql84_mgmt->mgmtp.u.mem.start_addr =
+		*((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 3));
+	ql84_mgmt->len =
+		*((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 4));
+	ql84_mgmt->mgmtp.u.config.id =
+		*((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 5));
+	ql84_mgmt->mgmtp.u.config.param0 =
+		*((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 6));
+	ql84_mgmt->mgmtp.u.config.param1 =
+		*((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 7));
+	ql84_mgmt->mgmtp.u.info.type =
+		*((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 8));
+	ql84_mgmt->mgmtp.u.info.context =
+		*((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 9));
+
+	rsp_hdr_len = bsg_job->request_payload.payload_len;
+
+	mn = dma_pool_alloc(ha->hw->s_dma_pool, GFP_KERNEL, &mn_dma);
+	if (mn == NULL) {
+		DEBUG2(printk(KERN_ERR "%s(%lu): dma alloc for fw buffer "
+		    "failed\n", __func__, ha->host_no));
+		return -ENOMEM;
+	}
+
+	memset(mn, 0, sizeof (struct access_chip_84xx));
+
+	mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
+	mn->entry_count = 1;
+
+	switch (ql84_mgmt->cmd) {
+	case QLA84_MGMT_READ_MEM:
+		mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
+		mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.mem.start_addr);
+		break;
+	case QLA84_MGMT_WRITE_MEM:
+		mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
+		mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.mem.start_addr);
+		break;
+	case QLA84_MGMT_CHNG_CONFIG:
+		mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
+		mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.id);
+		mn->parameter2 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.param0);
+		mn->parameter3 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.param1);
+		break;
+	case QLA84_MGMT_GET_INFO:
+		mn->options = cpu_to_le16(ACO_REQUEST_INFO);
+		mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.info.type);
+		mn->parameter2 = cpu_to_le32(ql84_mgmt->mgmtp.u.info.context);
+		break;
+	default:
+		ret = -EIO;
+		goto exit_mgmt0;
+	}
+
+	if ((len == ql84_mgmt->len) &&
+		ql84_mgmt->cmd != QLA84_MGMT_CHNG_CONFIG) {
+		mgmt_b = dma_alloc_coherent(&ha->hw->pdev->dev, len,
+				&mgmt_dma, GFP_KERNEL);
+		if (mgmt_b == NULL) {
+			DEBUG2(printk(KERN_ERR "%s(%lu): dma alloc mgmt_b "
+			    "failed\n", __func__, ha->host_no));
+			ret = -ENOMEM;
+			goto exit_mgmt0;
+		}
+		mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->len);
+		mn->dseg_count = cpu_to_le16(1);
+		mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
+		mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
+		mn->dseg_length = cpu_to_le32(len);
+
+		if (ql84_mgmt->cmd == QLA84_MGMT_WRITE_MEM) {
+			memcpy(mgmt_b, ql84_mgmt->payload, len);
+		}
+	}
+
+	ret = qla2x00_issue_iocb(ha, mn, mn_dma, 0);
+	if ((ret != QLA_SUCCESS) || (ql84_mgmt->cmd == QLA84_MGMT_WRITE_MEM)
+		|| (ql84_mgmt->cmd == QLA84_MGMT_CHNG_CONFIG)) {
+			if (ret != QLA_SUCCESS)
+				DEBUG2(printk(KERN_ERR "%s(%lu): failed\n",
+					__func__, ha->host_no));
+	} else if ((ql84_mgmt->cmd == QLA84_MGMT_READ_MEM) ||
+			(ql84_mgmt->cmd == QLA84_MGMT_GET_INFO)) {
+	}
+
+	if (mgmt_b)
+		dma_free_coherent(&ha->hw->pdev->dev, len, mgmt_b, mgmt_dma);
+
+exit_mgmt0:
+	dma_pool_free(ha->hw->s_dma_pool, mn, mn_dma);
+	return ret;
+}
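In qla2x00_process_ct() above, the pass-through destination is taken from the top byte of preamble_word1: 0xFC selects the name/directory server (NPH_SNS) and 0xFA the management server, matching the low bytes of the corresponding FC well-known addresses; anything else is rejected. A tiny self-contained sketch of that extraction (the preamble value is hypothetical):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical CT preamble word 1 as carried in the BSG request */
	uint32_t preamble_word1 = 0xFC000000;
	uint8_t dest = (preamble_word1 & 0xFF000000) >> 24;

	switch (dest) {
	case 0xFC:
		printf("route to name/directory server (NPH_SNS)\n");
		break;
	case 0xFA:
		printf("route to management server\n");
		break;
	default:
		printf("unsupported destination 0x%02x\n", dest);
		break;
	}
	return 0;
}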
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 1263d97..afa9561 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -31,6 +31,7 @@
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_transport_fc.h>
+#include <scsi/scsi_bsg_fc.h>
 
 #define QLA2XXX_DRIVER_NAME  "qla2xxx"
 
@@ -228,6 +229,27 @@
 	uint16_t flags;
 };
 
+struct srb_bsg_ctx {
+#define SRB_ELS_CMD_RPT 3
+#define SRB_ELS_CMD_HST 4
+#define SRB_CT_CMD 5
+	uint16_t type;
+};
+
+struct srb_bsg {
+	struct srb_bsg_ctx ctx;
+	struct fc_bsg_job *bsg_job;
+};
+
+struct msg_echo_lb {
+	dma_addr_t send_dma;
+	dma_addr_t rcv_dma;
+	uint16_t req_sg_cnt;
+	uint16_t rsp_sg_cnt;
+	uint16_t options;
+	uint32_t transfer_size;
+};
+
 /*
  * ISP I/O Register Set structure definitions.
  */
@@ -522,6 +544,8 @@
 #define MBA_DISCARD_RND_FRAME	0x8048	/* discard RND frame due to error. */
 #define MBA_REJECTED_FCP_CMD	0x8049	/* rejected FCP_CMD. */
 
+/* ISP mailbox loopback echo diagnostic error code */
+#define MBS_LB_RESET	0x17
 /*
  * Firmware options 1, 2, 3.
  */
@@ -2230,6 +2254,13 @@
 	int max_q_depth;
 };
 
+/* Placeholder for FW buffer parameters */
+struct qlfc_fw {
+	void *fw_buf;
+	dma_addr_t fw_dma;
+	uint32_t len;
+};
+
 /*
  * Qlogic host adapter specific data structure.
 */
@@ -2594,6 +2625,7 @@
 	struct qla_statistics qla_stats;
 	struct isp_operations *isp_ops;
 	struct workqueue_struct *wq;
+	struct qlfc_fw fw_buf;
 };
 
 /*
@@ -2766,4 +2798,127 @@
 
 #define CMD_SP(Cmnd)		((Cmnd)->SCp.ptr)
 
+/*
+ * BSG Vendor specific commands
+ */
+
+#define QL_VND_LOOPBACK		0x01
+#define QLA84_RESET		0x02
+#define QLA84_UPDATE_FW		0x03
+#define QLA84_MGMT_CMD		0x04
+
+/* BSG definitions for interpreting CommandSent field */
+#define INT_DEF_LB_LOOPBACK_CMD         0
+#define INT_DEF_LB_ECHO_CMD             1
+
+/* BSG Vendor specific definitions */
+typedef struct _A84_RESET {
+	uint16_t Flags;
+	uint16_t Reserved;
+#define A84_RESET_FLAG_ENABLE_DIAG_FW   1
+} __attribute__((packed)) A84_RESET, *PA84_RESET;
+
+#define A84_ISSUE_WRITE_TYPE_CMD        0
+#define A84_ISSUE_READ_TYPE_CMD         1
+#define A84_CLEANUP_CMD                 2
+#define A84_ISSUE_RESET_OP_FW           3
+#define A84_ISSUE_RESET_DIAG_FW         4
+#define A84_ISSUE_UPDATE_OPFW_CMD       5
+#define A84_ISSUE_UPDATE_DIAGFW_CMD     6
+
+struct qla84_mgmt_param {
+	union {
+		struct {
+			uint32_t start_addr;
+		} mem; /* for QLA84_MGMT_READ/WRITE_MEM */
+		struct {
+			uint32_t id;
+#define QLA84_MGMT_CONFIG_ID_UIF        1
+#define QLA84_MGMT_CONFIG_ID_FCOE_COS   2
+#define QLA84_MGMT_CONFIG_ID_PAUSE      3
+#define QLA84_MGMT_CONFIG_ID_TIMEOUTS   4
+
+		uint32_t param0;
+		uint32_t param1;
+	} config; /* for QLA84_MGMT_CHNG_CONFIG */
+
+	struct {
+		uint32_t type;
+#define QLA84_MGMT_INFO_CONFIG_LOG_DATA         1 /* Get Config Log Data */
+#define QLA84_MGMT_INFO_LOG_DATA                2 /* Get Log Data */
+#define QLA84_MGMT_INFO_PORT_STAT               3 /* Get Port Statistics */
+#define QLA84_MGMT_INFO_LIF_STAT                4 /* Get LIF Statistics  */
+#define QLA84_MGMT_INFO_ASIC_STAT               5 /* Get ASIC Statistics */
+#define QLA84_MGMT_INFO_CONFIG_PARAMS           6 /* Get Config Parameters */
+#define QLA84_MGMT_INFO_PANIC_LOG               7 /* Get Panic Log */
+
+		uint32_t context;
+/*
+* context definitions for QLA84_MGMT_INFO_CONFIG_LOG_DATA
+*/
+#define IC_LOG_DATA_LOG_ID_DEBUG_LOG                    0
+#define IC_LOG_DATA_LOG_ID_LEARN_LOG                    1
+#define IC_LOG_DATA_LOG_ID_FC_ACL_INGRESS_LOG           2
+#define IC_LOG_DATA_LOG_ID_FC_ACL_EGRESS_LOG            3
+#define IC_LOG_DATA_LOG_ID_ETHERNET_ACL_INGRESS_LOG     4
+#define IC_LOG_DATA_LOG_ID_ETHERNET_ACL_EGRESS_LOG      5
+#define IC_LOG_DATA_LOG_ID_MESSAGE_TRANSMIT_LOG         6
+#define IC_LOG_DATA_LOG_ID_MESSAGE_RECEIVE_LOG          7
+#define IC_LOG_DATA_LOG_ID_LINK_EVENT_LOG               8
+#define IC_LOG_DATA_LOG_ID_DCX_LOG                      9
+
+/*
+* context definitions for QLA84_MGMT_INFO_PORT_STAT
+*/
+#define IC_PORT_STATISTICS_PORT_NUMBER_ETHERNET_PORT0   0
+#define IC_PORT_STATISTICS_PORT_NUMBER_ETHERNET_PORT1   1
+#define IC_PORT_STATISTICS_PORT_NUMBER_NSL_PORT0        2
+#define IC_PORT_STATISTICS_PORT_NUMBER_NSL_PORT1        3
+#define IC_PORT_STATISTICS_PORT_NUMBER_FC_PORT0         4
+#define IC_PORT_STATISTICS_PORT_NUMBER_FC_PORT1         5
+
+
+/*
+* context definitions for QLA84_MGMT_INFO_LIF_STAT
+*/
+#define IC_LIF_STATISTICS_LIF_NUMBER_ETHERNET_PORT0     0
+#define IC_LIF_STATISTICS_LIF_NUMBER_ETHERNET_PORT1     1
+#define IC_LIF_STATISTICS_LIF_NUMBER_FC_PORT0           2
+#define IC_LIF_STATISTICS_LIF_NUMBER_FC_PORT1           3
+#define IC_LIF_STATISTICS_LIF_NUMBER_CPU                6
+
+		} info; /* for QLA84_MGMT_GET_INFO */
+	} u;
+};
+
+struct qla84_msg_mgmt {
+	uint16_t cmd;
+#define QLA84_MGMT_READ_MEM     0x00
+#define QLA84_MGMT_WRITE_MEM    0x01
+#define QLA84_MGMT_CHNG_CONFIG  0x02
+#define QLA84_MGMT_GET_INFO     0x03
+	uint16_t rsrvd;
+	struct qla84_mgmt_param mgmtp;/* parameters for cmd */
+	uint32_t len; /* bytes in payload following this struct */
+	uint8_t payload[0]; /* payload for cmd */
+};
+
+struct msg_update_fw {
+	/*
+	* diag_fw = 0  operational fw
+	*      otherwise diagnostic fw
+	* offset, len, fw_len are present to overcome the current limitation
+	* of 128Kb xfer size. The fw is sent in smaller chunks. Each chunk
+	* specifies the byte "offset" where it fits in the fw buffer. The
+	* number of bytes in each chunk is specified in "len". "fw_len"
+	* is the total size of fw. The first chunk should start at offset = 0.
+	* When offset+len == fw_len, the fw is written to the HBA.
+	*/
+	uint32_t diag_fw;
+	uint32_t offset;/* start offset */
+	uint32_t len;   /* num bytes in cur xfer */
+	uint32_t fw_len; /* size of fw in bytes */
+	uint8_t fw_bytes[0];
+};
+
 #endif
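The msg_update_fw comment above spells out the chunking protocol: each piece of the image carries offset and len, fw_len gives the total size, and the adapter commits the firmware once offset + len == fw_len. A self-contained sketch of a sender loop following those rules; the 128 KB chunk size and the send_chunk() helper are assumptions for illustration, not an existing interface:

#include <stdio.h>
#include <stdint.h>

#define CHUNK_MAX	(128 * 1024)	/* assumed per-transfer limit */

/* Stand-in for whatever actually issues the QLA84_UPDATE_FW request. */
static void send_chunk(uint32_t diag_fw, uint32_t offset,
		       uint32_t len, uint32_t fw_len)
{
	printf("chunk: offset=%u len=%u (fw_len=%u)%s\n", offset, len, fw_len,
	       offset + len == fw_len ? "  -> firmware written to HBA" : "");
}

int main(void)
{
	uint32_t fw_len = 300 * 1024;	/* hypothetical image size */
	uint32_t offset;

	for (offset = 0; offset < fw_len; offset += CHUNK_MAX) {
		uint32_t len = fw_len - offset;

		if (len > CHUNK_MAX)
			len = CHUNK_MAX;
		send_chunk(0 /* operational fw */, offset, len, fw_len);
	}
	return 0;
}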
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 66a8da5..cebf4f1 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -627,6 +627,39 @@
 	uint32_t rx_len;		/* Data segment 1 length. */
 };
 
+struct els_sts_entry_24xx {
+	uint8_t entry_type;		/* Entry type. */
+	uint8_t entry_count;		/* Entry count. */
+	uint8_t sys_define;		/* System Defined. */
+	uint8_t entry_status;		/* Entry Status. */
+
+	uint32_t handle;		/* System handle. */
+
+	uint16_t comp_status;
+
+	uint16_t nport_handle;		/* N_PORT handle. */
+
+	uint16_t reserved_1;
+
+	uint8_t vp_index;
+	uint8_t sof_type;
+
+	uint32_t rx_xchg_address;	/* Receive exchange address. */
+	uint16_t reserved_2;
+
+	uint8_t opcode;
+	uint8_t reserved_3;
+
+	uint8_t port_id[3];
+	uint8_t reserved_4;
+
+	uint16_t reserved_5;
+
+	uint16_t control_flags;		/* Control flags. */
+	uint32_t total_byte_count;
+	uint32_t error_subcode_1;
+	uint32_t error_subcode_2;
+};
 /*
  * ISP queue - Mailbox Command entry structure definition.
  */
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 8bc6f53..3a89bc5 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -60,6 +60,8 @@
 extern int qla2x00_async_logout_done(struct scsi_qla_host *, fc_port_t *,
     uint16_t *);
 
+extern fc_port_t *
+qla2x00_alloc_fcport(scsi_qla_host_t *, gfp_t);
 /*
  * Global Data in qla_os.c source file.
  */
@@ -76,6 +78,7 @@
 extern int ql2xmaxqueues;
 extern int ql2xmultique_tag;
 extern int ql2xfwloadbin;
+extern int ql2xetsenable;
 
 extern int qla2x00_loop_reset(scsi_qla_host_t *);
 extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -94,7 +97,6 @@
 
 extern int qla81xx_restart_mpi_firmware(scsi_qla_host_t *);
 
-extern void qla2x00_abort_fcport_cmds(fc_port_t *);
 extern struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *,
 	struct qla_hw_data *);
 extern void qla2x00_free_host(struct scsi_qla_host *);
@@ -154,6 +156,7 @@
 int __qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
 						uint16_t, uint16_t, uint8_t);
 extern int qla2x00_start_sp(srb_t *);
+extern void qla2x00_ctx_sp_free(srb_t *);
 
 /*
  * Global Function Prototypes in qla_mbx.c source file.
@@ -426,6 +429,8 @@
 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
+extern int qla2x00_loopback_test(scsi_qla_host_t *, struct msg_echo_lb *, uint16_t *);
+extern int qla2x00_echo_test(scsi_qla_host_t *, struct msg_echo_lb *, uint16_t *);
 
 /*
  * Global Function Prototypes in qla_dfs.c source file.
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 3f8e849..a67b2ba 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -62,7 +62,7 @@
 	ctx->free(sp);
 }
 
-static void
+void
 qla2x00_ctx_sp_free(srb_t *sp)
 {
 	struct srb_ctx *ctx = sp->ctx;
@@ -338,6 +338,16 @@
 	rval = qla2x00_init_rings(vha);
 	ha->flags.chip_reset_done = 1;
 
+	if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
+		/* Issue verify 84xx FW IOCB to complete 84xx initialization */
+		rval = qla84xx_init_chip(vha);
+		if (rval != QLA_SUCCESS) {
+			qla_printk(KERN_ERR, ha,
+			    "Unable to initialize ISP84XX.\n");
+			qla84xx_put_chip(vha);
+		}
+	}
+
 	return (rval);
 }
 
@@ -2216,7 +2226,7 @@
  *
  * Returns a pointer to the allocated fcport, or NULL, if none available.
  */
-static fc_port_t *
+fc_port_t *
 qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
 {
 	fc_port_t *fcport;
@@ -2900,8 +2910,13 @@
 		if (qla2x00_is_reserved_id(vha, loop_id))
 			continue;
 
-		if (atomic_read(&vha->loop_down_timer) || LOOP_TRANSITION(vha))
+		if (atomic_read(&vha->loop_down_timer) ||
+		    LOOP_TRANSITION(vha)) {
+			atomic_set(&vha->loop_down_timer, 0);
+			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
 			break;
+		}
 
 		if (swl != NULL) {
 			if (last_dev) {
@@ -4877,6 +4892,15 @@
 }
 
 void
-qla81xx_update_fw_options(scsi_qla_host_t *ha)
+qla81xx_update_fw_options(scsi_qla_host_t *vha)
 {
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!ql2xetsenable)
+		return;
+
+	/* Enable ETS Burst. */
+	memset(ha->fw_options, 0, sizeof(ha->fw_options));
+	ha->fw_options[2] |= BIT_9;
+	qla2x00_set_fw_options(vha, ha->fw_options);
 }
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index c5ccac0..8299a98 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -1025,6 +1025,119 @@
 	/* Implicit: mbx->mbx10 = 0. */
 }
 
+static void
+qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
+{
+	struct fc_bsg_job *bsg_job = ((struct srb_bsg *)sp->ctx)->bsg_job;
+
+	els_iocb->entry_type = ELS_IOCB_TYPE;
+	els_iocb->entry_count = 1;
+	els_iocb->sys_define = 0;
+	els_iocb->entry_status = 0;
+	els_iocb->handle = sp->handle;
+	els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+	els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
+	els_iocb->vp_index = sp->fcport->vp_idx;
+	els_iocb->sof_type = EST_SOFI3;
+	els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
+
+	els_iocb->opcode = (((struct srb_bsg *)sp->ctx)->ctx.type == SRB_ELS_CMD_RPT) ?
+	    bsg_job->request->rqst_data.r_els.els_code : bsg_job->request->rqst_data.h_els.command_code;
+	els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
+	els_iocb->port_id[1] = sp->fcport->d_id.b.area;
+	els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
+	els_iocb->control_flags = 0;
+	els_iocb->rx_byte_count =
+	    cpu_to_le32(bsg_job->reply_payload.payload_len);
+	els_iocb->tx_byte_count =
+	    cpu_to_le32(bsg_job->request_payload.payload_len);
+
+	els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
+	    (bsg_job->request_payload.sg_list)));
+	els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
+	    (bsg_job->request_payload.sg_list)));
+	els_iocb->tx_len = cpu_to_le32(sg_dma_len
+	    (bsg_job->request_payload.sg_list));
+
+	els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
+	    (bsg_job->reply_payload.sg_list)));
+	els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
+	    (bsg_job->reply_payload.sg_list)));
+	els_iocb->rx_len = cpu_to_le32(sg_dma_len
+	    (bsg_job->reply_payload.sg_list));
+}
+
+static void
+qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
+{
+	uint16_t        avail_dsds;
+	uint32_t        *cur_dsd;
+	struct scatterlist *sg;
+	int index;
+	uint16_t tot_dsds;
+	scsi_qla_host_t *vha = sp->fcport->vha;
+	struct fc_bsg_job *bsg_job = ((struct srb_bsg *)sp->ctx)->bsg_job;
+	int loop_iteration = 0;
+	int cont_iocb_prsnt = 0;
+	int entry_count = 1;
+
+	ct_iocb->entry_type = CT_IOCB_TYPE;
+	ct_iocb->entry_status = 0;
+	ct_iocb->sys_define = 0;
+	ct_iocb->handle = sp->handle;
+
+	ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+	ct_iocb->vp_index = sp->fcport->vp_idx;
+	ct_iocb->comp_status = __constant_cpu_to_le16(0);
+
+	ct_iocb->cmd_dsd_count =
+	    __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
+	ct_iocb->timeout = 0;
+	ct_iocb->rsp_dsd_count =
+	    __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
+	ct_iocb->rsp_byte_count =
+	    cpu_to_le32(bsg_job->reply_payload.payload_len);
+	ct_iocb->cmd_byte_count =
+	    cpu_to_le32(bsg_job->request_payload.payload_len);
+	ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
+	    (bsg_job->request_payload.sg_list)));
+	ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
+	    (bsg_job->request_payload.sg_list)));
+	ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
+	    (bsg_job->request_payload.sg_list));
+
+	avail_dsds = 1;
+	cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
+	index = 0;
+	tot_dsds = bsg_job->reply_payload.sg_cnt;
+
+	for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
+		dma_addr_t       sle_dma;
+		cont_a64_entry_t *cont_pkt;
+
+		/* Allocate additional continuation packets? */
+		if (avail_dsds == 0) {
+			/*
+			 * Five DSDs are available in the Cont.
+			 * Type 1 IOCB.
+			 */
+			cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
+			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
+			avail_dsds = 5;
+			cont_iocb_prsnt = 1;
+			entry_count++;
+		}
+
+		sle_dma = sg_dma_address(sg);
+		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+		loop_iteration++;
+		avail_dsds--;
+	}
+	ct_iocb->entry_count = entry_count;
+}
+
 int
 qla2x00_start_sp(srb_t *sp)
 {
@@ -1052,6 +1165,13 @@
 		    qla24xx_logout_iocb(sp, pkt):
 		    qla2x00_logout_iocb(sp, pkt);
 		break;
+	case SRB_ELS_CMD_RPT:
+	case SRB_ELS_CMD_HST:
+		qla24xx_els_iocb(sp, pkt);
+		break;
+	case SRB_CT_CMD:
+		qla24xx_ct_iocb(sp, pkt);
+		break;
 	default:
 		break;
 	}
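
For reference, the DSD packing in qla24xx_ct_iocb() above puts the first reply
data segment in the command IOCB and five segments in each continuation Type 1
IOCB, so the entry count it accumulates can be restated as below; this helper
is illustrative only and not part of the driver.

	/* IOCB entries consumed for a CT pass-through reply with
	 * rsp_sg_cnt scatter/gather elements, matching the packing above. */
	static unsigned int ct_iocb_entry_count(unsigned int rsp_sg_cnt)
	{
		unsigned int extra = rsp_sg_cnt > 1 ? rsp_sg_cnt - 1 : 0;

		/* 1 command IOCB + DIV_ROUND_UP(extra, 5) continuation IOCBs */
		return 1 + (extra + 4) / 5;
	}

A 12-element reply list, for example, needs 1 + 3 = 4 entries, the
continuations carrying 5, 5 and 1 segments.
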
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 6fc63b9..ab90329 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -8,6 +8,7 @@
 
 #include <linux/delay.h>
 #include <scsi/scsi_tcq.h>
+#include <scsi/scsi_bsg_fc.h>
 
 static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
 static void qla2x00_process_completed_request(struct scsi_qla_host *,
@@ -881,7 +882,9 @@
 		    index);
 		return NULL;
 	}
+
 	req->outstanding_cmds[index] = NULL;
+
 done:
 	return sp;
 }
@@ -982,6 +985,100 @@
 }
 
 static void
+qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
+    struct sts_entry_24xx *pkt, int iocb_type)
+{
+	const char func[] = "ELS_CT_IOCB";
+	const char *type;
+	struct qla_hw_data *ha = vha->hw;
+	srb_t *sp;
+	struct srb_bsg *sp_bsg;
+	struct fc_bsg_job *bsg_job;
+	uint16_t comp_status;
+	uint32_t fw_status[3];
+	uint8_t* fw_sts_ptr;
+
+	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+	if (!sp)
+		return;
+	sp_bsg = (struct srb_bsg*)sp->ctx;
+	bsg_job = sp_bsg->bsg_job;
+
+	type = NULL;
+	switch (sp_bsg->ctx.type) {
+	case SRB_ELS_CMD_RPT:
+	case SRB_ELS_CMD_HST:
+		type = "els";
+		break;
+	case SRB_CT_CMD:
+		type = "ct pass-through";
+		break;
+	default:
+		qla_printk(KERN_WARNING, ha,
+		    "%s: Unrecognized SRB: (%p) type=%d.\n", func, sp,
+		    sp_bsg->ctx.type);
+		return;
+	}
+
+	comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
+	fw_status[1] = le32_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1);
+	fw_status[2] = le32_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2);
+
+	/* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
+	 * fc payload to the caller
+	 */
+	bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
+	bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);
+
+	if (comp_status != CS_COMPLETE) {
+		if (comp_status == CS_DATA_UNDERRUN) {
+			bsg_job->reply->result = DID_OK << 16;
+			bsg_job->reply->reply_payload_rcv_len =
+				le32_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);
+
+			DEBUG2(qla_printk(KERN_WARNING, ha,
+			    "scsi(%ld:0x%x): ELS-CT pass-through-%s error comp_status-status=0x%x "
+			    "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
+				vha->host_no, sp->handle, type, comp_status, fw_status[1], fw_status[2],
+				le32_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count)));
+			fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
+			memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));
+		}
+		else {
+			DEBUG2(qla_printk(KERN_WARNING, ha,
+			    "scsi(%ld:0x%x): ELS-CT pass-through-%s error comp_status-status=0x%x "
+			    "error subcode 1=0x%x error subcode 2=0x%x.\n",
+				vha->host_no, sp->handle, type, comp_status,
+				le32_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1),
+				le32_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2)));
+			bsg_job->reply->result = DID_ERROR << 16;
+			bsg_job->reply->reply_payload_rcv_len = 0;
+			fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
+			memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));
+		}
+		DEBUG2(qla2x00_dump_buffer((uint8_t *)pkt, sizeof(*pkt)));
+	}
+	else {
+		bsg_job->reply->result = DID_OK << 16;
+		bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
+		bsg_job->reply_len = 0;
+	}
+
+	dma_unmap_sg(&ha->pdev->dev,
+	    bsg_job->request_payload.sg_list,
+	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+	dma_unmap_sg(&ha->pdev->dev,
+	    bsg_job->reply_payload.sg_list,
+	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+	if ((sp_bsg->ctx.type == SRB_ELS_CMD_HST) ||
+	    (sp_bsg->ctx.type == SRB_CT_CMD))
+		kfree(sp->fcport);
+	kfree(sp->ctx);
+	mempool_free(sp, ha->srb_mempool);
+	bsg_job->job_done(bsg_job);
+}
+
+static void
 qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
     struct logio_entry_24xx *logio)
 {
@@ -1749,6 +1846,13 @@
 			qla24xx_logio_entry(vha, rsp->req,
 			    (struct logio_entry_24xx *)pkt);
 			break;
+		case CT_IOCB_TYPE:
+			qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
+			clear_bit(MBX_INTERRUPT, &vha->hw->mbx_cmd_flags);
+			break;
+		case ELS_IOCB_TYPE:
+			qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
+			break;
 		default:
 			/* Type Not Supported. */
 			DEBUG4(printk(KERN_WARNING
@@ -2049,7 +2153,6 @@
 		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
 		complete(&ha->mbx_intr_comp);
 	}
-
 	return IRQ_HANDLED;
 }
 
@@ -2255,10 +2358,11 @@
 
 	if (ha->flags.msix_enabled)
 		qla24xx_disable_msix(ha);
-	else if (ha->flags.inta_enabled) {
+	else if (ha->flags.msi_enabled) {
 		free_irq(ha->pdev->irq, rsp);
 		pci_disable_msi(ha->pdev);
-	}
+	} else
+		free_irq(ha->pdev->irq, rsp);
 }
 
 
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 056e4d4..6e53bdb 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -3636,6 +3636,157 @@
 }
 
 int
+qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, uint16_t *mresp)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+	uint32_t iter_cnt = 0x1;
+
+	DEBUG11(printk("scsi(%ld): entered.\n", vha->host_no));
+
+	memset(mcp->mb, 0 , sizeof(mcp->mb));
+	mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK;
+	mcp->mb[1] = mreq->options | BIT_6;	/* BIT_6 specifies 64-bit addressing */
+
+	/* transfer count */
+	mcp->mb[10] = LSW(mreq->transfer_size);
+	mcp->mb[11] = MSW(mreq->transfer_size);
+
+	/* send data address */
+	mcp->mb[14] = LSW(mreq->send_dma);
+	mcp->mb[15] = MSW(mreq->send_dma);
+	mcp->mb[20] = LSW(MSD(mreq->send_dma));
+	mcp->mb[21] = MSW(MSD(mreq->send_dma));
+
+	/* receive data address */
+	mcp->mb[16] = LSW(mreq->rcv_dma);
+	mcp->mb[17] = MSW(mreq->rcv_dma);
+	mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
+	mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
+
+	/* Iteration count */
+	mcp->mb[18] = LSW(iter_cnt);
+	mcp->mb[19] = MSW(iter_cnt);
+
+	mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15|
+	    MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
+	if (IS_QLA81XX(vha->hw))
+		mcp->out_mb |= MBX_2;
+	mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0;
+
+	mcp->buf_size = mreq->transfer_size;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
+
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS) {
+		DEBUG2(printk(KERN_WARNING
+		    "(%ld): failed=%x mb[0]=0x%x "
+			"mb[1]=0x%x mb[2]=0x%x mb[3]=0x%x mb[18]=0x%x mb[19]=0x%x. \n", vha->host_no, rval,
+			mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], mcp->mb[18], mcp->mb[19]));
+	} else {
+		DEBUG2(printk(KERN_WARNING
+		    "scsi(%ld): done.\n", vha->host_no));
+	}
+
+	/* Copy mailbox information */
+	memcpy(mresp, mcp->mb, 64);
+	mresp[3] = mcp->mb[18];
+	mresp[4] = mcp->mb[19];
+	return rval;
+}
+
+int
+qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, uint16_t *mresp)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+	struct qla_hw_data *ha = vha->hw;
+
+	DEBUG11(printk("scsi(%ld): entered.\n", vha->host_no));
+
+	memset(mcp->mb, 0 , sizeof(mcp->mb));
+	mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
+	mcp->mb[1] = mreq->options | BIT_6;	/* BIT_6 specifies 64bit address */
+	if (IS_QLA81XX(ha))
+		mcp->mb[1] |= BIT_15;
+	mcp->mb[2] = IS_QLA81XX(ha) ? vha->fcoe_fcf_idx : 0;
+	mcp->mb[16] = LSW(mreq->rcv_dma);
+	mcp->mb[17] = MSW(mreq->rcv_dma);
+	mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
+	mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
+
+	mcp->mb[10] = LSW(mreq->transfer_size);
+
+	mcp->mb[14] = LSW(mreq->send_dma);
+	mcp->mb[15] = MSW(mreq->send_dma);
+	mcp->mb[20] = LSW(MSD(mreq->send_dma));
+	mcp->mb[21] = MSW(MSD(mreq->send_dma));
+
+	mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15|
+	    MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
+	if (IS_QLA81XX(ha))
+		mcp->out_mb |= MBX_2;
+
+	mcp->in_mb = MBX_0;
+	if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha))
+		mcp->in_mb |= MBX_1;
+	if (IS_QLA81XX(ha))
+		mcp->in_mb |= MBX_3;
+
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
+	mcp->buf_size = mreq->transfer_size;
+
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS) {
+		DEBUG2(printk(KERN_WARNING
+		    "(%ld): failed=%x mb[0]=0x%x mb[1]=0x%x.\n",
+		    vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
+	} else {
+		DEBUG2(printk(KERN_WARNING
+		    "scsi(%ld): done.\n", vha->host_no));
+	}
+
+	/* Copy mailbox information */
+	memcpy(mresp, mcp->mb, 32);
+	return rval;
+}
+int
+qla84xx_reset_chip(scsi_qla_host_t *ha, uint16_t enable_diagnostic,
+    uint16_t *cmd_status)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	DEBUG16(printk("%s(%ld): enable_diag=%d entered.\n", __func__,
+		ha->host_no, enable_diagnostic));
+
+	mcp->mb[0] = MBC_ISP84XX_RESET;
+	mcp->mb[1] = enable_diagnostic;
+	mcp->out_mb = MBX_1|MBX_0;
+	mcp->in_mb = MBX_1|MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
+	rval = qla2x00_mailbox_command(ha, mcp);
+
+	/* Return mailbox statuses. */
+	*cmd_status = mcp->mb[0];
+	if (rval != QLA_SUCCESS)
+		DEBUG16(printk("%s(%ld): failed=%x.\n", __func__, ha->host_no,
+			rval));
+	else
+		DEBUG16(printk("%s(%ld): done.\n", __func__, ha->host_no));
+
+	return rval;
+}
+
+int
 qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
 {
 	int rval;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 8529eb1..46720b2 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -107,6 +107,12 @@
 		" 1 -- load firmware from flash.\n"
 		" 0 -- use default semantics.\n");
 
+int ql2xetsenable;
+module_param(ql2xetsenable, int, S_IRUGO|S_IRUSR);
+MODULE_PARM_DESC(ql2xetsenable,
+		"Enables firmware ETS burst."
+		"Default is 0 - skip ETS enablement.");
+
 /*
  * SCSI host template entry points
  */
@@ -682,44 +688,6 @@
 	return (return_status);
 }
 
-void
-qla2x00_abort_fcport_cmds(fc_port_t *fcport)
-{
-	int cnt;
-	unsigned long flags;
-	srb_t *sp;
-	scsi_qla_host_t *vha = fcport->vha;
-	struct qla_hw_data *ha = vha->hw;
-	struct req_que *req;
-
-	spin_lock_irqsave(&ha->hardware_lock, flags);
-	req = vha->req;
-	for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
-		sp = req->outstanding_cmds[cnt];
-		if (!sp)
-			continue;
-		if (sp->fcport != fcport)
-			continue;
-		if (sp->ctx)
-			continue;
-
-		spin_unlock_irqrestore(&ha->hardware_lock, flags);
-		if (ha->isp_ops->abort_command(sp)) {
-			DEBUG2(qla_printk(KERN_WARNING, ha,
-			"Abort failed --  %lx\n",
-			sp->cmd->serial_number));
-		} else {
-			if (qla2x00_eh_wait_on_command(sp->cmd) !=
-				QLA_SUCCESS)
-				DEBUG2(qla_printk(KERN_WARNING, ha,
-				"Abort failed while waiting --  %lx\n",
-				sp->cmd->serial_number));
-		}
-		spin_lock_irqsave(&ha->hardware_lock, flags);
-	}
-	spin_unlock_irqrestore(&ha->hardware_lock, flags);
-}
-
 /**************************************************************************
 * qla2xxx_eh_abort
 *
@@ -1095,6 +1063,20 @@
 	struct fc_port *fcport;
 	struct qla_hw_data *ha = vha->hw;
 
+	if (ha->flags.enable_target_reset) {
+		list_for_each_entry(fcport, &vha->vp_fcports, list) {
+			if (fcport->port_type != FCT_TARGET)
+				continue;
+
+			ret = ha->isp_ops->target_reset(fcport, 0, 0);
+			if (ret != QLA_SUCCESS) {
+				DEBUG2_3(printk("%s(%ld): bus_reset failed: "
+				    "target_reset=%d d_id=%x.\n", __func__,
+				    vha->host_no, ret, fcport->d_id.b24));
+			}
+		}
+	}
+
 	if (ha->flags.enable_lip_full_login && !IS_QLA81XX(ha)) {
 		ret = qla2x00_full_login_lip(vha);
 		if (ret != QLA_SUCCESS) {
@@ -1117,19 +1099,6 @@
 			qla2x00_wait_for_loop_ready(vha);
 	}
 
-	if (ha->flags.enable_target_reset) {
-		list_for_each_entry(fcport, &vha->vp_fcports, list) {
-			if (fcport->port_type != FCT_TARGET)
-				continue;
-
-			ret = ha->isp_ops->target_reset(fcport, 0, 0);
-			if (ret != QLA_SUCCESS) {
-				DEBUG2_3(printk("%s(%ld): bus_reset failed: "
-				    "target_reset=%d d_id=%x.\n", __func__,
-				    vha->host_no, ret, fcport->d_id.b24));
-			}
-		}
-	}
 	/* Issue marker command only when we are going to start the I/O */
 	vha->marker_needed = 1;
 
@@ -1160,8 +1129,19 @@
 					qla2x00_sp_compl(ha, sp);
 				} else {
 					ctx = sp->ctx;
-					del_timer_sync(&ctx->timer);
-					ctx->free(sp);
+					if (ctx->type == SRB_LOGIN_CMD || ctx->type == SRB_LOGOUT_CMD) {
+						del_timer_sync(&ctx->timer);
+						ctx->free(sp);
+					} else {
+						struct srb_bsg *sp_bsg = (struct srb_bsg *)sp->ctx;
+						if (sp_bsg->bsg_job->request->msgcode == FC_BSG_HST_CT)
+							kfree(sp->fcport);
+						sp_bsg->bsg_job->req->errors = 0;
+						sp_bsg->bsg_job->reply->result = res;
+						sp_bsg->bsg_job->job_done(sp_bsg->bsg_job);
+						kfree(sp->ctx);
+						mempool_free(sp, ha->srb_mempool);
+					}
 				}
 			}
 		}
@@ -1258,7 +1238,7 @@
 		qla2x00_adjust_sdev_qdepth_up(sdev, qdepth);
 		break;
 	default:
-		return EOPNOTSUPP;
+		return -EOPNOTSUPP;
 	}
 
 	return sdev->queue_depth;
@@ -1818,7 +1798,6 @@
 	/* Set EEH reset type to fundamental if required by hba */
 	if ( IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha)) {
 		pdev->needs_freset = 1;
-		pci_save_state(pdev);
 	}
 
 	/* Configure PCI I/O space */
@@ -1970,11 +1949,15 @@
 	host->max_channel = MAX_BUSES - 1;
 	host->max_lun = MAX_LUNS;
 	host->transportt = qla2xxx_transport_template;
+	sht->vendor_id = (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC);
 
 	/* Set up the irqs */
 	ret = qla2x00_request_irqs(ha, rsp);
 	if (ret)
 		goto probe_init_failed;
+
+	pci_save_state(pdev);
+
 	/* Alloc arrays of request and response ring ptrs */
 que_init:
 	if (!qla2x00_alloc_queues(ha)) {
@@ -2176,6 +2159,8 @@
 	kfree(ha);
 	ha = NULL;
 
+	pci_disable_pcie_error_reporting(pdev);
+
 	pci_disable_device(pdev);
 	pci_set_drvdata(pdev, NULL);
 }
@@ -3310,6 +3295,7 @@
 		return PCI_ERS_RESULT_CAN_RECOVER;
 	case pci_channel_io_frozen:
 		ha->flags.eeh_busy = 1;
+		qla2x00_free_irqs(vha);
 		pci_disable_device(pdev);
 		return PCI_ERS_RESULT_NEED_RESET;
 	case pci_channel_io_perm_failure:
@@ -3363,10 +3349,24 @@
 	pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
 	scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
 	struct qla_hw_data *ha = base_vha->hw;
-	int rc;
+	struct rsp_que *rsp;
+	int rc, retries = 10;
 
 	DEBUG17(qla_printk(KERN_WARNING, ha, "slot_reset\n"));
 
+	/* Workaround: the qla2xxx driver, which accesses the hardware
+	 * early in the recovery path, needs the error state to be
+	 * pci_channel_io_normal.  Otherwise mailbox commands time out.
+	 */
+	pdev->error_state = pci_channel_io_normal;
+
+	pci_restore_state(pdev);
+
+	/* pci_restore_state() clears the device's saved_state flag, so
+	 * save the just-restored state again to set the flag back.
+	 */
+	pci_save_state(pdev);
+
 	if (ha->mem_only)
 		rc = pci_enable_device_mem(pdev);
 	else
@@ -3378,27 +3378,23 @@
 		return ret;
 	}
 
+	rsp = ha->rsp_q_map[0];
+	if (qla2x00_request_irqs(ha, rsp))
+		return ret;
+
 	if (ha->isp_ops->pci_config(base_vha))
 		return ret;
 
-#ifdef QL_DEBUG_LEVEL_17
-	{
-		uint8_t b;
-		uint32_t i;
+	while (ha->flags.mbox_busy && retries--)
+		msleep(1000);
 
-		printk("slot_reset_1: ");
-		for (i = 0; i < 256; i++) {
-			pci_read_config_byte(ha->pdev, i, &b);
-			printk("%s%02x", (i%16) ? " " : "\n", b);
-		}
-		printk("\n");
-	}
-#endif
 	set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
 	if (qla2x00_abort_isp(base_vha) == QLA_SUCCESS)
 		ret =  PCI_ERS_RESULT_RECOVERED;
 	clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
 
+	pci_cleanup_aer_uncorrect_error_status(pdev);
+
 	DEBUG17(qla_printk(KERN_WARNING, ha,
 	    "slot_reset-return:ret=%x\n", ret));
 
@@ -3422,8 +3418,6 @@
 	}
 
 	ha->flags.eeh_busy = 0;
-
-	pci_cleanup_aer_uncorrect_error_status(pdev);
 }
 
 static struct pci_error_handlers qla2xxx_err_handler = {
@@ -3536,4 +3530,3 @@
 MODULE_FIRMWARE(FW_FILE_ISP2322);
 MODULE_FIRMWARE(FW_FILE_ISP24XX);
 MODULE_FIRMWARE(FW_FILE_ISP25XX);
-MODULE_FIRMWARE(FW_FILE_ISP81XX);
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index ed36279..8d2fc2f 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION      "8.03.01-k10"
+#define QLA2XXX_VERSION      "8.03.02-k1"
 
 #define QLA_DRIVER_MAJOR_VER	8
 #define QLA_DRIVER_MINOR_VER	3
-#define QLA_DRIVER_PATCH_VER	1
-#define QLA_DRIVER_BETA_VER	0
+#define QLA_DRIVER_PATCH_VER	2
+#define QLA_DRIVER_BETA_VER	1
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index af8c323..92329a4 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -844,10 +844,10 @@
 	DEBUG2(printk("scsi%ld: %s: Get EEProm parameters \n", ha->host_no,
 		      __func__));
 	if (ql4xxx_lock_flash(ha) != QLA_SUCCESS)
-		return (QLA_ERROR);
+		return QLA_ERROR;
 	if (ql4xxx_lock_nvram(ha) != QLA_SUCCESS) {
 		ql4xxx_unlock_flash(ha);
-		return (QLA_ERROR);
+		return QLA_ERROR;
 	}
 
 	/* Get EEPRom Parameters from NVRAM and validate */
@@ -858,20 +858,18 @@
 			rd_nvram_word(ha, eeprom_ext_hw_conf_offset(ha));
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
 	} else {
-		/*
-		 * QLogic adapters should always have a valid NVRAM.
-		 * If not valid, do not load.
-		 */
 		dev_warn(&ha->pdev->dev,
 			   "scsi%ld: %s: EEProm checksum invalid.  "
 			   "Please update your EEPROM\n", ha->host_no,
 			   __func__);
 
-		/* set defaults */
+		/* Attempt to set defaults */
 		if (is_qla4010(ha))
 			extHwConfig.Asuint32_t = 0x1912;
 		else if (is_qla4022(ha) | is_qla4032(ha))
 			extHwConfig.Asuint32_t = 0x0023;
+		else
+			return QLA_ERROR;
 	}
 	DEBUG(printk("scsi%ld: %s: Setting extHwConfig to 0xFFFF%04x\n",
 		     ha->host_no, __func__, extHwConfig.Asuint32_t));
@@ -884,7 +882,7 @@
 	ql4xxx_unlock_nvram(ha);
 	ql4xxx_unlock_flash(ha);
 
-	return (QLA_SUCCESS);
+	return QLA_SUCCESS;
 }
 
 static void qla4x00_pci_config(struct scsi_qla_host *ha)
diff --git a/drivers/scsi/raid_class.c b/drivers/scsi/raid_class.c
index 8e5c169..bd88349 100644
--- a/drivers/scsi/raid_class.c
+++ b/drivers/scsi/raid_class.c
@@ -149,6 +149,7 @@
 	{ RAID_LEVEL_0, "raid0" },
 	{ RAID_LEVEL_1, "raid1" },
 	{ RAID_LEVEL_10, "raid10" },
+	{ RAID_LEVEL_1E, "raid1e" },
 	{ RAID_LEVEL_3, "raid3" },
 	{ RAID_LEVEL_4, "raid4" },
 	{ RAID_LEVEL_5, "raid5" },
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index a60da55..513661f 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -1026,55 +1026,39 @@
  * responsible for calling kfree() on this pointer when it is no longer
  * needed.  If we cannot retrieve the VPD page this routine returns %NULL.
  */
-unsigned char *scsi_get_vpd_page(struct scsi_device *sdev, u8 page)
+int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf,
+		      int buf_len)
 {
 	int i, result;
-	unsigned int len;
-	const unsigned int init_vpd_len = 255;
-	unsigned char *buf = kmalloc(init_vpd_len, GFP_KERNEL);
-
-	if (!buf)
-		return NULL;
 
 	/* Ask for all the pages supported by this device */
-	result = scsi_vpd_inquiry(sdev, buf, 0, init_vpd_len);
+	result = scsi_vpd_inquiry(sdev, buf, 0, buf_len);
 	if (result)
 		goto fail;
 
 	/* If the user actually wanted this page, we can skip the rest */
 	if (page == 0)
-		return buf;
+		return -EINVAL;
 
-	for (i = 0; i < buf[3]; i++)
+	for (i = 0; i < min((int)buf[3], buf_len - 4); i++)
 		if (buf[i + 4] == page)
 			goto found;
+
+	if (i < buf[3] && i >= buf_len - 4)
+		/* ran off the end of the buffer, give us benefit of doubt */
+		goto found;
 	/* The device claims it doesn't support the requested page */
 	goto fail;
 
  found:
-	result = scsi_vpd_inquiry(sdev, buf, page, 255);
+	result = scsi_vpd_inquiry(sdev, buf, page, buf_len);
 	if (result)
 		goto fail;
 
-	/*
-	 * Some pages are longer than 255 bytes.  The actual length of
-	 * the page is returned in the header.
-	 */
-	len = ((buf[2] << 8) | buf[3]) + 4;
-	if (len <= init_vpd_len)
-		return buf;
-
-	kfree(buf);
-	buf = kmalloc(len, GFP_KERNEL);
-	result = scsi_vpd_inquiry(sdev, buf, page, len);
-	if (result)
-		goto fail;
-
-	return buf;
+	return 0;
 
  fail:
-	kfree(buf);
-	return NULL;
+	return -EINVAL;
 }
 EXPORT_SYMBOL_GPL(scsi_get_vpd_page);
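
With the interface change above, the caller now supplies the buffer and checks
an int return (0 on success, -EINVAL on failure) instead of receiving an
allocation.  A minimal sketch of the new calling convention, mirroring the
updated sd.c and ses.c callers later in this patch; the function name and page
choice are only for illustration, and <asm/unaligned.h> is assumed for
get_unaligned_be16().

	static void example_read_block_characteristics(struct scsi_device *sdev)
	{
		const int vpd_len = 32;
		unsigned char *buffer = kmalloc(vpd_len, GFP_KERNEL);

		if (!buffer)
			return;

		/* Block Device Characteristics VPD */
		if (!scsi_get_vpd_page(sdev, 0xb1, buffer, vpd_len)) {
			u16 rot = get_unaligned_be16(&buffer[4]);

			sdev_printk(KERN_INFO, sdev, "medium rotation rate %u\n", rot);
		}
		kfree(buffer);
	}
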
 
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index c664242..5697709 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -773,8 +773,14 @@
 	 * we already took a copy of the original into rq->errors which
 	 * is what gets returned to the user
 	 */
-	if (sense_valid && sshdr.sense_key == RECOVERED_ERROR) {
-		if (!(req->cmd_flags & REQ_QUIET))
+	if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
+		/* if ATA PASS-THROUGH INFORMATION AVAILABLE skip
+		 * print since caller wants ATA registers. Only occurs on
+		 * SCSI ATA PASS_THROUGH commands when CK_COND=1
+		 */
+		if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
+			;
+		else if (!(req->cmd_flags & REQ_QUIET))
 			scsi_print_sense("", cmd);
 		result = 0;
 		/* BLOCK_PC may have set error */
diff --git a/drivers/scsi/scsi_sas_internal.h b/drivers/scsi/scsi_sas_internal.h
index 998cb5b..6266a5d 100644
--- a/drivers/scsi/scsi_sas_internal.h
+++ b/drivers/scsi/scsi_sas_internal.h
@@ -5,7 +5,7 @@
 #define SAS_PHY_ATTRS		17
 #define SAS_PORT_ATTRS		1
 #define SAS_RPORT_ATTRS		7
-#define SAS_END_DEV_ATTRS	3
+#define SAS_END_DEV_ATTRS	5
 #define SAS_EXPANDER_ATTRS	7
 
 struct sas_internal {
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 012f73a..f697229 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -1339,8 +1339,10 @@
 		sdev = scsi_alloc_sdev(starget, 0, NULL);
 		if (!sdev)
 			return 0;
-		if (scsi_device_get(sdev))
+		if (scsi_device_get(sdev)) {
+			__scsi_remove_device(sdev);
 			return 0;
+		}
 	}
 
 	sprintf(devname, "host %d channel %d id %d",
@@ -1907,10 +1909,9 @@
 		goto out;
 
 	sdev = scsi_alloc_sdev(starget, 0, NULL);
-	if (sdev) {
-		sdev->sdev_gendev.parent = get_device(&starget->dev);
+	if (sdev)
 		sdev->borken = 0;
-	} else
+	else
 		scsi_target_reap(starget);
 	put_device(&starget->dev);
  out:
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 5a06505..a4936c4 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -878,7 +878,8 @@
 	struct request_queue *rq = sdev->request_queue;
 	struct scsi_target *starget = sdev->sdev_target;
 
-	if ((error = scsi_device_set_state(sdev, SDEV_RUNNING)) != 0)
+	error = scsi_device_set_state(sdev, SDEV_RUNNING);
+	if (error)
 		return error;
 
 	error = scsi_target_add(starget);
@@ -889,13 +890,13 @@
 	error = device_add(&sdev->sdev_gendev);
 	if (error) {
 		printk(KERN_INFO "error 1\n");
-		goto out_remove;
+		return error;
 	}
 	error = device_add(&sdev->sdev_dev);
 	if (error) {
 		printk(KERN_INFO "error 2\n");
 		device_del(&sdev->sdev_gendev);
-		goto out_remove;
+		return error;
 	}
 	transport_add_device(&sdev->sdev_gendev);
 	sdev->is_visible = 1;
@@ -910,14 +911,14 @@
 	else
 		error = device_create_file(&sdev->sdev_gendev, &dev_attr_queue_depth);
 	if (error)
-		goto out_remove;
+		return error;
 
 	if (sdev->host->hostt->change_queue_type)
 		error = device_create_file(&sdev->sdev_gendev, &sdev_attr_queue_type_rw);
 	else
 		error = device_create_file(&sdev->sdev_gendev, &dev_attr_queue_type);
 	if (error)
-		goto out_remove;
+		return error;
 
 	error = bsg_register_queue(rq, &sdev->sdev_gendev, NULL, NULL);
 
@@ -933,16 +934,11 @@
 			error = device_create_file(&sdev->sdev_gendev,
 					sdev->host->hostt->sdev_attrs[i]);
 			if (error)
-				goto out_remove;
+				return error;
 		}
 	}
 
-	return 0;
-
- out_remove:
-	__scsi_remove_device(sdev);
 	return error;
-
 }
 
 void __scsi_remove_device(struct scsi_device *sdev)
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 653f22a..79660ee 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -475,7 +475,8 @@
 		 "Maximum number of seconds that the FC transport should"
 		 " insulate the loss of a remote port. Once this value is"
 		 " exceeded, the scsi target is removed. Value should be"
-		 " between 1 and SCSI_DEVICE_BLOCK_MAX_TIMEOUT.");
+		 " between 1 and SCSI_DEVICE_BLOCK_MAX_TIMEOUT if"
+		 " fast_io_fail_tmo is not set.");
 
 /*
  * Netlink Infrastructure
@@ -842,9 +843,17 @@
 	    (rport->port_state == FC_PORTSTATE_NOTPRESENT))
 		return -EBUSY;
 	val = simple_strtoul(buf, &cp, 0);
-	if ((*cp && (*cp != '\n')) ||
-	    (val < 0) || (val > SCSI_DEVICE_BLOCK_MAX_TIMEOUT))
+	if ((*cp && (*cp != '\n')) || (val < 0))
 		return -EINVAL;
+
+	/*
+	 * If fast_io_fail is off we have to cap
+	 * dev_loss_tmo at SCSI_DEVICE_BLOCK_MAX_TIMEOUT
+	 */
+	if (rport->fast_io_fail_tmo == -1 &&
+	    val > SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
+		return -EINVAL;
+
 	i->f->set_rport_dev_loss_tmo(rport, val);
 	return count;
 }
@@ -925,9 +934,16 @@
 		rport->fast_io_fail_tmo = -1;
 	else {
 		val = simple_strtoul(buf, &cp, 0);
-		if ((*cp && (*cp != '\n')) ||
-		    (val < 0) || (val >= rport->dev_loss_tmo))
+		if ((*cp && (*cp != '\n')) || (val < 0))
 			return -EINVAL;
+		/*
+		 * Cap fast_io_fail by dev_loss_tmo or
+		 * SCSI_DEVICE_BLOCK_MAX_TIMEOUT.
+		 */
+		if ((val >= rport->dev_loss_tmo) ||
+		    (val > SCSI_DEVICE_BLOCK_MAX_TIMEOUT))
+			return -EINVAL;
+
 		rport->fast_io_fail_tmo = val;
 	}
 	return count;
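
Taken together, the two store handlers above enforce the following relationship
between fast_io_fail_tmo and dev_loss_tmo; the predicate below is only a
restatement of those checks for clarity, not transport code.

	/* Illustrative only: combined validity rule enforced by the two
	 * sysfs store handlers above. */
	static bool fc_tmo_pair_valid(int fast_io_fail_tmo, unsigned long dev_loss_tmo)
	{
		if (fast_io_fail_tmo == -1)	/* fast_io_fail disabled */
			return dev_loss_tmo <= SCSI_DEVICE_BLOCK_MAX_TIMEOUT;

		return fast_io_fail_tmo >= 0 &&
		       fast_io_fail_tmo <= SCSI_DEVICE_BLOCK_MAX_TIMEOUT &&
		       (unsigned long)fast_io_fail_tmo < dev_loss_tmo;
	}
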
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index f27e52d..927e99c 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -155,6 +155,17 @@
 sas_bitfield_name_search(linkspeed, sas_linkspeed_names)
 sas_bitfield_name_set(linkspeed, sas_linkspeed_names)
 
+static struct sas_end_device *sas_sdev_to_rdev(struct scsi_device *sdev)
+{
+	struct sas_rphy *rphy = target_to_rphy(sdev->sdev_target);
+	struct sas_end_device *rdev;
+
+	BUG_ON(rphy->identify.device_type != SAS_END_DEVICE);
+
+	rdev = rphy_to_end_device(rphy);
+	return rdev;
+}
+
 static void sas_smp_request(struct request_queue *q, struct Scsi_Host *shost,
 			    struct sas_rphy *rphy)
 {
@@ -358,6 +369,85 @@
 }
 EXPORT_SYMBOL(sas_remove_host);
 
+/**
+ * sas_tlr_supported - check the TLR bit in VPD page 0x90
+ * @sdev: scsi device struct
+ *
+ * Check whether Transport Layer Retries are supported: the TLR bit in
+ * VPD page 0x90 indicates support.
+ *
+ */
+unsigned int
+sas_tlr_supported(struct scsi_device *sdev)
+{
+	const int vpd_len = 32;
+	struct sas_end_device *rdev = sas_sdev_to_rdev(sdev);
+	char *buffer = kzalloc(vpd_len, GFP_KERNEL);
+	int ret = 0;
+
+	if (!buffer || scsi_get_vpd_page(sdev, 0x90, buffer, vpd_len))
+		goto out;
+
+	/*
+	 * Magic numbers: the protocol-specific VPD page (0x90)
+	 * has a 4 byte header and then one entry per device port.
+	 * The TLR bit is at offset 8 in each port entry, so for the
+	 * first port that is a total offset of 12.
+	 */
+	ret = buffer[12] & 0x01;
+
+ out:
+	kfree(buffer);
+	rdev->tlr_supported = ret;
+	return ret;
+
+}
+EXPORT_SYMBOL_GPL(sas_tlr_supported);
+
+/**
+ * sas_disable_tlr - disable Transport Layer Retries
+ * @sdev: scsi device struct
+ *
+ * Set the tlr_enabled flag to 0.
+ *
+ */
+void
+sas_disable_tlr(struct scsi_device *sdev)
+{
+	struct sas_end_device *rdev = sas_sdev_to_rdev(sdev);
+
+	rdev->tlr_enabled = 0;
+}
+EXPORT_SYMBOL_GPL(sas_disable_tlr);
+
+/**
+ * sas_enable_tlr - enable Transport Layer Retries
+ * @sdev: scsi device struct
+ *
+ * Set the tlr_enabled flag to 1 if the device reports TLR support.
+ *
+ */
+void sas_enable_tlr(struct scsi_device *sdev)
+{
+	unsigned int tlr_supported;
+
+	tlr_supported = sas_tlr_supported(sdev);
+	if (tlr_supported) {
+		struct sas_end_device *rdev = sas_sdev_to_rdev(sdev);
+
+		rdev->tlr_enabled = 1;
+	}
+
+	return;
+}
+EXPORT_SYMBOL_GPL(sas_enable_tlr);
+
+unsigned int sas_is_tlr_enabled(struct scsi_device *sdev)
+{
+	struct sas_end_device *rdev = sas_sdev_to_rdev(sdev);
+	return rdev->tlr_enabled;
+}
+EXPORT_SYMBOL_GPL(sas_is_tlr_enabled);
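
A sketch of how an LLDD might consume the new helpers from its
->slave_configure() hook; the function name is hypothetical and the device is
assumed to sit behind a SAS end-device rphy, since the helpers BUG on anything
else.

	static int example_slave_configure(struct scsi_device *sdev)
	{
		/* sas_enable_tlr() only sets the flag when VPD page 0x90
		 * reports the TLR bit, so it is safe to call unconditionally. */
		sas_enable_tlr(sdev);

		if (sas_is_tlr_enabled(sdev))
			sdev_printk(KERN_INFO, sdev, "TLR enabled\n");

		return 0;
	}
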
 
 /*
  * SAS Phy attributes
@@ -1146,15 +1236,10 @@
 int sas_read_port_mode_page(struct scsi_device *sdev)
 {
 	char *buffer = kzalloc(BUF_SIZE, GFP_KERNEL), *msdata;
-	struct sas_rphy *rphy = target_to_rphy(sdev->sdev_target);
-	struct sas_end_device *rdev;
+	struct sas_end_device *rdev = sas_sdev_to_rdev(sdev);
 	struct scsi_mode_data mode_data;
 	int res, error;
 
-	BUG_ON(rphy->identify.device_type != SAS_END_DEVICE);
-
-	rdev = rphy_to_end_device(rphy);
-
 	if (!buffer)
 		return -ENOMEM;
 
@@ -1207,6 +1292,10 @@
 			"%d\n", int);
 sas_end_dev_simple_attr(initiator_response_timeout, initiator_response_timeout,
 			"%d\n", int);
+sas_end_dev_simple_attr(tlr_supported, tlr_supported,
+			"%d\n", int);
+sas_end_dev_simple_attr(tlr_enabled, tlr_enabled,
+			"%d\n", int);
 
 static DECLARE_TRANSPORT_CLASS(sas_expander_class,
 			       "sas_expander", NULL, NULL, NULL);
@@ -1733,6 +1822,8 @@
 	SETUP_END_DEV_ATTRIBUTE(end_dev_ready_led_meaning);
 	SETUP_END_DEV_ATTRIBUTE(end_dev_I_T_nexus_loss_timeout);
 	SETUP_END_DEV_ATTRIBUTE(end_dev_initiator_response_timeout);
+	SETUP_END_DEV_ATTRIBUTE(end_dev_tlr_supported);
+	SETUP_END_DEV_ATTRIBUTE(end_dev_tlr_enabled);
 	i->end_dev_attrs[count] = NULL;
 
 	count = 0;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 255da53..1dd4d84 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1196,19 +1196,10 @@
 		SCpnt->result = 0;
 		memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
 		break;
-	case ABORTED_COMMAND:
-		if (sshdr.asc == 0x10) { /* DIF: Disk detected corruption */
-			scsi_print_result(SCpnt);
-			scsi_print_sense("sd", SCpnt);
+	case ABORTED_COMMAND: /* DIF: Target detected corruption */
+	case ILLEGAL_REQUEST: /* DIX: Host detected corruption */
+		if (sshdr.asc == 0x10)
 			good_bytes = sd_completed_bytes(SCpnt);
-		}
-		break;
-	case ILLEGAL_REQUEST:
-		if (sshdr.asc == 0x10) { /* DIX: HBA detected corruption */
-			scsi_print_result(SCpnt);
-			scsi_print_sense("sd", SCpnt);
-			good_bytes = sd_completed_bytes(SCpnt);
-		}
 		break;
 	default:
 		break;
@@ -1218,8 +1209,19 @@
 		sd_dif_complete(SCpnt, good_bytes);
 
 	if (scsi_host_dif_capable(sdkp->device->host, sdkp->protection_type)
-	    == SD_DIF_TYPE2_PROTECTION && SCpnt->cmnd != SCpnt->request->cmd)
+	    == SD_DIF_TYPE2_PROTECTION && SCpnt->cmnd != SCpnt->request->cmd) {
+
+		/* We have to print a failed command here as the
+		 * extended CDB gets freed before scsi_io_completion()
+		 * is called.
+		 */
+		if (result)
+			scsi_print_command(SCpnt);
+
 		mempool_free(SCpnt->cmnd, sd_cdb_pool);
+		SCpnt->cmnd = NULL;
+		SCpnt->cmd_len = 0;
+	}
 
 	return good_bytes;
 }
@@ -1946,13 +1948,13 @@
 {
 	struct request_queue *q = sdkp->disk->queue;
 	unsigned int sector_sz = sdkp->device->sector_size;
-	char *buffer;
+	const int vpd_len = 32;
+	unsigned char *buffer = kmalloc(vpd_len, GFP_KERNEL);
 
-	/* Block Limits VPD */
-	buffer = scsi_get_vpd_page(sdkp->device, 0xb0);
-
-	if (buffer == NULL)
-		return;
+	if (!buffer ||
+	    /* Block Limits VPD */
+	    scsi_get_vpd_page(sdkp->device, 0xb0, buffer, vpd_len))
+		goto out;
 
 	blk_queue_io_min(sdkp->disk->queue,
 			 get_unaligned_be16(&buffer[6]) * sector_sz);
@@ -1984,6 +1986,7 @@
 				get_unaligned_be32(&buffer[32]) & ~(1 << 31);
 	}
 
+ out:
 	kfree(buffer);
 }
 
@@ -1993,20 +1996,23 @@
  */
 static void sd_read_block_characteristics(struct scsi_disk *sdkp)
 {
-	char *buffer;
+	unsigned char *buffer;
 	u16 rot;
+	const int vpd_len = 32;
 
-	/* Block Device Characteristics VPD */
-	buffer = scsi_get_vpd_page(sdkp->device, 0xb1);
+	buffer = kmalloc(vpd_len, GFP_KERNEL);
 
-	if (buffer == NULL)
-		return;
+	if (!buffer ||
+	    /* Block Device Characteristics VPD */
+	    scsi_get_vpd_page(sdkp->device, 0xb1, buffer, vpd_len))
+		goto out;
 
 	rot = get_unaligned_be16(&buffer[4]);
 
 	if (rot == 1)
 		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, sdkp->disk->queue);
 
+ out:
 	kfree(buffer);
 }
 
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index 55b034b..1d7a878 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -448,13 +448,17 @@
 		.addr = 0,
 	};
 
-	buf = scsi_get_vpd_page(sdev, 0x83);
-	if (!buf)
-		return;
+	buf = kmalloc(INIT_ALLOC_SIZE, GFP_KERNEL);
+	if (!buf || scsi_get_vpd_page(sdev, 0x83, buf, INIT_ALLOC_SIZE))
+		goto free;
 
 	ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0);
 
 	vpd_len = ((buf[2] << 8) | buf[3]) + 4;
+	kfree(buf);
+	buf = kmalloc(vpd_len, GFP_KERNEL);
+	if (!buf || scsi_get_vpd_page(sdev, 0x83, buf, vpd_len))
+		goto free;
 
 	desc = buf + 4;
 	while (desc < buf + vpd_len) {
diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c
index 54023d41..26e8e0e 100644
--- a/drivers/scsi/u14-34f.c
+++ b/drivers/scsi/u14-34f.c
@@ -1070,7 +1070,7 @@
    char *cur = str;
    int i = 1;
 
-   while (cur && isdigit(*cur) && i <= MAX_INT_PARAM) {
+   while (cur && isdigit(*cur) && i < MAX_INT_PARAM) {
       ints[i++] = simple_strtoul(cur, NULL, 0);
 
       if ((cur = strchr(cur, ',')) != NULL) cur++;
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index d2604c8..e4ac582 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -1069,7 +1069,8 @@
 		free_pages((unsigned long)ctx->sgl, get_order(SGL_SIZE));
 }
 
-static int pvscsi_setup_msix(const struct pvscsi_adapter *adapter, int *irq)
+static int pvscsi_setup_msix(const struct pvscsi_adapter *adapter,
+			     unsigned int *irq)
 {
 	struct msix_entry entry = { 0, PVSCSI_VECTOR_COMPLETION };
 	int ret;
diff --git a/drivers/serial/pmac_zilog.c b/drivers/serial/pmac_zilog.c
index 683e66f..3e2ae48 100644
--- a/drivers/serial/pmac_zilog.c
+++ b/drivers/serial/pmac_zilog.c
@@ -2031,9 +2031,9 @@
 	/*
 	 * XServe's default to 57600 bps
 	 */
-	if (machine_is_compatible("RackMac1,1")
-	    || machine_is_compatible("RackMac1,2")
-	    || machine_is_compatible("MacRISC4"))
+	if (of_machine_is_compatible("RackMac1,1")
+	    || of_machine_is_compatible("RackMac1,2")
+	    || of_machine_is_compatible("MacRISC4"))
 	 	baud = 57600;
 
 	/*
diff --git a/drivers/serial/sh-sci.h b/drivers/serial/sh-sci.h
index 0efcded..f7d2589 100644
--- a/drivers/serial/sh-sci.h
+++ b/drivers/serial/sh-sci.h
@@ -518,34 +518,6 @@
 {
 	if (port->mapbase == 0xfffffe80)
 		return __raw_readb(SCPDR)&0x01 ? 1 : 0; /* SCI */
-	if (port->mapbase == 0xa4000150)
-		return __raw_readb(SCPDR)&0x10 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xa4000140)
-		return __raw_readb(SCPDR)&0x04 ? 1 : 0; /* IRDA */
-	return 1;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7705)
-static inline int sci_rxd_in(struct uart_port *port)
-{
-	if (port->mapbase == SCIF0)
-		return __raw_readb(SCPDR)&0x04 ? 1 : 0; /* IRDA */
-	if (port->mapbase == SCIF2)
-		return __raw_readb(SCPDR)&0x10 ? 1 : 0; /* SCIF */
-	return 1;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
-static inline int sci_rxd_in(struct uart_port *port)
-{
-	  return sci_in(port,SCxSR)&0x0010 ? 1 : 0;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7720) || \
-      defined(CONFIG_CPU_SUBTYPE_SH7721)
-static inline int sci_rxd_in(struct uart_port *port)
-{
-	if (port->mapbase == 0xa4430000)
-		return sci_in(port, SCxSR) & 0x0003 ? 1 : 0;
-	else if (port->mapbase == 0xa4438000)
-		return sci_in(port, SCxSR) & 0x0003 ? 1 : 0;
 	return 1;
 }
 #elif defined(CONFIG_CPU_SUBTYPE_SH7750)  || \
@@ -558,207 +530,17 @@
 {
 	if (port->mapbase == 0xffe00000)
 		return __raw_readb(SCSPTR1)&0x01 ? 1 : 0; /* SCI */
-	if (port->mapbase == 0xffe80000)
-		return __raw_readw(SCSPTR2)&0x0001 ? 1 : 0; /* SCIF */
 	return 1;
 }
-#elif defined(CONFIG_CPU_SUBTYPE_SH4_202)
-static inline int sci_rxd_in(struct uart_port *port)
-{
-	if (port->mapbase == 0xffe80000)
-		return __raw_readw(SCSPTR2)&0x0001 ? 1 : 0; /* SCIF */
-	return 1;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7757)
-static inline int sci_rxd_in(struct uart_port *port)
-{
-	if (port->mapbase == 0xfe4b0000)
-		return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0;
-	if (port->mapbase == 0xfe4c0000)
-		return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0;
-	if (port->mapbase == 0xfe4d0000)
-		return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7760)
-static inline int sci_rxd_in(struct uart_port *port)
-{
-	if (port->mapbase == 0xfe600000)
-		return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xfe610000)
-		return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xfe620000)
-		return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
-	return 1;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7343)
-static inline int sci_rxd_in(struct uart_port *port)
-{
-	if (port->mapbase == 0xffe00000)
-		return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xffe10000)
-		return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xffe20000)
-		return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xffe30000)
-		return __raw_readw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */
-	return 1;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7366)
-static inline int sci_rxd_in(struct uart_port *port)
-{
-	if (port->mapbase == 0xffe00000)
-		return __raw_readb(SCPDR0) & 0x0001 ? 1 : 0; /* SCIF0 */
-	return 1;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7722)
-static inline int sci_rxd_in(struct uart_port *port)
-{
-	if (port->mapbase == 0xffe00000)
-		return __raw_readb(PSDR) & 0x02 ? 1 : 0; /* SCIF0 */
-	if (port->mapbase == 0xffe10000)
-		return __raw_readb(PADR) & 0x40 ? 1 : 0; /* SCIF1 */
-	if (port->mapbase == 0xffe20000)
-		return __raw_readb(PWDR) & 0x04 ? 1 : 0; /* SCIF2 */
-
-	return 1;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7723)
-static inline int sci_rxd_in(struct uart_port *port)
-{
-        if (port->mapbase == 0xffe00000)
-                return __raw_readb(SCSPTR0) & 0x0008 ? 1 : 0; /* SCIF0 */
-        if (port->mapbase == 0xffe10000)
-                return __raw_readb(SCSPTR1) & 0x0020 ? 1 : 0; /* SCIF1 */
-        if (port->mapbase == 0xffe20000)
-                return __raw_readb(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF2 */
-        if (port->mapbase == 0xa4e30000)
-                return __raw_readb(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF3 */
-        if (port->mapbase == 0xa4e40000)
-                return __raw_readb(SCSPTR4) & 0x0001 ? 1 : 0; /* SCIF4 */
-        if (port->mapbase == 0xa4e50000)
-                return __raw_readb(SCSPTR5) & 0x0008 ? 1 : 0; /* SCIF5 */
-        return 1;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7724)
-#  define SCFSR    0x0010
-#  define SCASSR   0x0014
-static inline int sci_rxd_in(struct uart_port *port)
-{
-	if (port->type == PORT_SCIF)
-		return __raw_readw((port->mapbase + SCFSR))  & SCIF_BRK ? 1 : 0;
-	if (port->type == PORT_SCIFA)
-		return __raw_readw((port->mapbase + SCASSR)) & SCIF_BRK ? 1 : 0;
-	return 1;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH5_101) || defined(CONFIG_CPU_SUBTYPE_SH5_103)
-static inline int sci_rxd_in(struct uart_port *port)
-{
-         return sci_in(port, SCSPTR)&0x0001 ? 1 : 0; /* SCIF */
-}
 #elif defined(__H8300H__) || defined(__H8300S__)
 static inline int sci_rxd_in(struct uart_port *port)
 {
 	int ch = (port->mapbase - SMR0) >> 3;
 	return (H8300_SCI_DR(ch) & h8300_sci_pins[ch].rx) ? 1 : 0;
 }
-#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
+#else /* default case for non-SCI processors */
 static inline int sci_rxd_in(struct uart_port *port)
 {
-	if (port->mapbase == 0xffe00000)
-		return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xffe08000)
-		return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xffe10000)
-		return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF/IRDA */
-
-	return 1;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7770)
-static inline int sci_rxd_in(struct uart_port *port)
-{
-	if (port->mapbase == 0xff923000)
-		return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xff924000)
-		return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xff925000)
-		return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
-	return 1;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7780)
-static inline int sci_rxd_in(struct uart_port *port)
-{
-	if (port->mapbase == 0xffe00000)
-		return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xffe10000)
-		return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
-	return 1;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7785) || \
-      defined(CONFIG_CPU_SUBTYPE_SH7786)
-static inline int sci_rxd_in(struct uart_port *port)
-{
-	if (port->mapbase == 0xffea0000)
-		return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xffeb0000)
-		return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xffec0000)
-		return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xffed0000)
-		return __raw_readw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xffee0000)
-		return __raw_readw(SCSPTR4) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xffef0000)
-		return __raw_readw(SCSPTR5) & 0x0001 ? 1 : 0; /* SCIF */
-	return 1;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7201) || \
-      defined(CONFIG_CPU_SUBTYPE_SH7203) || \
-      defined(CONFIG_CPU_SUBTYPE_SH7206) || \
-      defined(CONFIG_CPU_SUBTYPE_SH7263)
-static inline int sci_rxd_in(struct uart_port *port)
-{
-	if (port->mapbase == 0xfffe8000)
-		return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xfffe8800)
-		return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xfffe9000)
-		return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xfffe9800)
-		return __raw_readw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */
-#if defined(CONFIG_CPU_SUBTYPE_SH7201)
-	if (port->mapbase == 0xfffeA000)
-		return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xfffeA800)
-		return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xfffeB000)
-		return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xfffeB800)
-		return __raw_readw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */
-#endif
-	return 1;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
-static inline int sci_rxd_in(struct uart_port *port)
-{
-	if (port->mapbase == 0xf8400000)
-		return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xf8410000)
-		return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xf8420000)
-		return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
-	return 1;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SHX3)
-static inline int sci_rxd_in(struct uart_port *port)
-{
-	if (port->mapbase == 0xffc30000)
-		return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xffc40000)
-		return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xffc50000)
-		return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xffc60000)
-		return __raw_readw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */
 	return 1;
 }
 #endif
diff --git a/drivers/sh/intc.c b/drivers/sh/intc.c
index d5d7f23..3a5a17d 100644
--- a/drivers/sh/intc.c
+++ b/drivers/sh/intc.c
@@ -259,6 +259,43 @@
 	}
 }
 
+static void (*intc_enable_noprio_fns[])(unsigned long addr,
+					unsigned long handle,
+					void (*fn)(unsigned long,
+						   unsigned long,
+						   unsigned long),
+					unsigned int irq) = {
+	[MODE_ENABLE_REG] = intc_mode_field,
+	[MODE_MASK_REG] = intc_mode_zero,
+	[MODE_DUAL_REG] = intc_mode_field,
+	[MODE_PRIO_REG] = intc_mode_field,
+	[MODE_PCLR_REG] = intc_mode_field,
+};
+
+static void intc_enable_disable(struct intc_desc_int *d,
+				unsigned long handle, int do_enable)
+{
+	unsigned long addr;
+	unsigned int cpu;
+	void (*fn)(unsigned long, unsigned long,
+		   void (*)(unsigned long, unsigned long, unsigned long),
+		   unsigned int);
+
+	if (do_enable) {
+		for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
+			addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
+			fn = intc_enable_noprio_fns[_INTC_MODE(handle)];
+			fn(addr, handle, intc_reg_fns[_INTC_FN(handle)], 0);
+		}
+	} else {
+		for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
+			addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
+			fn = intc_disable_fns[_INTC_MODE(handle)];
+			fn(addr, handle, intc_reg_fns[_INTC_FN(handle)], 0);
+		}
+	}
+}
+
 static int intc_set_wake(unsigned int irq, unsigned int on)
 {
 	return 0; /* allow wakeup, but setup hardware in intc_suspend() */
@@ -400,11 +437,11 @@
 static intc_enum __init intc_grp_id(struct intc_desc *desc,
 				    intc_enum enum_id)
 {
-	struct intc_group *g = desc->groups;
+	struct intc_group *g = desc->hw.groups;
 	unsigned int i, j;
 
-	for (i = 0; g && enum_id && i < desc->nr_groups; i++) {
-		g = desc->groups + i;
+	for (i = 0; g && enum_id && i < desc->hw.nr_groups; i++) {
+		g = desc->hw.groups + i;
 
 		for (j = 0; g->enum_ids[j]; j++) {
 			if (g->enum_ids[j] != enum_id)
@@ -417,19 +454,21 @@
 	return 0;
 }
 
-static unsigned int __init intc_mask_data(struct intc_desc *desc,
-					  struct intc_desc_int *d,
-					  intc_enum enum_id, int do_grps)
+static unsigned int __init _intc_mask_data(struct intc_desc *desc,
+					   struct intc_desc_int *d,
+					   intc_enum enum_id,
+					   unsigned int *reg_idx,
+					   unsigned int *fld_idx)
 {
-	struct intc_mask_reg *mr = desc->mask_regs;
-	unsigned int i, j, fn, mode;
+	struct intc_mask_reg *mr = desc->hw.mask_regs;
+	unsigned int fn, mode;
 	unsigned long reg_e, reg_d;
 
-	for (i = 0; mr && enum_id && i < desc->nr_mask_regs; i++) {
-		mr = desc->mask_regs + i;
+	while (mr && enum_id && *reg_idx < desc->hw.nr_mask_regs) {
+		mr = desc->hw.mask_regs + *reg_idx;
 
-		for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
-			if (mr->enum_ids[j] != enum_id)
+		for (; *fld_idx < ARRAY_SIZE(mr->enum_ids); (*fld_idx)++) {
+			if (mr->enum_ids[*fld_idx] != enum_id)
 				continue;
 
 			if (mr->set_reg && mr->clr_reg) {
@@ -455,29 +494,49 @@
 					intc_get_reg(d, reg_e),
 					intc_get_reg(d, reg_d),
 					1,
-					(mr->reg_width - 1) - j);
+					(mr->reg_width - 1) - *fld_idx);
 		}
+
+		*fld_idx = 0;
+		(*reg_idx)++;
 	}
 
+	return 0;
+}
+
+static unsigned int __init intc_mask_data(struct intc_desc *desc,
+					  struct intc_desc_int *d,
+					  intc_enum enum_id, int do_grps)
+{
+	unsigned int i = 0;
+	unsigned int j = 0;
+	unsigned int ret;
+
+	ret = _intc_mask_data(desc, d, enum_id, &i, &j);
+	if (ret)
+		return ret;
+
 	if (do_grps)
 		return intc_mask_data(desc, d, intc_grp_id(desc, enum_id), 0);
 
 	return 0;
 }
 
-static unsigned int __init intc_prio_data(struct intc_desc *desc,
-					  struct intc_desc_int *d,
-					  intc_enum enum_id, int do_grps)
+static unsigned int __init _intc_prio_data(struct intc_desc *desc,
+					   struct intc_desc_int *d,
+					   intc_enum enum_id,
+					   unsigned int *reg_idx,
+					   unsigned int *fld_idx)
 {
-	struct intc_prio_reg *pr = desc->prio_regs;
-	unsigned int i, j, fn, mode, bit;
+	struct intc_prio_reg *pr = desc->hw.prio_regs;
+	unsigned int fn, n, mode, bit;
 	unsigned long reg_e, reg_d;
 
-	for (i = 0; pr && enum_id && i < desc->nr_prio_regs; i++) {
-		pr = desc->prio_regs + i;
+	while (pr && enum_id && *reg_idx < desc->hw.nr_prio_regs) {
+		pr = desc->hw.prio_regs + *reg_idx;
 
-		for (j = 0; j < ARRAY_SIZE(pr->enum_ids); j++) {
-			if (pr->enum_ids[j] != enum_id)
+		for (; *fld_idx < ARRAY_SIZE(pr->enum_ids); (*fld_idx)++) {
+			if (pr->enum_ids[*fld_idx] != enum_id)
 				continue;
 
 			if (pr->set_reg && pr->clr_reg) {
@@ -495,34 +554,79 @@
 			}
 
 			fn += (pr->reg_width >> 3) - 1;
+			n = *fld_idx + 1;
 
-			BUG_ON((j + 1) * pr->field_width > pr->reg_width);
+			BUG_ON(n * pr->field_width > pr->reg_width);
 
-			bit = pr->reg_width - ((j + 1) * pr->field_width);
+			bit = pr->reg_width - (n * pr->field_width);
 
 			return _INTC_MK(fn, mode,
 					intc_get_reg(d, reg_e),
 					intc_get_reg(d, reg_d),
 					pr->field_width, bit);
 		}
+
+		*fld_idx = 0;
+		(*reg_idx)++;
 	}
 
+	return 0;
+}
+
+static unsigned int __init intc_prio_data(struct intc_desc *desc,
+					  struct intc_desc_int *d,
+					  intc_enum enum_id, int do_grps)
+{
+	unsigned int i = 0;
+	unsigned int j = 0;
+	unsigned int ret;
+
+	ret = _intc_prio_data(desc, d, enum_id, &i, &j);
+	if (ret)
+		return ret;
+
 	if (do_grps)
 		return intc_prio_data(desc, d, intc_grp_id(desc, enum_id), 0);
 
 	return 0;
 }
 
+static void __init intc_enable_disable_enum(struct intc_desc *desc,
+					    struct intc_desc_int *d,
+					    intc_enum enum_id, int enable)
+{
+	unsigned int i, j, data;
+
+	/* go through and enable/disable all mask bits */
+	i = j = 0;
+	do {
+		data = _intc_mask_data(desc, d, enum_id, &i, &j);
+		if (data)
+			intc_enable_disable(d, data, enable);
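+		/* step past the field just handled so the next call resumes the scan */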
+		j++;
+	} while (data);
+
+	/* go through and enable/disable all priority fields */
+	i = j = 0;
+	do {
+		data = _intc_prio_data(desc, d, enum_id, &i, &j);
+		if (data)
+			intc_enable_disable(d, data, enable);
+
+		j++;
+	} while (data);
+}
+
 static unsigned int __init intc_ack_data(struct intc_desc *desc,
 					  struct intc_desc_int *d,
 					  intc_enum enum_id)
 {
-	struct intc_mask_reg *mr = desc->ack_regs;
+	struct intc_mask_reg *mr = desc->hw.ack_regs;
 	unsigned int i, j, fn, mode;
 	unsigned long reg_e, reg_d;
 
-	for (i = 0; mr && enum_id && i < desc->nr_ack_regs; i++) {
-		mr = desc->ack_regs + i;
+	for (i = 0; mr && enum_id && i < desc->hw.nr_ack_regs; i++) {
+		mr = desc->hw.ack_regs + i;
 
 		for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
 			if (mr->enum_ids[j] != enum_id)
@@ -549,11 +653,11 @@
 					   struct intc_desc_int *d,
 					   intc_enum enum_id)
 {
-	struct intc_sense_reg *sr = desc->sense_regs;
+	struct intc_sense_reg *sr = desc->hw.sense_regs;
 	unsigned int i, j, fn, bit;
 
-	for (i = 0; sr && enum_id && i < desc->nr_sense_regs; i++) {
-		sr = desc->sense_regs + i;
+	for (i = 0; sr && enum_id && i < desc->hw.nr_sense_regs; i++) {
+		sr = desc->hw.sense_regs + i;
 
 		for (j = 0; j < ARRAY_SIZE(sr->enum_ids); j++) {
 			if (sr->enum_ids[j] != enum_id)
@@ -656,7 +760,7 @@
 	/* irq should be disabled by default */
 	d->chip.mask(irq);
 
-	if (desc->ack_regs)
+	if (desc->hw.ack_regs)
 		ack_handle[irq] = intc_ack_data(desc, d, enum_id);
 }
 
@@ -684,6 +788,7 @@
 void __init register_intc_controller(struct intc_desc *desc)
 {
 	unsigned int i, k, smp;
+	struct intc_hw_desc *hw = &desc->hw;
 	struct intc_desc_int *d;
 
 	d = kzalloc(sizeof(*d), GFP_NOWAIT);
@@ -691,10 +796,10 @@
 	INIT_LIST_HEAD(&d->list);
 	list_add(&d->list, &intc_list);
 
-	d->nr_reg = desc->mask_regs ? desc->nr_mask_regs * 2 : 0;
-	d->nr_reg += desc->prio_regs ? desc->nr_prio_regs * 2 : 0;
-	d->nr_reg += desc->sense_regs ? desc->nr_sense_regs : 0;
-	d->nr_reg += desc->ack_regs ? desc->nr_ack_regs : 0;
+	d->nr_reg = hw->mask_regs ? hw->nr_mask_regs * 2 : 0;
+	d->nr_reg += hw->prio_regs ? hw->nr_prio_regs * 2 : 0;
+	d->nr_reg += hw->sense_regs ? hw->nr_sense_regs : 0;
+	d->nr_reg += hw->ack_regs ? hw->nr_ack_regs : 0;
 
 	d->reg = kzalloc(d->nr_reg * sizeof(*d->reg), GFP_NOWAIT);
 #ifdef CONFIG_SMP
@@ -702,30 +807,31 @@
 #endif
 	k = 0;
 
-	if (desc->mask_regs) {
-		for (i = 0; i < desc->nr_mask_regs; i++) {
-			smp = IS_SMP(desc->mask_regs[i]);
-			k += save_reg(d, k, desc->mask_regs[i].set_reg, smp);
-			k += save_reg(d, k, desc->mask_regs[i].clr_reg, smp);
+	if (hw->mask_regs) {
+		for (i = 0; i < hw->nr_mask_regs; i++) {
+			smp = IS_SMP(hw->mask_regs[i]);
+			k += save_reg(d, k, hw->mask_regs[i].set_reg, smp);
+			k += save_reg(d, k, hw->mask_regs[i].clr_reg, smp);
 		}
 	}
 
-	if (desc->prio_regs) {
-		d->prio = kzalloc(desc->nr_vectors * sizeof(*d->prio), GFP_NOWAIT);
+	if (hw->prio_regs) {
+		d->prio = kzalloc(hw->nr_vectors * sizeof(*d->prio),
+				  GFP_NOWAIT);
 
-		for (i = 0; i < desc->nr_prio_regs; i++) {
-			smp = IS_SMP(desc->prio_regs[i]);
-			k += save_reg(d, k, desc->prio_regs[i].set_reg, smp);
-			k += save_reg(d, k, desc->prio_regs[i].clr_reg, smp);
+		for (i = 0; i < hw->nr_prio_regs; i++) {
+			smp = IS_SMP(hw->prio_regs[i]);
+			k += save_reg(d, k, hw->prio_regs[i].set_reg, smp);
+			k += save_reg(d, k, hw->prio_regs[i].clr_reg, smp);
 		}
 	}
 
-	if (desc->sense_regs) {
-		d->sense = kzalloc(desc->nr_vectors * sizeof(*d->sense), GFP_NOWAIT);
+	if (hw->sense_regs) {
+		d->sense = kzalloc(hw->nr_vectors * sizeof(*d->sense),
+				   GFP_NOWAIT);
 
-		for (i = 0; i < desc->nr_sense_regs; i++) {
-			k += save_reg(d, k, desc->sense_regs[i].reg, 0);
-		}
+		for (i = 0; i < hw->nr_sense_regs; i++)
+			k += save_reg(d, k, hw->sense_regs[i].reg, 0);
 	}
 
 	d->chip.name = desc->name;
@@ -738,18 +844,26 @@
 	d->chip.set_type = intc_set_sense;
 	d->chip.set_wake = intc_set_wake;
 
-	if (desc->ack_regs) {
-		for (i = 0; i < desc->nr_ack_regs; i++)
-			k += save_reg(d, k, desc->ack_regs[i].set_reg, 0);
+	if (hw->ack_regs) {
+		for (i = 0; i < hw->nr_ack_regs; i++)
+			k += save_reg(d, k, hw->ack_regs[i].set_reg, 0);
 
 		d->chip.mask_ack = intc_mask_ack;
 	}
 
+	/* disable bits matching force_disable before registering irqs */
+	if (desc->force_disable)
+		intc_enable_disable_enum(desc, d, desc->force_disable, 0);
+
+	/* disable bits matching force_enable before registering irqs */
+	if (desc->force_enable)
+		intc_enable_disable_enum(desc, d, desc->force_enable, 0);
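+	/* force_enable sources stay masked until all vectors are registered below */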
+
 	BUG_ON(k > 256); /* _INTC_ADDR_E() and _INTC_ADDR_D() are 8 bits */
 
 	/* register the vectors one by one */
-	for (i = 0; i < desc->nr_vectors; i++) {
-		struct intc_vect *vect = desc->vectors + i;
+	for (i = 0; i < hw->nr_vectors; i++) {
+		struct intc_vect *vect = hw->vectors + i;
 		unsigned int irq = evt2irq(vect->vect);
 		struct irq_desc *irq_desc;
 
@@ -764,8 +878,8 @@
 
 		intc_register_irq(desc, d, vect->enum_id, irq);
 
-		for (k = i + 1; k < desc->nr_vectors; k++) {
-			struct intc_vect *vect2 = desc->vectors + k;
+		for (k = i + 1; k < hw->nr_vectors; k++) {
+			struct intc_vect *vect2 = hw->vectors + k;
 			unsigned int irq2 = evt2irq(vect2->vect);
 
 			if (vect->enum_id != vect2->enum_id)
@@ -785,11 +899,15 @@
 			vect2->enum_id = 0;
 
 			/* redirect this interrupt to the first one */
-			set_irq_chip_and_handler_name(irq2, &d->chip,
-					intc_redirect_irq, "redirect");
+			set_irq_chip(irq2, &dummy_irq_chip);
+			set_irq_chained_handler(irq2, intc_redirect_irq);
 			set_irq_data(irq2, (void *)irq);
 		}
 	}
+
+	/* enable bits matching force_enable after registering irqs */
+	if (desc->force_enable)
+		intc_enable_disable_enum(desc, d, desc->force_enable, 1);
 }
 
 static int intc_suspend(struct sys_device *dev, pm_message_t state)
@@ -872,7 +990,7 @@
 /*
  * Dynamic IRQ allocation and deallocation
  */
-static unsigned int create_irq_on_node(unsigned int irq_want, int node)
+unsigned int create_irq_nr(unsigned int irq_want, int node)
 {
 	unsigned int irq = 0, new;
 	unsigned long flags;
@@ -881,24 +999,28 @@
 	spin_lock_irqsave(&vector_lock, flags);
 
 	/*
-	 * First try the wanted IRQ, then scan.
+	 * First try the wanted IRQ
 	 */
-	if (test_and_set_bit(irq_want, intc_irq_map)) {
+	if (test_and_set_bit(irq_want, intc_irq_map) == 0) {
+		new = irq_want;
+	} else {
+		/* .. then fall back to scanning. */
 		new = find_first_zero_bit(intc_irq_map, nr_irqs);
 		if (unlikely(new == nr_irqs))
 			goto out_unlock;
 
-		desc = irq_to_desc_alloc_node(new, node);
-		if (unlikely(!desc)) {
-			pr_info("can't get irq_desc for %d\n", new);
-			goto out_unlock;
-		}
-
-		desc = move_irq_desc(desc, node);
 		__set_bit(new, intc_irq_map);
-		irq = new;
 	}
 
+	desc = irq_to_desc_alloc_node(new, node);
+	if (unlikely(!desc)) {
+		pr_info("can't get irq_desc for %d\n", new);
+		goto out_unlock;
+	}
+
+	desc = move_irq_desc(desc, node);
+	irq = new;
+
 out_unlock:
 	spin_unlock_irqrestore(&vector_lock, flags);
 
@@ -913,7 +1035,7 @@
 	int nid = cpu_to_node(smp_processor_id());
 	int irq;
 
-	irq = create_irq_on_node(NR_IRQS_LEGACY, nid);
+	irq = create_irq_nr(NR_IRQS_LEGACY, nid);
 	if (irq == 0)
 		irq = -1;
 
diff --git a/drivers/sh/pfc.c b/drivers/sh/pfc.c
index 082604e..cf0303ac 100644
--- a/drivers/sh/pfc.c
+++ b/drivers/sh/pfc.c
@@ -337,12 +337,39 @@
 		if (!enum_id)
 			break;
 
+		/* first check if this is a function enum */
 		in_range = enum_in_range(enum_id, &gpioc->function);
-		if (!in_range && range) {
-			in_range = enum_in_range(enum_id, range);
+		if (!in_range) {
+			/* not a function enum */
+			if (range) {
+				/*
+				 * another range exists, so this pin is
+				 * a regular GPIO pin that is now being
+				 * bound to a specific direction.
+				 *
+				 * for this case we only allow function enums
+				 * and the enums that match the other range.
+				 */
+				in_range = enum_in_range(enum_id, range);
 
-			if (in_range && enum_id == range->force)
-				continue;
+				/*
+				 * special case pass through for fixed
+				 * input-only or output-only pins without
+				 * function enum register association.
+				 */
+				if (in_range && enum_id == range->force)
+					continue;
+			} else {
+				/*
+				 * no other range exists, so this pin
+				 * must then be of the function type.
+				 *
+				 * allow function type pins to select
+				 * any combination of function/in/out
+				 * in their MARK lists.
+				 */
+				in_range = 1;
+			}
 		}
 
 		if (!in_range)
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index f55eb01..0fee95c 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -100,6 +100,23 @@
 	  inexpensive battery powered microcontroller evaluation board.
 	  This same cable can be used to flash new firmware.
 
+config SPI_COLDFIRE_QSPI
+	tristate "Freescale Coldfire QSPI controller"
+	depends on (M520x || M523x || M5249 || M527x || M528x || M532x)
+	help
+	  This enables support for the Coldfire QSPI controller in master
+	  mode.
+
+	  This driver can also be built as a module.  If so, the module
+	  will be called coldfire_qspi.
+
+config SPI_DAVINCI
+	tristate "SPI controller driver for DaVinci/DA8xx SoCs"
+	depends on SPI_MASTER && ARCH_DAVINCI
+	select SPI_BITBANG
+	help
+	  SPI master controller for DaVinci and DA8xx SPI modules.
+
 config SPI_GPIO
 	tristate "GPIO-based bitbanging SPI Master"
 	depends on GENERIC_GPIO
@@ -308,7 +325,7 @@
 #
 
 config SPI_DESIGNWARE
-	bool "DesignWare SPI controller core support"
+	tristate "DesignWare SPI controller core support"
 	depends on SPI_MASTER
 	help
 	  general driver for SPI controller core from DesignWare
@@ -317,6 +334,10 @@
 	tristate "PCI interface driver for DW SPI core"
 	depends on SPI_DESIGNWARE && PCI
 
+config SPI_DW_MMIO
+	tristate "Memory-mapped I/O interface driver for DW SPI core"
+	depends on SPI_DESIGNWARE && HAVE_CLK
+
 #
 # There are lots of SPI device types, with sensors and memory
 # being probably the most widely used ones.
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index f3d2810..d7d0f89 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -16,8 +16,11 @@
 obj-$(CONFIG_SPI_BITBANG)		+= spi_bitbang.o
 obj-$(CONFIG_SPI_AU1550)		+= au1550_spi.o
 obj-$(CONFIG_SPI_BUTTERFLY)		+= spi_butterfly.o
+obj-$(CONFIG_SPI_COLDFIRE_QSPI)		+= coldfire_qspi.o
+obj-$(CONFIG_SPI_DAVINCI)		+= davinci_spi.o
 obj-$(CONFIG_SPI_DESIGNWARE)		+= dw_spi.o
 obj-$(CONFIG_SPI_DW_PCI)		+= dw_spi_pci.o
+obj-$(CONFIG_SPI_DW_MMIO)		+= dw_spi_mmio.o
 obj-$(CONFIG_SPI_GPIO)			+= spi_gpio.o
 obj-$(CONFIG_SPI_IMX)			+= spi_imx.o
 obj-$(CONFIG_SPI_LM70_LLP)		+= spi_lm70llp.o
diff --git a/drivers/spi/coldfire_qspi.c b/drivers/spi/coldfire_qspi.c
new file mode 100644
index 0000000..59be3ef
--- /dev/null
+++ b/drivers/spi/coldfire_qspi.c
@@ -0,0 +1,640 @@
+/*
+ * Freescale/Motorola Coldfire Queued SPI driver
+ *
+ * Copyright 2010 Steven King <sfking@fdwdc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA
+ *
+*/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/spi/spi.h>
+
+#include <asm/coldfire.h>
+#include <asm/mcfqspi.h>
+
+#define	DRIVER_NAME "mcfqspi"
+
+#define	MCFQSPI_BUSCLK			(MCF_BUSCLK / 2)
+
+#define	MCFQSPI_QMR			0x00
+#define		MCFQSPI_QMR_MSTR	0x8000
+#define		MCFQSPI_QMR_CPOL	0x0200
+#define		MCFQSPI_QMR_CPHA	0x0100
+#define	MCFQSPI_QDLYR			0x04
+#define		MCFQSPI_QDLYR_SPE	0x8000
+#define	MCFQSPI_QWR			0x08
+#define		MCFQSPI_QWR_HALT	0x8000
+#define		MCFQSPI_QWR_WREN	0x4000
+#define		MCFQSPI_QWR_CSIV	0x1000
+#define	MCFQSPI_QIR			0x0C
+#define		MCFQSPI_QIR_WCEFB	0x8000
+#define		MCFQSPI_QIR_ABRTB	0x4000
+#define		MCFQSPI_QIR_ABRTL	0x1000
+#define		MCFQSPI_QIR_WCEFE	0x0800
+#define		MCFQSPI_QIR_ABRTE	0x0400
+#define		MCFQSPI_QIR_SPIFE	0x0100
+#define		MCFQSPI_QIR_WCEF	0x0008
+#define		MCFQSPI_QIR_ABRT	0x0004
+#define		MCFQSPI_QIR_SPIF	0x0001
+#define	MCFQSPI_QAR			0x010
+#define		MCFQSPI_QAR_TXBUF	0x00
+#define		MCFQSPI_QAR_RXBUF	0x10
+#define		MCFQSPI_QAR_CMDBUF	0x20
+#define	MCFQSPI_QDR			0x014
+#define	MCFQSPI_QCR			0x014
+#define		MCFQSPI_QCR_CONT	0x8000
+#define		MCFQSPI_QCR_BITSE	0x4000
+#define		MCFQSPI_QCR_DT		0x2000
+
+struct mcfqspi {
+	void __iomem *iobase;
+	int irq;
+	struct clk *clk;
+	struct mcfqspi_cs_control *cs_control;
+
+	wait_queue_head_t waitq;
+
+	struct work_struct work;
+	struct workqueue_struct *workq;
+	spinlock_t lock;
+	struct list_head msgq;
+};
+
+static void mcfqspi_wr_qmr(struct mcfqspi *mcfqspi, u16 val)
+{
+	writew(val, mcfqspi->iobase + MCFQSPI_QMR);
+}
+
+static void mcfqspi_wr_qdlyr(struct mcfqspi *mcfqspi, u16 val)
+{
+	writew(val, mcfqspi->iobase + MCFQSPI_QDLYR);
+}
+
+static u16 mcfqspi_rd_qdlyr(struct mcfqspi *mcfqspi)
+{
+	return readw(mcfqspi->iobase + MCFQSPI_QDLYR);
+}
+
+static void mcfqspi_wr_qwr(struct mcfqspi *mcfqspi, u16 val)
+{
+	writew(val, mcfqspi->iobase + MCFQSPI_QWR);
+}
+
+static void mcfqspi_wr_qir(struct mcfqspi *mcfqspi, u16 val)
+{
+	writew(val, mcfqspi->iobase + MCFQSPI_QIR);
+}
+
+static void mcfqspi_wr_qar(struct mcfqspi *mcfqspi, u16 val)
+{
+	writew(val, mcfqspi->iobase + MCFQSPI_QAR);
+}
+
+static void mcfqspi_wr_qdr(struct mcfqspi *mcfqspi, u16 val)
+{
+	writew(val, mcfqspi->iobase + MCFQSPI_QDR);
+}
+
+static u16 mcfqspi_rd_qdr(struct mcfqspi *mcfqspi)
+{
+	return readw(mcfqspi->iobase + MCFQSPI_QDR);
+}
+
+static void mcfqspi_cs_select(struct mcfqspi *mcfqspi, u8 chip_select,
+			    bool cs_high)
+{
+	mcfqspi->cs_control->select(mcfqspi->cs_control, chip_select, cs_high);
+}
+
+static void mcfqspi_cs_deselect(struct mcfqspi *mcfqspi, u8 chip_select,
+				bool cs_high)
+{
+	mcfqspi->cs_control->deselect(mcfqspi->cs_control, chip_select, cs_high);
+}
+
+static int mcfqspi_cs_setup(struct mcfqspi *mcfqspi)
+{
+	return (mcfqspi->cs_control && mcfqspi->cs_control->setup) ?
+		mcfqspi->cs_control->setup(mcfqspi->cs_control) : 0;
+}
+
+static void mcfqspi_cs_teardown(struct mcfqspi *mcfqspi)
+{
+	if (mcfqspi->cs_control && mcfqspi->cs_control->teardown)
+		mcfqspi->cs_control->teardown(mcfqspi->cs_control);
+}
+
+static u8 mcfqspi_qmr_baud(u32 speed_hz)
+{
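+	/* round the divider up so the SPI clock stays at or below speed_hz */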
+	return clamp((MCFQSPI_BUSCLK + speed_hz - 1) / speed_hz, 2u, 255u);
+}
+
+static bool mcfqspi_qdlyr_spe(struct mcfqspi *mcfqspi)
+{
+	return mcfqspi_rd_qdlyr(mcfqspi) & MCFQSPI_QDLYR_SPE;
+}
+
+static irqreturn_t mcfqspi_irq_handler(int this_irq, void *dev_id)
+{
+	struct mcfqspi *mcfqspi = dev_id;
+
+	/* clear interrupt */
+	mcfqspi_wr_qir(mcfqspi, MCFQSPI_QIR_SPIFE | MCFQSPI_QIR_SPIF);
+	wake_up(&mcfqspi->waitq);
+
+	return IRQ_HANDLED;
+}
+
+static void mcfqspi_transfer_msg8(struct mcfqspi *mcfqspi, unsigned count,
+				  const u8 *txbuf, u8 *rxbuf)
+{
+	unsigned i, n, offset = 0;
+
+	n = min(count, 16u);
+
+	mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_CMDBUF);
+	for (i = 0; i < n; ++i)
+		mcfqspi_wr_qdr(mcfqspi, MCFQSPI_QCR_BITSE);
+
+	mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_TXBUF);
+	if (txbuf)
+		for (i = 0; i < n; ++i)
+			mcfqspi_wr_qdr(mcfqspi, *txbuf++);
+	else
+		for (i = 0; i < count; ++i)
+			mcfqspi_wr_qdr(mcfqspi, 0);
+
+	count -= n;
+	if (count) {
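+		/* longer transfers ping-pong between the two 8-entry queue halves */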
+		u16 qwr = 0xf08;
+		mcfqspi_wr_qwr(mcfqspi, 0x700);
+		mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE);
+
+		do {
+			wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi));
+			mcfqspi_wr_qwr(mcfqspi, qwr);
+			mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE);
+			if (rxbuf) {
+				mcfqspi_wr_qar(mcfqspi,
+					       MCFQSPI_QAR_RXBUF + offset);
+				for (i = 0; i < 8; ++i)
+					*rxbuf++ = mcfqspi_rd_qdr(mcfqspi);
+			}
+			n = min(count, 8u);
+			if (txbuf) {
+				mcfqspi_wr_qar(mcfqspi,
+					       MCFQSPI_QAR_TXBUF + offset);
+				for (i = 0; i < n; ++i)
+					mcfqspi_wr_qdr(mcfqspi, *txbuf++);
+			}
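+			/* low byte: first queue entry of this half, high byte: its last entry */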
+			qwr = (offset ? 0x808 : 0) + ((n - 1) << 8);
+			offset ^= 8;
+			count -= n;
+		} while (count);
+		wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi));
+		mcfqspi_wr_qwr(mcfqspi, qwr);
+		mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE);
+		if (rxbuf) {
+			mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_RXBUF + offset);
+			for (i = 0; i < 8; ++i)
+				*rxbuf++ = mcfqspi_rd_qdr(mcfqspi);
+			offset ^= 8;
+		}
+	} else {
+		mcfqspi_wr_qwr(mcfqspi, (n - 1) << 8);
+		mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE);
+	}
+	wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi));
+	if (rxbuf) {
+		mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_RXBUF + offset);
+		for (i = 0; i < n; ++i)
+			*rxbuf++ = mcfqspi_rd_qdr(mcfqspi);
+	}
+}
+
+static void mcfqspi_transfer_msg16(struct mcfqspi *mcfqspi, unsigned count,
+				   const u16 *txbuf, u16 *rxbuf)
+{
+	unsigned i, n, offset = 0;
+
+	n = min(count, 16u);
+
+	mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_CMDBUF);
+	for (i = 0; i < n; ++i)
+		mcfqspi_wr_qdr(mcfqspi, MCFQSPI_QCR_BITSE);
+
+	mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_TXBUF);
+	if (txbuf)
+		for (i = 0; i < n; ++i)
+			mcfqspi_wr_qdr(mcfqspi, *txbuf++);
+	else
+		for (i = 0; i < count; ++i)
+			mcfqspi_wr_qdr(mcfqspi, 0);
+
+	count -= n;
+	if (count) {
+		u16 qwr = 0xf08;
+		mcfqspi_wr_qwr(mcfqspi, 0x700);
+		mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE);
+
+		do {
+			wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi));
+			mcfqspi_wr_qwr(mcfqspi, qwr);
+			mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE);
+			if (rxbuf) {
+				mcfqspi_wr_qar(mcfqspi,
+					       MCFQSPI_QAR_RXBUF + offset);
+				for (i = 0; i < 8; ++i)
+					*rxbuf++ = mcfqspi_rd_qdr(mcfqspi);
+			}
+			n = min(count, 8u);
+			if (txbuf) {
+				mcfqspi_wr_qar(mcfqspi,
+					       MCFQSPI_QAR_TXBUF + offset);
+				for (i = 0; i < n; ++i)
+					mcfqspi_wr_qdr(mcfqspi, *txbuf++);
+			}
+			qwr = (offset ? 0x808 : 0x000) + ((n - 1) << 8);
+			offset ^= 8;
+			count -= n;
+		} while (count);
+		wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi));
+		mcfqspi_wr_qwr(mcfqspi, qwr);
+		mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE);
+		if (rxbuf) {
+			mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_RXBUF + offset);
+			for (i = 0; i < 8; ++i)
+				*rxbuf++ = mcfqspi_rd_qdr(mcfqspi);
+			offset ^= 8;
+		}
+	} else {
+		mcfqspi_wr_qwr(mcfqspi, (n - 1) << 8);
+		mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE);
+	}
+	wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi));
+	if (rxbuf) {
+		mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_RXBUF + offset);
+		for (i = 0; i < n; ++i)
+			*rxbuf++ = mcfqspi_rd_qdr(mcfqspi);
+	}
+}
+
+static void mcfqspi_work(struct work_struct *work)
+{
+	struct mcfqspi *mcfqspi = container_of(work, struct mcfqspi, work);
+	unsigned long flags;
+
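+	/* drain the message queue; the lock is dropped while each message runs */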
+	spin_lock_irqsave(&mcfqspi->lock, flags);
+	while (!list_empty(&mcfqspi->msgq)) {
+		struct spi_message *msg;
+		struct spi_device *spi;
+		struct spi_transfer *xfer;
+		int status = 0;
+
+		msg = container_of(mcfqspi->msgq.next, struct spi_message,
+				   queue);
+
+		list_del_init(&mcfqspi->msgq);
+		spin_unlock_irqrestore(&mcfqspi->lock, flags);
+
+		spi = msg->spi;
+
+		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+			bool cs_high = spi->mode & SPI_CS_HIGH;
+			u16 qmr = MCFQSPI_QMR_MSTR;
+
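+			/* per-transfer word size and speed override the device defaults */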
+			if (xfer->bits_per_word)
+				qmr |= xfer->bits_per_word << 10;
+			else
+				qmr |= spi->bits_per_word << 10;
+			if (spi->mode & SPI_CPHA)
+				qmr |= MCFQSPI_QMR_CPHA;
+			if (spi->mode & SPI_CPOL)
+				qmr |= MCFQSPI_QMR_CPOL;
+			if (xfer->speed_hz)
+				qmr |= mcfqspi_qmr_baud(xfer->speed_hz);
+			else
+				qmr |= mcfqspi_qmr_baud(spi->max_speed_hz);
+			mcfqspi_wr_qmr(mcfqspi, qmr);
+
+			mcfqspi_cs_select(mcfqspi, spi->chip_select, cs_high);
+
+			mcfqspi_wr_qir(mcfqspi, MCFQSPI_QIR_SPIFE);
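+			/* transfer lengths are in bytes; 16-bit words move len / 2 entries */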
+			if ((xfer->bits_per_word ? xfer->bits_per_word :
+						spi->bits_per_word) == 8)
+				mcfqspi_transfer_msg8(mcfqspi, xfer->len,
+						      xfer->tx_buf,
+						      xfer->rx_buf);
+			else
+				mcfqspi_transfer_msg16(mcfqspi, xfer->len / 2,
+						       xfer->tx_buf,
+						       xfer->rx_buf);
+			mcfqspi_wr_qir(mcfqspi, 0);
+
+			if (xfer->delay_usecs)
+				udelay(xfer->delay_usecs);
+			if (xfer->cs_change) {
+				if (!list_is_last(&xfer->transfer_list,
+						  &msg->transfers))
+					mcfqspi_cs_deselect(mcfqspi,
+							    spi->chip_select,
+							    cs_high);
+			} else {
+				if (list_is_last(&xfer->transfer_list,
+						 &msg->transfers))
+					mcfqspi_cs_deselect(mcfqspi,
+							    spi->chip_select,
+							    cs_high);
+			}
+			msg->actual_length += xfer->len;
+		}
+		msg->status = status;
+		msg->complete(msg->context);
+
+		spin_lock_irqsave(&mcfqspi->lock, flags);
+	}
+	spin_unlock_irqrestore(&mcfqspi->lock, flags);
+}
+
+static int mcfqspi_transfer(struct spi_device *spi, struct spi_message *msg)
+{
+	struct mcfqspi *mcfqspi;
+	struct spi_transfer *xfer;
+	unsigned long flags;
+
+	mcfqspi = spi_master_get_devdata(spi->master);
+
+	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+		if (xfer->bits_per_word && ((xfer->bits_per_word < 8)
+					|| (xfer->bits_per_word > 16))) {
+			dev_dbg(&spi->dev,
+				"%d bits per word is not supported\n",
+				xfer->bits_per_word);
+			goto fail;
+		}
+		if (xfer->speed_hz) {
+			u32 real_speed = MCFQSPI_BUSCLK /
+				mcfqspi_qmr_baud(xfer->speed_hz);
+			if (real_speed != xfer->speed_hz)
+				dev_dbg(&spi->dev,
+					"using speed %d instead of %d\n",
+					real_speed, xfer->speed_hz);
+		}
+	}
+	msg->status = -EINPROGRESS;
+	msg->actual_length = 0;
+
+	spin_lock_irqsave(&mcfqspi->lock, flags);
+	list_add_tail(&msg->queue, &mcfqspi->msgq);
+	queue_work(mcfqspi->workq, &mcfqspi->work);
+	spin_unlock_irqrestore(&mcfqspi->lock, flags);
+
+	return 0;
+fail:
+	msg->status = -EINVAL;
+	return -EINVAL;
+}
+
+static int mcfqspi_setup(struct spi_device *spi)
+{
+	if ((spi->bits_per_word < 8) || (spi->bits_per_word > 16)) {
+		dev_dbg(&spi->dev, "%d bits per word is not supported\n",
+			spi->bits_per_word);
+		return -EINVAL;
+	}
+	if (spi->chip_select >= spi->master->num_chipselect) {
+		dev_dbg(&spi->dev, "%d chip select is out of range\n",
+			spi->chip_select);
+		return -EINVAL;
+	}
+
+	mcfqspi_cs_deselect(spi_master_get_devdata(spi->master),
+			    spi->chip_select, spi->mode & SPI_CS_HIGH);
+
+	dev_dbg(&spi->dev,
+			"bits per word %d, chip select %d, speed %d KHz\n",
+			spi->bits_per_word, spi->chip_select,
+			(MCFQSPI_BUSCLK / mcfqspi_qmr_baud(spi->max_speed_hz))
+			/ 1000);
+
+	return 0;
+}
+
+static int __devinit mcfqspi_probe(struct platform_device *pdev)
+{
+	struct spi_master *master;
+	struct mcfqspi *mcfqspi;
+	struct resource *res;
+	struct mcfqspi_platform_data *pdata;
+	int status;
+
+	master = spi_alloc_master(&pdev->dev, sizeof(*mcfqspi));
+	if (master == NULL) {
+		dev_dbg(&pdev->dev, "spi_alloc_master failed\n");
+		return -ENOMEM;
+	}
+
+	mcfqspi = spi_master_get_devdata(master);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_dbg(&pdev->dev, "platform_get_resource failed\n");
+		status = -ENXIO;
+		goto fail0;
+	}
+
+	if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
+		dev_dbg(&pdev->dev, "request_mem_region failed\n");
+		status = -EBUSY;
+		goto fail0;
+	}
+
+	mcfqspi->iobase = ioremap(res->start, resource_size(res));
+	if (!mcfqspi->iobase) {
+		dev_dbg(&pdev->dev, "ioremap failed\n");
+		status = -ENOMEM;
+		goto fail1;
+	}
+
+	mcfqspi->irq = platform_get_irq(pdev, 0);
+	if (mcfqspi->irq < 0) {
+		dev_dbg(&pdev->dev, "platform_get_irq failed\n");
+		status = -ENXIO;
+		goto fail2;
+	}
+
+	status = request_irq(mcfqspi->irq, mcfqspi_irq_handler, IRQF_DISABLED,
+			     pdev->name, mcfqspi);
+	if (status) {
+		dev_dbg(&pdev->dev, "request_irq failed\n");
+		goto fail2;
+	}
+
+	mcfqspi->clk = clk_get(&pdev->dev, "qspi_clk");
+	if (IS_ERR(mcfqspi->clk)) {
+		dev_dbg(&pdev->dev, "clk_get failed\n");
+		status = PTR_ERR(mcfqspi->clk);
+		goto fail3;
+	}
+	clk_enable(mcfqspi->clk);
+
+	mcfqspi->workq = create_singlethread_workqueue(dev_name(master->dev.parent));
+	if (!mcfqspi->workq) {
+		dev_dbg(&pdev->dev, "create_workqueue failed\n");
+		status = -ENOMEM;
+		goto fail4;
+	}
+	INIT_WORK(&mcfqspi->work, mcfqspi_work);
+	spin_lock_init(&mcfqspi->lock);
+	INIT_LIST_HEAD(&mcfqspi->msgq);
+	init_waitqueue_head(&mcfqspi->waitq);
+
+	pdata = pdev->dev.platform_data;
+	if (!pdata) {
+		dev_dbg(&pdev->dev, "platform data is missing\n");
+		goto fail5;
+	}
+	master->bus_num = pdata->bus_num;
+	master->num_chipselect = pdata->num_chipselect;
+
+	mcfqspi->cs_control = pdata->cs_control;
+	status = mcfqspi_cs_setup(mcfqspi);
+	if (status) {
+		dev_dbg(&pdev->dev, "error initializing cs_control\n");
+		goto fail5;
+	}
+
+	master->mode_bits = SPI_CS_HIGH | SPI_CPOL | SPI_CPHA;
+	master->setup = mcfqspi_setup;
+	master->transfer = mcfqspi_transfer;
+
+	platform_set_drvdata(pdev, master);
+
+	status = spi_register_master(master);
+	if (status) {
+		dev_dbg(&pdev->dev, "spi_register_master failed\n");
+		goto fail6;
+	}
+	dev_info(&pdev->dev, "Coldfire QSPI bus driver\n");
+
+	return 0;
+
+fail6:
+	mcfqspi_cs_teardown(mcfqspi);
+fail5:
+	destroy_workqueue(mcfqspi->workq);
+fail4:
+	clk_disable(mcfqspi->clk);
+	clk_put(mcfqspi->clk);
+fail3:
+	free_irq(mcfqspi->irq, mcfqspi);
+fail2:
+	iounmap(mcfqspi->iobase);
+fail1:
+	release_mem_region(res->start, resource_size(res));
+fail0:
+	spi_master_put(master);
+
+	dev_dbg(&pdev->dev, "Coldfire QSPI probe failed\n");
+
+	return status;
+}
+
+static int __devexit mcfqspi_remove(struct platform_device *pdev)
+{
+	struct spi_master *master = platform_get_drvdata(pdev);
+	struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
+	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	/* disable the hardware (set the baud rate to 0) */
+	mcfqspi_wr_qmr(mcfqspi, MCFQSPI_QMR_MSTR);
+
+	platform_set_drvdata(pdev, NULL);
+	mcfqspi_cs_teardown(mcfqspi);
+	destroy_workqueue(mcfqspi->workq);
+	clk_disable(mcfqspi->clk);
+	clk_put(mcfqspi->clk);
+	free_irq(mcfqspi->irq, mcfqspi);
+	iounmap(mcfqspi->iobase);
+	release_mem_region(res->start, resource_size(res));
+	spi_unregister_master(master);
+	spi_master_put(master);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+
+static int mcfqspi_suspend(struct device *dev)
+{
+	struct mcfqspi *mcfqspi = platform_get_drvdata(to_platform_device(dev));
+
+	clk_disable(mcfqspi->clk);
+
+	return 0;
+}
+
+static int mcfqspi_resume(struct device *dev)
+{
+	struct mcfqspi *mcfqspi = platform_get_drvdata(to_platform_device(dev));
+
+	clk_enable(mcfqspi->clk);
+
+	return 0;
+}
+
+static struct dev_pm_ops mcfqspi_dev_pm_ops = {
+	.suspend	= mcfqspi_suspend,
+	.resume		= mcfqspi_resume,
+};
+
+#define	MCFQSPI_DEV_PM_OPS	(&mcfqspi_dev_pm_ops)
+#else
+#define	MCFQSPI_DEV_PM_OPS	NULL
+#endif
+
+static struct platform_driver mcfqspi_driver = {
+	.driver.name	= DRIVER_NAME,
+	.driver.owner	= THIS_MODULE,
+	.driver.pm	= MCFQSPI_DEV_PM_OPS,
+	.remove		= __devexit_p(mcfqspi_remove),
+};
+
+static int __init mcfqspi_init(void)
+{
+	return platform_driver_probe(&mcfqspi_driver, mcfqspi_probe);
+}
+module_init(mcfqspi_init);
+
+static void __exit mcfqspi_exit(void)
+{
+	platform_driver_unregister(&mcfqspi_driver);
+}
+module_exit(mcfqspi_exit);
+
+MODULE_AUTHOR("Steven King <sfking@fdwdc.com>");
+MODULE_DESCRIPTION("Coldfire QSPI Controller Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/spi/davinci_spi.c b/drivers/spi/davinci_spi.c
new file mode 100644
index 0000000..225ab60
--- /dev/null
+++ b/drivers/spi/davinci_spi.c
@@ -0,0 +1,1255 @@
+/*
+ * Copyright (C) 2009 Texas Instruments.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+
+#include <mach/spi.h>
+#include <mach/edma.h>
+
+#define SPI_NO_RESOURCE		((resource_size_t)-1)
+
+#define SPI_MAX_CHIPSELECT	2
+
+#define CS_DEFAULT	0xFF
+
+#define SPI_BUFSIZ	(SMP_CACHE_BYTES + 1)
+#define DAVINCI_DMA_DATA_TYPE_S8	0x01
+#define DAVINCI_DMA_DATA_TYPE_S16	0x02
+#define DAVINCI_DMA_DATA_TYPE_S32	0x04
+
+#define SPIFMT_PHASE_MASK	BIT(16)
+#define SPIFMT_POLARITY_MASK	BIT(17)
+#define SPIFMT_DISTIMER_MASK	BIT(18)
+#define SPIFMT_SHIFTDIR_MASK	BIT(20)
+#define SPIFMT_WAITENA_MASK	BIT(21)
+#define SPIFMT_PARITYENA_MASK	BIT(22)
+#define SPIFMT_ODD_PARITY_MASK	BIT(23)
+#define SPIFMT_WDELAY_MASK	0x3f000000u
+#define SPIFMT_WDELAY_SHIFT	24
+#define SPIFMT_CHARLEN_MASK	0x0000001Fu
+
+/* SPIGCR1 */
+#define SPIGCR1_SPIENA_MASK	0x01000000u
+
+/* SPIPC0 */
+#define SPIPC0_DIFUN_MASK	BIT(11)		/* MISO */
+#define SPIPC0_DOFUN_MASK	BIT(10)		/* MOSI */
+#define SPIPC0_CLKFUN_MASK	BIT(9)		/* CLK */
+#define SPIPC0_SPIENA_MASK	BIT(8)		/* nREADY */
+#define SPIPC0_EN1FUN_MASK	BIT(1)
+#define SPIPC0_EN0FUN_MASK	BIT(0)
+
+#define SPIINT_MASKALL		0x0101035F
+#define SPI_INTLVL_1		0x000001FFu
+#define SPI_INTLVL_0		0x00000000u
+
+/* SPIDAT1 */
+#define SPIDAT1_CSHOLD_SHIFT	28
+#define SPIDAT1_CSNR_SHIFT	16
+#define SPIGCR1_CLKMOD_MASK	BIT(1)
+#define SPIGCR1_MASTER_MASK     BIT(0)
+#define SPIGCR1_LOOPBACK_MASK	BIT(16)
+
+/* SPIBUF */
+#define SPIBUF_TXFULL_MASK	BIT(29)
+#define SPIBUF_RXEMPTY_MASK	BIT(31)
+
+/* Error Masks */
+#define SPIFLG_DLEN_ERR_MASK		BIT(0)
+#define SPIFLG_TIMEOUT_MASK		BIT(1)
+#define SPIFLG_PARERR_MASK		BIT(2)
+#define SPIFLG_DESYNC_MASK		BIT(3)
+#define SPIFLG_BITERR_MASK		BIT(4)
+#define SPIFLG_OVRRUN_MASK		BIT(6)
+#define SPIFLG_RX_INTR_MASK		BIT(8)
+#define SPIFLG_TX_INTR_MASK		BIT(9)
+#define SPIFLG_BUF_INIT_ACTIVE_MASK	BIT(24)
+#define SPIFLG_MASK			(SPIFLG_DLEN_ERR_MASK \
+				| SPIFLG_TIMEOUT_MASK | SPIFLG_PARERR_MASK \
+				| SPIFLG_DESYNC_MASK | SPIFLG_BITERR_MASK \
+				| SPIFLG_OVRRUN_MASK | SPIFLG_RX_INTR_MASK \
+				| SPIFLG_TX_INTR_MASK \
+				| SPIFLG_BUF_INIT_ACTIVE_MASK)
+
+#define SPIINT_DLEN_ERR_INTR	BIT(0)
+#define SPIINT_TIMEOUT_INTR	BIT(1)
+#define SPIINT_PARERR_INTR	BIT(2)
+#define SPIINT_DESYNC_INTR	BIT(3)
+#define SPIINT_BITERR_INTR	BIT(4)
+#define SPIINT_OVRRUN_INTR	BIT(6)
+#define SPIINT_RX_INTR		BIT(8)
+#define SPIINT_TX_INTR		BIT(9)
+#define SPIINT_DMA_REQ_EN	BIT(16)
+#define SPIINT_ENABLE_HIGHZ	BIT(24)
+
+#define SPI_T2CDELAY_SHIFT	16
+#define SPI_C2TDELAY_SHIFT	24
+
+/* SPI Controller registers */
+#define SPIGCR0		0x00
+#define SPIGCR1		0x04
+#define SPIINT		0x08
+#define SPILVL		0x0c
+#define SPIFLG		0x10
+#define SPIPC0		0x14
+#define SPIPC1		0x18
+#define SPIPC2		0x1c
+#define SPIPC3		0x20
+#define SPIPC4		0x24
+#define SPIPC5		0x28
+#define SPIPC6		0x2c
+#define SPIPC7		0x30
+#define SPIPC8		0x34
+#define SPIDAT0		0x38
+#define SPIDAT1		0x3c
+#define SPIBUF		0x40
+#define SPIEMU		0x44
+#define SPIDELAY	0x48
+#define SPIDEF		0x4c
+#define SPIFMT0		0x50
+#define SPIFMT1		0x54
+#define SPIFMT2		0x58
+#define SPIFMT3		0x5c
+#define TGINTVEC0	0x60
+#define TGINTVEC1	0x64
+
+struct davinci_spi_slave {
+	u32	cmd_to_write;
+	u32	clk_ctrl_to_write;
+	u32	bytes_per_word;
+	u8	active_cs;
+};
+
+/* We have 2 DMA channels per CS, one for RX and one for TX */
+struct davinci_spi_dma {
+	int			dma_tx_channel;
+	int			dma_rx_channel;
+	int			dma_tx_sync_dev;
+	int			dma_rx_sync_dev;
+	enum dma_event_q	eventq;
+
+	struct completion	dma_tx_completion;
+	struct completion	dma_rx_completion;
+};
+
+/* SPI Controller driver's private data. */
+struct davinci_spi {
+	struct spi_bitbang	bitbang;
+	struct clk		*clk;
+
+	u8			version;
+	resource_size_t		pbase;
+	void __iomem		*base;
+	size_t			region_size;
+	u32			irq;
+	struct completion	done;
+
+	const void		*tx;
+	void			*rx;
+	u8			*tmp_buf;
+	int			count;
+	struct davinci_spi_dma	*dma_channels;
+	struct			davinci_spi_platform_data *pdata;
+
+	void			(*get_rx)(u32 rx_data, struct davinci_spi *);
+	u32			(*get_tx)(struct davinci_spi *);
+
+	struct davinci_spi_slave slave[SPI_MAX_CHIPSELECT];
+};
+
+static unsigned use_dma;
+
+static void davinci_spi_rx_buf_u8(u32 data, struct davinci_spi *davinci_spi)
+{
+	u8 *rx = davinci_spi->rx;
+
+	*rx++ = (u8)data;
+	davinci_spi->rx = rx;
+}
+
+static void davinci_spi_rx_buf_u16(u32 data, struct davinci_spi *davinci_spi)
+{
+	u16 *rx = davinci_spi->rx;
+
+	*rx++ = (u16)data;
+	davinci_spi->rx = rx;
+}
+
+static u32 davinci_spi_tx_buf_u8(struct davinci_spi *davinci_spi)
+{
+	u32 data;
+	const u8 *tx = davinci_spi->tx;
+
+	data = *tx++;
+	davinci_spi->tx = tx;
+	return data;
+}
+
+static u32 davinci_spi_tx_buf_u16(struct davinci_spi *davinci_spi)
+{
+	u32 data;
+	const u16 *tx = davinci_spi->tx;
+
+	data = *tx++;
+	davinci_spi->tx = tx;
+	return data;
+}
+
+static inline void set_io_bits(void __iomem *addr, u32 bits)
+{
+	u32 v = ioread32(addr);
+
+	v |= bits;
+	iowrite32(v, addr);
+}
+
+static inline void clear_io_bits(void __iomem *addr, u32 bits)
+{
+	u32 v = ioread32(addr);
+
+	v &= ~bits;
+	iowrite32(v, addr);
+}
+
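+/* each chip select has its own format register: SPIFMTn = SPIFMT0 + 4 * n */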
+static inline void set_fmt_bits(void __iomem *addr, u32 bits, int cs_num)
+{
+	set_io_bits(addr + SPIFMT0 + (0x4 * cs_num), bits);
+}
+
+static inline void clear_fmt_bits(void __iomem *addr, u32 bits, int cs_num)
+{
+	clear_io_bits(addr + SPIFMT0 + (0x4 * cs_num), bits);
+}
+
+static void davinci_spi_set_dma_req(const struct spi_device *spi, int enable)
+{
+	struct davinci_spi *davinci_spi = spi_master_get_devdata(spi->master);
+
+	if (enable)
+		set_io_bits(davinci_spi->base + SPIINT, SPIINT_DMA_REQ_EN);
+	else
+		clear_io_bits(davinci_spi->base + SPIINT, SPIINT_DMA_REQ_EN);
+}
+
+/*
+ * Interface to control the chip select signal
+ */
+static void davinci_spi_chipselect(struct spi_device *spi, int value)
+{
+	struct davinci_spi *davinci_spi;
+	struct davinci_spi_platform_data *pdata;
+	u32 data1_reg_val = 0;
+
+	davinci_spi = spi_master_get_devdata(spi->master);
+	pdata = davinci_spi->pdata;
+
+	/*
+	 * Board specific chip select logic decides the polarity and cs
+	 * line for the controller
+	 */
+	if (value == BITBANG_CS_INACTIVE) {
+		set_io_bits(davinci_spi->base + SPIDEF, CS_DEFAULT);
+
+		data1_reg_val |= CS_DEFAULT << SPIDAT1_CSNR_SHIFT;
+		iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1);
+
+		while ((ioread32(davinci_spi->base + SPIBUF)
+					& SPIBUF_RXEMPTY_MASK) == 0)
+			cpu_relax();
+	}
+}
+
+/**
+ * davinci_spi_setup_transfer - determine the transfer method
+ * @spi: spi device on which the data transfer is to be done
+ * @t: spi transfer holding the transfer parameters
+ *
+ * This function determines the data transfer method (8/16/32 bit transfer).
+ * It also sets the SPI clock control register according to the
+ * SPI slave device frequency.
+ */
+static int davinci_spi_setup_transfer(struct spi_device *spi,
+		struct spi_transfer *t)
+{
+
+	struct davinci_spi *davinci_spi;
+	struct davinci_spi_platform_data *pdata;
+	u8 bits_per_word = 0;
+	u32 hz = 0, prescale;
+
+	davinci_spi = spi_master_get_devdata(spi->master);
+	pdata = davinci_spi->pdata;
+
+	if (t) {
+		bits_per_word = t->bits_per_word;
+		hz = t->speed_hz;
+	}
+
+	/* if bits_per_word is not set, fall back to the device default */
+	if (!bits_per_word)
+		bits_per_word = spi->bits_per_word;
+
+	/*
+	 * Assign function pointer to appropriate transfer method
+	 * 8bit, 16bit or 32bit transfer
+	 */
+	if (bits_per_word <= 8 && bits_per_word >= 2) {
+		davinci_spi->get_rx = davinci_spi_rx_buf_u8;
+		davinci_spi->get_tx = davinci_spi_tx_buf_u8;
+		davinci_spi->slave[spi->chip_select].bytes_per_word = 1;
+	} else if (bits_per_word <= 16 && bits_per_word >= 2) {
+		davinci_spi->get_rx = davinci_spi_rx_buf_u16;
+		davinci_spi->get_tx = davinci_spi_tx_buf_u16;
+		davinci_spi->slave[spi->chip_select].bytes_per_word = 2;
+	} else
+		return -EINVAL;
+
+	if (!hz)
+		hz = spi->max_speed_hz;
+
+	clear_fmt_bits(davinci_spi->base, SPIFMT_CHARLEN_MASK,
+			spi->chip_select);
+	set_fmt_bits(davinci_spi->base, bits_per_word & 0x1f,
+			spi->chip_select);
+
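+	/* bit rate = module clock / (prescale + 1); the field is 8 bits wide */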
+	prescale = ((clk_get_rate(davinci_spi->clk) / hz) - 1) & 0xff;
+
+	clear_fmt_bits(davinci_spi->base, 0x0000ff00, spi->chip_select);
+	set_fmt_bits(davinci_spi->base, prescale << 8, spi->chip_select);
+
+	return 0;
+}
+
+static void davinci_spi_dma_rx_callback(unsigned lch, u16 ch_status, void *data)
+{
+	struct spi_device *spi = (struct spi_device *)data;
+	struct davinci_spi *davinci_spi;
+	struct davinci_spi_dma *davinci_spi_dma;
+	struct davinci_spi_platform_data *pdata;
+
+	davinci_spi = spi_master_get_devdata(spi->master);
+	davinci_spi_dma = &(davinci_spi->dma_channels[spi->chip_select]);
+	pdata = davinci_spi->pdata;
+
+	if (ch_status == DMA_COMPLETE)
+		edma_stop(davinci_spi_dma->dma_rx_channel);
+	else
+		edma_clean_channel(davinci_spi_dma->dma_rx_channel);
+
+	complete(&davinci_spi_dma->dma_rx_completion);
+	/* We must disable the DMA RX request */
+	davinci_spi_set_dma_req(spi, 0);
+}
+
+static void davinci_spi_dma_tx_callback(unsigned lch, u16 ch_status, void *data)
+{
+	struct spi_device *spi = (struct spi_device *)data;
+	struct davinci_spi *davinci_spi;
+	struct davinci_spi_dma *davinci_spi_dma;
+	struct davinci_spi_platform_data *pdata;
+
+	davinci_spi = spi_master_get_devdata(spi->master);
+	davinci_spi_dma = &(davinci_spi->dma_channels[spi->chip_select]);
+	pdata = davinci_spi->pdata;
+
+	if (ch_status == DMA_COMPLETE)
+		edma_stop(davinci_spi_dma->dma_tx_channel);
+	else
+		edma_clean_channel(davinci_spi_dma->dma_tx_channel);
+
+	complete(&davinci_spi_dma->dma_tx_completion);
+	/* We must disable the DMA TX request */
+	davinci_spi_set_dma_req(spi, 0);
+}
+
+static int davinci_spi_request_dma(struct spi_device *spi)
+{
+	struct davinci_spi *davinci_spi;
+	struct davinci_spi_dma *davinci_spi_dma;
+	struct davinci_spi_platform_data *pdata;
+	struct device *sdev;
+	int r;
+
+	davinci_spi = spi_master_get_devdata(spi->master);
+	davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];
+	pdata = davinci_spi->pdata;
+	sdev = davinci_spi->bitbang.master->dev.parent;
+
+	r = edma_alloc_channel(davinci_spi_dma->dma_rx_sync_dev,
+				davinci_spi_dma_rx_callback, spi,
+				davinci_spi_dma->eventq);
+	if (r < 0) {
+		dev_dbg(sdev, "Unable to request DMA channel for SPI RX\n");
+		return -EAGAIN;
+	}
+	davinci_spi_dma->dma_rx_channel = r;
+	r = edma_alloc_channel(davinci_spi_dma->dma_tx_sync_dev,
+				davinci_spi_dma_tx_callback, spi,
+				davinci_spi_dma->eventq);
+	if (r < 0) {
+		edma_free_channel(davinci_spi_dma->dma_rx_channel);
+		davinci_spi_dma->dma_rx_channel = -1;
+		dev_dbg(sdev, "Unable to request DMA channel for SPI TX\n");
+		return -EAGAIN;
+	}
+	davinci_spi_dma->dma_tx_channel = r;
+
+	return 0;
+}
+
+/**
+ * davinci_spi_setup - set the default transfer method
+ * @spi: spi device on which the data transfer is to be done
+ *
+ * This function sets the default transfer method.
+ */
+
+static int davinci_spi_setup(struct spi_device *spi)
+{
+	int retval;
+	struct davinci_spi *davinci_spi;
+	struct davinci_spi_dma *davinci_spi_dma;
+	struct device *sdev;
+
+	davinci_spi = spi_master_get_devdata(spi->master);
+	sdev = davinci_spi->bitbang.master->dev.parent;
+
+	/* if bits_per_word is zero, default to 8 */
+	if (!spi->bits_per_word)
+		spi->bits_per_word = 8;
+
+	davinci_spi->slave[spi->chip_select].cmd_to_write = 0;
+
+	if (use_dma && davinci_spi->dma_channels) {
+		davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];
+
+		if ((davinci_spi_dma->dma_rx_channel == -1)
+				|| (davinci_spi_dma->dma_tx_channel == -1)) {
+			retval = davinci_spi_request_dma(spi);
+			if (retval < 0)
+				return retval;
+		}
+	}
+
+	/*
+	 * SPI in DaVinci and DA8xx operate between
+	 * 600 KHz and 50 MHz
+	 */
+	if (spi->max_speed_hz < 600000 || spi->max_speed_hz > 50000000) {
+		dev_dbg(sdev, "Operating frequency is not in acceptable "
+				"range\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Set up SPIFMTn register, unique to this chipselect.
+	 *
+	 * NOTE: we could do all of these with one write.  Also, some
+	 * of the "version 2" features are found in chips that don't
+	 * support all of them...
+	 */
+	if (spi->mode & SPI_LSB_FIRST)
+		set_fmt_bits(davinci_spi->base, SPIFMT_SHIFTDIR_MASK,
+				spi->chip_select);
+	else
+		clear_fmt_bits(davinci_spi->base, SPIFMT_SHIFTDIR_MASK,
+				spi->chip_select);
+
+	if (spi->mode & SPI_CPOL)
+		set_fmt_bits(davinci_spi->base, SPIFMT_POLARITY_MASK,
+				spi->chip_select);
+	else
+		clear_fmt_bits(davinci_spi->base, SPIFMT_POLARITY_MASK,
+				spi->chip_select);
+
+	if (!(spi->mode & SPI_CPHA))
+		set_fmt_bits(davinci_spi->base, SPIFMT_PHASE_MASK,
+				spi->chip_select);
+	else
+		clear_fmt_bits(davinci_spi->base, SPIFMT_PHASE_MASK,
+				spi->chip_select);
+
+	/*
+	 * Version 1 hardware supports two basic SPI modes:
+	 *  - Standard SPI mode uses 4 pins, with chipselect
+	 *  - 3 pin SPI is a 4 pin variant without CS (SPI_NO_CS)
+	 *	(distinct from SPI_3WIRE, with just one data wire;
+	 *	or similar variants without MOSI or without MISO)
+	 *
+	 * Version 2 hardware supports an optional handshaking signal,
+	 * so it can support two more modes:
+	 *  - 5 pin SPI variant is standard SPI plus SPI_READY
+	 *  - 4 pin with enable is (SPI_READY | SPI_NO_CS)
+	 */
+
+	if (davinci_spi->version == SPI_VERSION_2) {
+		clear_fmt_bits(davinci_spi->base, SPIFMT_WDELAY_MASK,
+				spi->chip_select);
+		set_fmt_bits(davinci_spi->base,
+				(davinci_spi->pdata->wdelay
+						<< SPIFMT_WDELAY_SHIFT)
+					& SPIFMT_WDELAY_MASK,
+				spi->chip_select);
+
+		if (davinci_spi->pdata->odd_parity)
+			set_fmt_bits(davinci_spi->base,
+					SPIFMT_ODD_PARITY_MASK,
+					spi->chip_select);
+		else
+			clear_fmt_bits(davinci_spi->base,
+					SPIFMT_ODD_PARITY_MASK,
+					spi->chip_select);
+
+		if (davinci_spi->pdata->parity_enable)
+			set_fmt_bits(davinci_spi->base,
+					SPIFMT_PARITYENA_MASK,
+					spi->chip_select);
+		else
+			clear_fmt_bits(davinci_spi->base,
+					SPIFMT_PARITYENA_MASK,
+					spi->chip_select);
+
+		if (davinci_spi->pdata->wait_enable)
+			set_fmt_bits(davinci_spi->base,
+					SPIFMT_WAITENA_MASK,
+					spi->chip_select);
+		else
+			clear_fmt_bits(davinci_spi->base,
+					SPIFMT_WAITENA_MASK,
+					spi->chip_select);
+
+		if (davinci_spi->pdata->timer_disable)
+			set_fmt_bits(davinci_spi->base,
+					SPIFMT_DISTIMER_MASK,
+					spi->chip_select);
+		else
+			clear_fmt_bits(davinci_spi->base,
+					SPIFMT_DISTIMER_MASK,
+					spi->chip_select);
+	}
+
+	retval = davinci_spi_setup_transfer(spi, NULL);
+
+	return retval;
+}
+
+static void davinci_spi_cleanup(struct spi_device *spi)
+{
+	struct davinci_spi *davinci_spi = spi_master_get_devdata(spi->master);
+	struct davinci_spi_dma *davinci_spi_dma;
+
+	davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];
+
+	if (use_dma && davinci_spi->dma_channels) {
+		davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];
+
+		if ((davinci_spi_dma->dma_rx_channel != -1)
+				&& (davinci_spi_dma->dma_tx_channel != -1)) {
+			edma_free_channel(davinci_spi_dma->dma_tx_channel);
+			edma_free_channel(davinci_spi_dma->dma_rx_channel);
+		}
+	}
+}
+
+static int davinci_spi_bufs_prep(struct spi_device *spi,
+				 struct davinci_spi *davinci_spi)
+{
+	int op_mode = 0;
+
+	/*
+	 * REVISIT  unless devices disagree about SPI_LOOP or
+	 * SPI_READY (SPI_NO_CS only allows one device!), this
+	 * should not need to be done before each message...
+	 * optimize for both flags staying cleared.
+	 */
+
+	op_mode = SPIPC0_DIFUN_MASK
+		| SPIPC0_DOFUN_MASK
+		| SPIPC0_CLKFUN_MASK;
+	if (!(spi->mode & SPI_NO_CS))
+		op_mode |= 1 << spi->chip_select;
+	if (spi->mode & SPI_READY)
+		op_mode |= SPIPC0_SPIENA_MASK;
+
+	iowrite32(op_mode, davinci_spi->base + SPIPC0);
+
+	if (spi->mode & SPI_LOOP)
+		set_io_bits(davinci_spi->base + SPIGCR1,
+				SPIGCR1_LOOPBACK_MASK);
+	else
+		clear_io_bits(davinci_spi->base + SPIGCR1,
+				SPIGCR1_LOOPBACK_MASK);
+
+	return 0;
+}
+
+static int davinci_spi_check_error(struct davinci_spi *davinci_spi,
+				   int int_status)
+{
+	struct device *sdev = davinci_spi->bitbang.master->dev.parent;
+
+	if (int_status & SPIFLG_TIMEOUT_MASK) {
+		dev_dbg(sdev, "SPI Time-out Error\n");
+		return -ETIMEDOUT;
+	}
+	if (int_status & SPIFLG_DESYNC_MASK) {
+		dev_dbg(sdev, "SPI Desynchronization Error\n");
+		return -EIO;
+	}
+	if (int_status & SPIFLG_BITERR_MASK) {
+		dev_dbg(sdev, "SPI Bit error\n");
+		return -EIO;
+	}
+
+	if (davinci_spi->version == SPI_VERSION_2) {
+		if (int_status & SPIFLG_DLEN_ERR_MASK) {
+			dev_dbg(sdev, "SPI Data Length Error\n");
+			return -EIO;
+		}
+		if (int_status & SPIFLG_PARERR_MASK) {
+			dev_dbg(sdev, "SPI Parity Error\n");
+			return -EIO;
+		}
+		if (int_status & SPIFLG_OVRRUN_MASK) {
+			dev_dbg(sdev, "SPI Data Overrun error\n");
+			return -EIO;
+		}
+		if (int_status & SPIFLG_TX_INTR_MASK) {
+			dev_dbg(sdev, "SPI TX intr bit set\n");
+			return -EIO;
+		}
+		if (int_status & SPIFLG_BUF_INIT_ACTIVE_MASK) {
+			dev_dbg(sdev, "SPI Buffer Init Active\n");
+			return -EBUSY;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * davinci_spi_bufs - handle the data transfer
+ * @spi: spi device on which the data transfer is to be done
+ * @t: spi transfer holding the transfer parameters
+ *
+ * This function writes the data to be transferred into the data register
+ * of the SPI controller and then waits until completion is signalled by
+ * the IRQ handler.
+ */
+static int davinci_spi_bufs_pio(struct spi_device *spi, struct spi_transfer *t)
+{
+	struct davinci_spi *davinci_spi;
+	int int_status, count, ret;
+	u8 conv, tmp;
+	u32 tx_data, data1_reg_val;
+	u32 buf_val, flg_val;
+	struct davinci_spi_platform_data *pdata;
+
+	davinci_spi = spi_master_get_devdata(spi->master);
+	pdata = davinci_spi->pdata;
+
+	davinci_spi->tx = t->tx_buf;
+	davinci_spi->rx = t->rx_buf;
+
+	/* convert len to words based on bits_per_word */
+	conv = davinci_spi->slave[spi->chip_select].bytes_per_word;
+	davinci_spi->count = t->len / conv;
+
+	INIT_COMPLETION(davinci_spi->done);
+
+	ret = davinci_spi_bufs_prep(spi, davinci_spi);
+	if (ret)
+		return ret;
+
+	/* Enable SPI */
+	set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
+
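+	/* program the chip-select-to-transmit and transmit-to-chip-select delays */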
+	iowrite32(0 | (pdata->c2tdelay << SPI_C2TDELAY_SHIFT) |
+			(pdata->t2cdelay << SPI_T2CDELAY_SHIFT),
+			davinci_spi->base + SPIDELAY);
+
+	count = davinci_spi->count;
+	data1_reg_val = pdata->cs_hold << SPIDAT1_CSHOLD_SHIFT;
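+	/* drive only the selected chip select during the transfer */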
+	tmp = ~(0x1 << spi->chip_select);
+
+	clear_io_bits(davinci_spi->base + SPIDEF, ~tmp);
+
+	data1_reg_val |= tmp << SPIDAT1_CSNR_SHIFT;
+
+	while ((ioread32(davinci_spi->base + SPIBUF)
+				& SPIBUF_RXEMPTY_MASK) == 0)
+		cpu_relax();
+
+	/* Determine the command to execute READ or WRITE */
+	if (t->tx_buf) {
+		clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL);
+
+		while (1) {
+			tx_data = davinci_spi->get_tx(davinci_spi);
+
+			data1_reg_val &= ~(0xFFFF);
+			data1_reg_val |= (0xFFFF & tx_data);
+
+			buf_val = ioread32(davinci_spi->base + SPIBUF);
+			if ((buf_val & SPIBUF_TXFULL_MASK) == 0) {
+				iowrite32(data1_reg_val,
+						davinci_spi->base + SPIDAT1);
+
+				count--;
+			}
+			while (ioread32(davinci_spi->base + SPIBUF)
+					& SPIBUF_RXEMPTY_MASK)
+				cpu_relax();
+
+			/* getting the returned byte */
+			if (t->rx_buf) {
+				buf_val = ioread32(davinci_spi->base + SPIBUF);
+				davinci_spi->get_rx(buf_val, davinci_spi);
+			}
+			if (count <= 0)
+				break;
+		}
+	} else {
+		if (pdata->poll_mode) {
+			while (1) {
+				/* keeps the serial clock going */
+				if ((ioread32(davinci_spi->base + SPIBUF)
+						& SPIBUF_TXFULL_MASK) == 0)
+					iowrite32(data1_reg_val,
+						davinci_spi->base + SPIDAT1);
+
+				while (ioread32(davinci_spi->base + SPIBUF) &
+						SPIBUF_RXEMPTY_MASK)
+					cpu_relax();
+
+				flg_val = ioread32(davinci_spi->base + SPIFLG);
+				buf_val = ioread32(davinci_spi->base + SPIBUF);
+
+				davinci_spi->get_rx(buf_val, davinci_spi);
+
+				count--;
+				if (count <= 0)
+					break;
+			}
+		} else {	/* Receive in Interrupt mode */
+			int i;
+
+			for (i = 0; i < davinci_spi->count; i++) {
+				set_io_bits(davinci_spi->base + SPIINT,
+						SPIINT_BITERR_INTR
+						| SPIINT_OVRRUN_INTR
+						| SPIINT_RX_INTR);
+
+				iowrite32(data1_reg_val,
+						davinci_spi->base + SPIDAT1);
+
+				while (ioread32(davinci_spi->base + SPIINT) &
+						SPIINT_RX_INTR)
+					cpu_relax();
+			}
+			iowrite32((data1_reg_val & 0x0ffcffff),
+					davinci_spi->base + SPIDAT1);
+		}
+	}
+
+	/*
+	 * Check for bit error, desync error, parity error, timeout error and
+	 * receive overflow errors
+	 */
+	int_status = ioread32(davinci_spi->base + SPIFLG);
+
+	ret = davinci_spi_check_error(davinci_spi, int_status);
+	if (ret != 0)
+		return ret;
+
+	/* SPI Framework maintains the count only in bytes so convert back */
+	davinci_spi->count *= conv;
+
+	return t->len;
+}
+
+#define DAVINCI_DMA_DATA_TYPE_S8	0x01
+#define DAVINCI_DMA_DATA_TYPE_S16	0x02
+#define DAVINCI_DMA_DATA_TYPE_S32	0x04
+
+static int davinci_spi_bufs_dma(struct spi_device *spi, struct spi_transfer *t)
+{
+	struct davinci_spi *davinci_spi;
+	int int_status = 0;
+	int count, temp_count;
+	u8 conv = 1;
+	u8 tmp;
+	u32 data1_reg_val;
+	struct davinci_spi_dma *davinci_spi_dma;
+	int word_len, data_type, ret;
+	unsigned long tx_reg, rx_reg;
+	struct davinci_spi_platform_data *pdata;
+	struct device *sdev;
+
+	davinci_spi = spi_master_get_devdata(spi->master);
+	pdata = davinci_spi->pdata;
+	sdev = davinci_spi->bitbang.master->dev.parent;
+
+	davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];
+
+	tx_reg = (unsigned long)davinci_spi->pbase + SPIDAT1;
+	rx_reg = (unsigned long)davinci_spi->pbase + SPIBUF;
+
+	davinci_spi->tx = t->tx_buf;
+	davinci_spi->rx = t->rx_buf;
+
+	/* convert len to words based on bits_per_word */
+	conv = davinci_spi->slave[spi->chip_select].bytes_per_word;
+	davinci_spi->count = t->len / conv;
+
+	INIT_COMPLETION(davinci_spi->done);
+
+	init_completion(&davinci_spi_dma->dma_rx_completion);
+	init_completion(&davinci_spi_dma->dma_tx_completion);
+
+	word_len = conv * 8;
+
+	if (word_len <= 8)
+		data_type = DAVINCI_DMA_DATA_TYPE_S8;
+	else if (word_len <= 16)
+		data_type = DAVINCI_DMA_DATA_TYPE_S16;
+	else if (word_len <= 32)
+		data_type = DAVINCI_DMA_DATA_TYPE_S32;
+	else
+		return -EINVAL;
+
+	ret = davinci_spi_bufs_prep(spi, davinci_spi);
+	if (ret)
+		return ret;
+
+	/* Program the transfer delay values if required */
+	iowrite32(0 | (pdata->c2tdelay << SPI_C2TDELAY_SHIFT) |
+			(pdata->t2cdelay << SPI_T2CDELAY_SHIFT),
+			davinci_spi->base + SPIDELAY);
+
+	count = davinci_spi->count;	/* the number of elements */
+	data1_reg_val = pdata->cs_hold << SPIDAT1_CSHOLD_SHIFT;
+
+	/* CS default = 0xFF */
+	tmp = ~(0x1 << spi->chip_select);
+
+	clear_io_bits(davinci_spi->base + SPIDEF, ~tmp);
+
+	data1_reg_val |= tmp << SPIDAT1_CSNR_SHIFT;
+
+	/* disable all interrupts for dma transfers */
+	clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL);
+	/* Disable SPI to write configuration bits in SPIDAT */
+	clear_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
+	iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1);
+	/* Enable SPI */
+	set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
+
+	while ((ioread32(davinci_spi->base + SPIBUF)
+				& SPIBUF_RXEMPTY_MASK) == 0)
+		cpu_relax();
+
+
+	if (t->tx_buf) {
+		t->tx_dma = dma_map_single(&spi->dev, (void *)t->tx_buf, count,
+				DMA_TO_DEVICE);
+		if (dma_mapping_error(&spi->dev, t->tx_dma)) {
+			dev_dbg(sdev, "Unable to DMA map a %d bytes"
+				" TX buffer\n", count);
+			return -ENOMEM;
+		}
+		temp_count = count;
+	} else {
+		/* We need TX clocking for RX transaction */
+		t->tx_dma = dma_map_single(&spi->dev,
+				(void *)davinci_spi->tmp_buf, count + 1,
+				DMA_TO_DEVICE);
+		if (dma_mapping_error(&spi->dev, t->tx_dma)) {
+			dev_dbg(sdev, "Unable to DMA map a %d bytes"
+				" TX tmp buffer\n", count);
+			return -ENOMEM;
+		}
+		temp_count = count + 1;
+	}
+
+	edma_set_transfer_params(davinci_spi_dma->dma_tx_channel,
+					data_type, temp_count, 1, 0, ASYNC);
+	edma_set_dest(davinci_spi_dma->dma_tx_channel, tx_reg, INCR, W8BIT);
+	edma_set_src(davinci_spi_dma->dma_tx_channel, t->tx_dma, INCR, W8BIT);
+	edma_set_src_index(davinci_spi_dma->dma_tx_channel, data_type, 0);
+	edma_set_dest_index(davinci_spi_dma->dma_tx_channel, 0, 0);
+
+	if (t->rx_buf) {
+		/* initiate transaction */
+		iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1);
+
+		t->rx_dma = dma_map_single(&spi->dev, (void *)t->rx_buf, count,
+				DMA_FROM_DEVICE);
+		if (dma_mapping_error(&spi->dev, t->rx_dma)) {
+			dev_dbg(sdev, "Couldn't DMA map a %d bytes RX buffer\n",
+					count);
+			if (t->tx_buf != NULL)
+				dma_unmap_single(NULL, t->tx_dma,
+						 count, DMA_TO_DEVICE);
+			return -ENOMEM;
+		}
+		edma_set_transfer_params(davinci_spi_dma->dma_rx_channel,
+				data_type, count, 1, 0, ASYNC);
+		edma_set_src(davinci_spi_dma->dma_rx_channel,
+				rx_reg, INCR, W8BIT);
+		edma_set_dest(davinci_spi_dma->dma_rx_channel,
+				t->rx_dma, INCR, W8BIT);
+		edma_set_src_index(davinci_spi_dma->dma_rx_channel, 0, 0);
+		edma_set_dest_index(davinci_spi_dma->dma_rx_channel,
+				data_type, 0);
+	}
+
+	if ((t->tx_buf) || (t->rx_buf))
+		edma_start(davinci_spi_dma->dma_tx_channel);
+
+	if (t->rx_buf)
+		edma_start(davinci_spi_dma->dma_rx_channel);
+
+	if ((t->rx_buf) || (t->tx_buf))
+		davinci_spi_set_dma_req(spi, 1);
+
+	if (t->tx_buf)
+		wait_for_completion_interruptible(
+				&davinci_spi_dma->dma_tx_completion);
+
+	if (t->rx_buf)
+		wait_for_completion_interruptible(
+				&davinci_spi_dma->dma_rx_completion);
+
+	dma_unmap_single(NULL, t->tx_dma, temp_count, DMA_TO_DEVICE);
+
+	if (t->rx_buf)
+		dma_unmap_single(NULL, t->rx_dma, count, DMA_FROM_DEVICE);
+
+	/*
+	 * Check for bit error, desync error, parity error, timeout error and
+	 * receive overflow errors
+	 */
+	int_status = ioread32(davinci_spi->base + SPIFLG);
+
+	ret = davinci_spi_check_error(davinci_spi, int_status);
+	if (ret != 0)
+		return ret;
+
+	/* SPI Framework maintains the count only in bytes so convert back */
+	davinci_spi->count *= conv;
+
+	return t->len;
+}
+
+/**
+ * davinci_spi_irq - IRQ handler for DaVinci SPI
+ * @irq: IRQ number for this SPI Master
+ * @context_data: structure for SPI Master controller davinci_spi
+ */
+static irqreturn_t davinci_spi_irq(s32 irq, void *context_data)
+{
+	struct davinci_spi *davinci_spi = context_data;
+	u32 int_status, rx_data = 0;
+	irqreturn_t ret = IRQ_NONE;
+
+	int_status = ioread32(davinci_spi->base + SPIFLG);
+
+	while ((int_status & SPIFLG_RX_INTR_MASK)) {
+		if (likely(int_status & SPIFLG_RX_INTR_MASK)) {
+			ret = IRQ_HANDLED;
+
+			rx_data = ioread32(davinci_spi->base + SPIBUF);
+			davinci_spi->get_rx(rx_data, davinci_spi);
+
+			/* Disable Receive Interrupt */
+			iowrite32(~(SPIINT_RX_INTR | SPIINT_TX_INTR),
+					davinci_spi->base + SPIINT);
+		} else
+			(void)davinci_spi_check_error(davinci_spi, int_status);
+
+		int_status = ioread32(davinci_spi->base + SPIFLG);
+	}
+
+	return ret;
+}
+
+/**
+ * davinci_spi_probe - probe function for SPI Master Controller
+ * @pdev: platform_device structure which contains platform-specific data
+ */
+static int davinci_spi_probe(struct platform_device *pdev)
+{
+	struct spi_master *master;
+	struct davinci_spi *davinci_spi;
+	struct davinci_spi_platform_data *pdata;
+	struct resource *r, *mem;
+	resource_size_t dma_rx_chan = SPI_NO_RESOURCE;
+	resource_size_t	dma_tx_chan = SPI_NO_RESOURCE;
+	resource_size_t	dma_eventq = SPI_NO_RESOURCE;
+	int i = 0, ret = 0;
+
+	pdata = pdev->dev.platform_data;
+	if (pdata == NULL) {
+		ret = -ENODEV;
+		goto err;
+	}
+
+	master = spi_alloc_master(&pdev->dev, sizeof(struct davinci_spi));
+	if (master == NULL) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	dev_set_drvdata(&pdev->dev, master);
+
+	davinci_spi = spi_master_get_devdata(master);
+	if (davinci_spi == NULL) {
+		ret = -ENOENT;
+		goto free_master;
+	}
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (r == NULL) {
+		ret = -ENOENT;
+		goto free_master;
+	}
+
+	davinci_spi->pbase = r->start;
+	davinci_spi->region_size = resource_size(r);
+	davinci_spi->pdata = pdata;
+
+	mem = request_mem_region(r->start, davinci_spi->region_size,
+					pdev->name);
+	if (mem == NULL) {
+		ret = -EBUSY;
+		goto free_master;
+	}
+
+	davinci_spi->base = (struct davinci_spi_reg __iomem *)
+			ioremap(r->start, davinci_spi->region_size);
+	if (davinci_spi->base == NULL) {
+		ret = -ENOMEM;
+		goto release_region;
+	}
+
+	davinci_spi->irq = platform_get_irq(pdev, 0);
+	if (davinci_spi->irq <= 0) {
+		ret = -EINVAL;
+		goto unmap_io;
+	}
+
+	ret = request_irq(davinci_spi->irq, davinci_spi_irq, IRQF_DISABLED,
+			  dev_name(&pdev->dev), davinci_spi);
+	if (ret)
+		goto unmap_io;
+
+	/* Allocate tmp_buf for tx_buf */
+	davinci_spi->tmp_buf = kzalloc(SPI_BUFSIZ, GFP_KERNEL);
+	if (davinci_spi->tmp_buf == NULL) {
+		ret = -ENOMEM;
+		goto irq_free;
+	}
+
+	davinci_spi->bitbang.master = spi_master_get(master);
+	if (davinci_spi->bitbang.master == NULL) {
+		ret = -ENODEV;
+		goto free_tmp_buf;
+	}
+
+	davinci_spi->clk = clk_get(&pdev->dev, NULL);
+	if (IS_ERR(davinci_spi->clk)) {
+		ret = -ENODEV;
+		goto put_master;
+	}
+	clk_enable(davinci_spi->clk);
+
+
+	master->bus_num = pdev->id;
+	master->num_chipselect = pdata->num_chipselect;
+	master->setup = davinci_spi_setup;
+	master->cleanup = davinci_spi_cleanup;
+
+	davinci_spi->bitbang.chipselect = davinci_spi_chipselect;
+	davinci_spi->bitbang.setup_transfer = davinci_spi_setup_transfer;
+
+	davinci_spi->version = pdata->version;
+	use_dma = pdata->use_dma;
+
+	davinci_spi->bitbang.flags = SPI_NO_CS | SPI_LSB_FIRST | SPI_LOOP;
+	if (davinci_spi->version == SPI_VERSION_2)
+		davinci_spi->bitbang.flags |= SPI_READY;
+
+	if (use_dma) {
+		r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+		if (r)
+			dma_rx_chan = r->start;
+		r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+		if (r)
+			dma_tx_chan = r->start;
+		r = platform_get_resource(pdev, IORESOURCE_DMA, 2);
+		if (r)
+			dma_eventq = r->start;
+	}
+
+	if (!use_dma ||
+	    dma_rx_chan == SPI_NO_RESOURCE ||
+	    dma_tx_chan == SPI_NO_RESOURCE ||
+	    dma_eventq	== SPI_NO_RESOURCE) {
+		davinci_spi->bitbang.txrx_bufs = davinci_spi_bufs_pio;
+		use_dma = 0;
+	} else {
+		davinci_spi->bitbang.txrx_bufs = davinci_spi_bufs_dma;
+		davinci_spi->dma_channels = kzalloc(master->num_chipselect
+				* sizeof(struct davinci_spi_dma), GFP_KERNEL);
+		if (davinci_spi->dma_channels == NULL) {
+			ret = -ENOMEM;
+			goto free_clk;
+		}
+
+		for (i = 0; i < master->num_chipselect; i++) {
+			davinci_spi->dma_channels[i].dma_rx_channel = -1;
+			davinci_spi->dma_channels[i].dma_rx_sync_dev =
+				dma_rx_chan;
+			davinci_spi->dma_channels[i].dma_tx_channel = -1;
+			davinci_spi->dma_channels[i].dma_tx_sync_dev =
+				dma_tx_chan;
+			davinci_spi->dma_channels[i].eventq = dma_eventq;
+		}
+		dev_info(&pdev->dev, "DaVinci SPI driver in EDMA mode\n"
+				"Using RX channel = %d, TX channel = %d and "
+				"event queue = %d\n", dma_rx_chan, dma_tx_chan,
+				dma_eventq);
+	}
+
+	davinci_spi->get_rx = davinci_spi_rx_buf_u8;
+	davinci_spi->get_tx = davinci_spi_tx_buf_u8;
+
+	init_completion(&davinci_spi->done);
+
+	/* Reset In/OUT SPI module */
+	iowrite32(0, davinci_spi->base + SPIGCR0);
+	udelay(100);
+	iowrite32(1, davinci_spi->base + SPIGCR0);
+
+	/* Clock internal */
+	if (davinci_spi->pdata->clk_internal)
+		set_io_bits(davinci_spi->base + SPIGCR1,
+				SPIGCR1_CLKMOD_MASK);
+	else
+		clear_io_bits(davinci_spi->base + SPIGCR1,
+				SPIGCR1_CLKMOD_MASK);
+
+	/* master mode default */
+	set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_MASTER_MASK);
+
+	if (davinci_spi->pdata->intr_level)
+		iowrite32(SPI_INTLVL_1, davinci_spi->base + SPILVL);
+	else
+		iowrite32(SPI_INTLVL_0, davinci_spi->base + SPILVL);
+
+	ret = spi_bitbang_start(&davinci_spi->bitbang);
+	if (ret)
+		goto free_clk;
+
+	dev_info(&pdev->dev, "Controller at 0x%p\n", davinci_spi->base);
+
+	if (!pdata->poll_mode)
+		dev_info(&pdev->dev, "Operating in interrupt mode"
+			" using IRQ %d\n", davinci_spi->irq);
+
+	return ret;
+
+free_clk:
+	clk_disable(davinci_spi->clk);
+	clk_put(davinci_spi->clk);
+put_master:
+	spi_master_put(master);
+free_tmp_buf:
+	kfree(davinci_spi->tmp_buf);
+irq_free:
+	free_irq(davinci_spi->irq, davinci_spi);
+unmap_io:
+	iounmap(davinci_spi->base);
+release_region:
+	release_mem_region(davinci_spi->pbase, davinci_spi->region_size);
+free_master:
+	kfree(master);
+err:
+	return ret;
+}
+
+/**
+ * davinci_spi_remove - remove function for SPI Master Controller
+ * @pdev: platform_device structure which contains platform-specific data
+ *
+ * This function does the reverse of davinci_spi_probe.
+ * It will free the IRQ and SPI controller's memory region.
+ * It will also call spi_bitbang_stop to destroy the work queue which was
+ * created by spi_bitbang_start.
+ */
+static int __exit davinci_spi_remove(struct platform_device *pdev)
+{
+	struct davinci_spi *davinci_spi;
+	struct spi_master *master;
+
+	master = dev_get_drvdata(&pdev->dev);
+	davinci_spi = spi_master_get_devdata(master);
+
+	spi_bitbang_stop(&davinci_spi->bitbang);
+
+	clk_disable(davinci_spi->clk);
+	clk_put(davinci_spi->clk);
+	spi_master_put(master);
+	kfree(davinci_spi->tmp_buf);
+	free_irq(davinci_spi->irq, davinci_spi);
+	iounmap(davinci_spi->base);
+	release_mem_region(davinci_spi->pbase, davinci_spi->region_size);
+
+	return 0;
+}
+
+static struct platform_driver davinci_spi_driver = {
+	.driver.name = "spi_davinci",
+	.remove = __exit_p(davinci_spi_remove),
+};
+
+static int __init davinci_spi_init(void)
+{
+	return platform_driver_probe(&davinci_spi_driver, davinci_spi_probe);
+}
+module_init(davinci_spi_init);
+
+static void __exit davinci_spi_exit(void)
+{
+	platform_driver_unregister(&davinci_spi_driver);
+}
+module_exit(davinci_spi_exit);
+
+MODULE_DESCRIPTION("TI DaVinci SPI Master Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/dw_spi.c b/drivers/spi/dw_spi.c
index 31620fa..8ed38f1 100644
--- a/drivers/spi/dw_spi.c
+++ b/drivers/spi/dw_spi.c
@@ -152,6 +152,7 @@
 #else
 static inline int mrst_spi_debugfs_init(struct dw_spi *dws)
 {
+	return 0;
 }
 
 static inline void mrst_spi_debugfs_remove(struct dw_spi *dws)
@@ -161,14 +162,14 @@
 
 static void wait_till_not_busy(struct dw_spi *dws)
 {
-	unsigned long end = jiffies + usecs_to_jiffies(1000);
+	unsigned long end = jiffies + 1 + usecs_to_jiffies(1000);
 
 	while (time_before(jiffies, end)) {
 		if (!(dw_readw(dws, sr) & SR_BUSY))
 			return;
 	}
 	dev_err(&dws->master->dev,
-		"DW SPI: Stutus keeps busy for 1000us after a read/write!\n");
+		"DW SPI: Status keeps busy for 1000us after a read/write!\n");
 }
 
 static void flush(struct dw_spi *dws)
@@ -358,6 +359,8 @@
 static irqreturn_t interrupt_transfer(struct dw_spi *dws)
 {
 	u16 irq_status, irq_mask = 0x3f;
+	u32 int_level = dws->fifo_len / 2;
+	u32 left;
 
 	irq_status = dw_readw(dws, isr) & irq_mask;
 	/* Error handling */
@@ -369,22 +372,23 @@
 		return IRQ_HANDLED;
 	}
 
-	/* INT comes from tx */
-	if (dws->tx && (irq_status & SPI_INT_TXEI)) {
-		while (dws->tx < dws->tx_end)
+	if (irq_status & SPI_INT_TXEI) {
+		spi_mask_intr(dws, SPI_INT_TXEI);
+
+		left = (dws->tx_end - dws->tx) / dws->n_bytes;
+		left = (left > int_level) ? int_level : left;
+
+		while (left--)
 			dws->write(dws);
+		dws->read(dws);
 
-		if (dws->tx == dws->tx_end) {
-			spi_mask_intr(dws, SPI_INT_TXEI);
-			transfer_complete(dws);
-		}
-	}
-
-	/* INT comes from rx */
-	if (dws->rx && (irq_status & SPI_INT_RXFI)) {
-		if (dws->read(dws))
+		/* Re-enable the IRQ if there is still data left to tx */
+		if (dws->tx_end > dws->tx)
+			spi_umask_intr(dws, SPI_INT_TXEI);
+		else
 			transfer_complete(dws);
 	}
+
 	return IRQ_HANDLED;
 }
 
@@ -404,12 +408,9 @@
 /* Must be called inside pump_transfers() */
 static void poll_transfer(struct dw_spi *dws)
 {
-	if (dws->tx) {
-		while (dws->write(dws))
-			dws->read(dws);
-	}
+	while (dws->write(dws))
+		dws->read(dws);
 
-	dws->read(dws);
 	transfer_complete(dws);
 }
 
@@ -428,6 +429,7 @@
 	u8 bits = 0;
 	u8 imask = 0;
 	u8 cs_change = 0;
+	u16 txint_level = 0;
 	u16 clk_div = 0;
 	u32 speed = 0;
 	u32 cr0 = 0;
@@ -438,6 +440,9 @@
 	chip = dws->cur_chip;
 	spi = message->spi;
 
+	if (unlikely(!chip->clk_div))
+		chip->clk_div = dws->max_freq / chip->speed_hz;
+
 	if (message->state == ERROR_STATE) {
 		message->status = -EIO;
 		goto early_exit;
@@ -492,7 +497,7 @@
 
 			/* clk_div doesn't support odd number */
 			clk_div = dws->max_freq / speed;
-			clk_div = (clk_div >> 1) << 1;
+			clk_div = (clk_div + 1) & 0xfffe;
 
 			chip->speed_hz = speed;
 			chip->clk_div = clk_div;
@@ -532,14 +537,35 @@
 	}
 	message->state = RUNNING_STATE;
 
+	/*
+	 * Adjust transfer mode if necessary. Requires platform dependent
+	 * chipselect mechanism.
+	 */
+	if (dws->cs_control) {
+		if (dws->rx && dws->tx)
+			chip->tmode = 0x00;
+		else if (dws->rx)
+			chip->tmode = 0x02;
+		else
+			chip->tmode = 0x01;
+
+		cr0 &= ~(0x3 << SPI_MODE_OFFSET);
+		cr0 |= (chip->tmode << SPI_TMOD_OFFSET);
+	}
+
 	/* Check if current transfer is a DMA transaction */
 	dws->dma_mapped = map_dma_buffers(dws);
 
+	/*
+	 * Interrupt mode
+	 * we only need to set the TXEI IRQ, as TX/RX always happen synchronously
+	 */
 	if (!dws->dma_mapped && !chip->poll_mode) {
-		if (dws->rx)
-			imask |= SPI_INT_RXFI;
-		if (dws->tx)
-			imask |= SPI_INT_TXEI;
+		int templen = dws->len / dws->n_bytes;
+		txint_level = dws->fifo_len / 2;
+		txint_level = (templen > txint_level) ? txint_level : templen;
+
+		imask |= SPI_INT_TXEI;
 		dws->transfer_handler = interrupt_transfer;
 	}
 
@@ -549,21 +575,23 @@
 	 *	2. clk_div is changed
 	 *	3. control value changes
 	 */
-	if (dw_readw(dws, ctrl0) != cr0 || cs_change || clk_div) {
+	if (dw_readw(dws, ctrl0) != cr0 || cs_change || clk_div || imask) {
 		spi_enable_chip(dws, 0);
 
 		if (dw_readw(dws, ctrl0) != cr0)
 			dw_writew(dws, ctrl0, cr0);
 
-		/* Set the interrupt mask, for poll mode just diable all int */
-		spi_mask_intr(dws, 0xff);
-		if (!chip->poll_mode)
-			spi_umask_intr(dws, imask);
-
 		spi_set_clk(dws, clk_div ? clk_div : chip->clk_div);
 		spi_chip_sel(dws, spi->chip_select);
-		spi_enable_chip(dws, 1);
 
+		/* Set the interrupt mask, for poll mode just disable all int */
+		spi_mask_intr(dws, 0xff);
+		if (imask)
+			spi_umask_intr(dws, imask);
+		if (txint_level)
+			dw_writew(dws, txfltr, txint_level);
+
+		spi_enable_chip(dws, 1);
 		if (cs_change)
 			dws->prev_chip = chip;
 	}
@@ -712,11 +740,11 @@
 	}
 	chip->bits_per_word = spi->bits_per_word;
 
+	if (!spi->max_speed_hz) {
+		dev_err(&spi->dev, "No max speed HZ parameter\n");
+		return -EINVAL;
+	}
 	chip->speed_hz = spi->max_speed_hz;
-	if (chip->speed_hz)
-		chip->clk_div = 25000000 / chip->speed_hz;
-	else
-		chip->clk_div = 8;	/* default value */
 
 	chip->tmode = 0; /* Tx & Rx */
 	/* Default SPI mode is SCPOL = 0, SCPH = 0 */
@@ -735,7 +763,7 @@
 	kfree(chip);
 }
 
-static int __init init_queue(struct dw_spi *dws)
+static int __devinit init_queue(struct dw_spi *dws)
 {
 	INIT_LIST_HEAD(&dws->queue);
 	spin_lock_init(&dws->lock);
@@ -817,6 +845,22 @@
 	spi_mask_intr(dws, 0xff);
 	spi_enable_chip(dws, 1);
 	flush(dws);
+
+	/*
+	 * Try to detect the FIFO depth if not set by interface driver,
+	 * the depth could be from 2 to 256 from HW spec
+	 */
+	if (!dws->fifo_len) {
+		u32 fifo;
+		for (fifo = 2; fifo <= 257; fifo++) {
+			dw_writew(dws, txfltr, fifo);
+			if (fifo != dw_readw(dws, txfltr))
+				break;
+		}
+
+		dws->fifo_len = (fifo == 257) ? 0 : fifo;
+		dw_writew(dws, txfltr, 0);
+	}
 }
 
 int __devinit dw_spi_add_host(struct dw_spi *dws)
@@ -913,6 +957,7 @@
 	/* Disconnect from the SPI framework */
 	spi_unregister_master(dws->master);
 }
+EXPORT_SYMBOL(dw_spi_remove_host);
 
 int dw_spi_suspend_host(struct dw_spi *dws)
 {
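
The clk_div change earlier in this diff replaces a round-down ((clk_div >> 1) << 1) with a round-up to the next even value ((clk_div + 1) & 0xfffe), so an odd divider is no longer truncated into a clock faster than the caller asked for. A minimal user-space sketch of the difference, using assumed clock numbers rather than anything taken from the patch:

#include <stdio.h>

int main(void)
{
	unsigned int max_freq = 25000000;		/* assumed controller input clock */
	unsigned int speed = 2600000;			/* assumed requested SPI rate */
	unsigned int div = max_freq / speed;		/* 9 */
	unsigned int old_div = (div >> 1) << 1;		/* 8  -> 3.125 MHz, above the request */
	unsigned int new_div = (div + 1) & 0xfffe;	/* 10 -> 2.5 MHz, within the request */

	printf("requested %u Hz: old div %u (%u Hz), new div %u (%u Hz)\n",
	       speed, old_div, max_freq / old_div, new_div, max_freq / new_div);
	return 0;
}
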
diff --git a/drivers/spi/dw_spi_mmio.c b/drivers/spi/dw_spi_mmio.c
new file mode 100644
index 0000000..e35b45a
--- /dev/null
+++ b/drivers/spi/dw_spi_mmio.c
@@ -0,0 +1,147 @@
+/*
+ * dw_spi_mmio.c - Memory-mapped interface driver for DW SPI Core
+ *
+ * Copyright (c) 2010, Octasic semiconductor.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/spi/dw_spi.h>
+#include <linux/spi/spi.h>
+
+#define DRIVER_NAME "dw_spi_mmio"
+
+struct dw_spi_mmio {
+	struct dw_spi  dws;
+	struct clk     *clk;
+};
+
+static int __devinit dw_spi_mmio_probe(struct platform_device *pdev)
+{
+	struct dw_spi_mmio *dwsmmio;
+	struct dw_spi *dws;
+	struct resource *mem, *ioarea;
+	int ret;
+
+	dwsmmio = kzalloc(sizeof(struct dw_spi_mmio), GFP_KERNEL);
+	if (!dwsmmio) {
+		ret = -ENOMEM;
+		goto err_end;
+	}
+
+	dws = &dwsmmio->dws;
+
+	/* Get basic io resource and map it */
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!mem) {
+		dev_err(&pdev->dev, "no mem resource?\n");
+		ret = -EINVAL;
+		goto err_kfree;
+	}
+
+	ioarea = request_mem_region(mem->start, resource_size(mem),
+			pdev->name);
+	if (!ioarea) {
+		dev_err(&pdev->dev, "SPI region already claimed\n");
+		ret = -EBUSY;
+		goto err_kfree;
+	}
+
+	dws->regs = ioremap_nocache(mem->start, resource_size(mem));
+	if (!dws->regs) {
+		dev_err(&pdev->dev, "SPI region already mapped\n");
+		ret = -ENOMEM;
+		goto err_release_reg;
+	}
+
+	dws->irq = platform_get_irq(pdev, 0);
+	if (dws->irq < 0) {
+		dev_err(&pdev->dev, "no irq resource?\n");
+		ret = dws->irq; /* -ENXIO */
+		goto err_unmap;
+	}
+
+	dwsmmio->clk = clk_get(&pdev->dev, NULL);
+	if (!dwsmmio->clk) {
+		ret = -ENODEV;
+		goto err_irq;
+	}
+	clk_enable(dwsmmio->clk);
+
+	dws->parent_dev = &pdev->dev;
+	dws->bus_num = 0;
+	dws->num_cs = 4;
+	dws->max_freq = clk_get_rate(dwsmmio->clk);
+
+	ret = dw_spi_add_host(dws);
+	if (ret)
+		goto err_clk;
+
+	platform_set_drvdata(pdev, dwsmmio);
+	return 0;
+
+err_clk:
+	clk_disable(dwsmmio->clk);
+	clk_put(dwsmmio->clk);
+	dwsmmio->clk = NULL;
+err_irq:
+	free_irq(dws->irq, dws);
+err_unmap:
+	iounmap(dws->regs);
+err_release_reg:
+	release_mem_region(mem->start, resource_size(mem));
+err_kfree:
+	kfree(dwsmmio);
+err_end:
+	return ret;
+}
+
+static int __devexit dw_spi_mmio_remove(struct platform_device *pdev)
+{
+	struct dw_spi_mmio *dwsmmio = platform_get_drvdata(pdev);
+	struct resource *mem;
+
+	platform_set_drvdata(pdev, NULL);
+
+	clk_disable(dwsmmio->clk);
+	clk_put(dwsmmio->clk);
+	dwsmmio->clk = NULL;
+
+	free_irq(dwsmmio->dws.irq, &dwsmmio->dws);
+	dw_spi_remove_host(&dwsmmio->dws);
+	iounmap(dwsmmio->dws.regs);
+	kfree(dwsmmio);
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	release_mem_region(mem->start, resource_size(mem));
+	return 0;
+}
+
+static struct platform_driver dw_spi_mmio_driver = {
+	.remove		= __devexit_p(dw_spi_mmio_remove),
+	.driver		= {
+		.name	= DRIVER_NAME,
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init dw_spi_mmio_init(void)
+{
+	return platform_driver_probe(&dw_spi_mmio_driver, dw_spi_mmio_probe);
+}
+module_init(dw_spi_mmio_init);
+
+static void __exit dw_spi_mmio_exit(void)
+{
+	platform_driver_unregister(&dw_spi_mmio_driver);
+}
+module_exit(dw_spi_mmio_exit);
+
+MODULE_AUTHOR("Jean-Hugues Deschenes <jean-hugues.deschenes@octasic.com>");
+MODULE_DESCRIPTION("Memory-mapped I/O interface driver for DW SPI Core");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/dw_spi_pci.c b/drivers/spi/dw_spi_pci.c
index 34ba691..1f0735f 100644
--- a/drivers/spi/dw_spi_pci.c
+++ b/drivers/spi/dw_spi_pci.c
@@ -73,6 +73,7 @@
 	dws->num_cs = 4;
 	dws->max_freq = 25000000;	/* for Moorestwon */
 	dws->irq = pdev->irq;
+	dws->fifo_len = 40;		/* FIFO has 40 words buffer */
 
 	ret = dw_spi_add_host(dws);
 	if (ret)
@@ -98,6 +99,7 @@
 	struct dw_spi_pci *dwpci = pci_get_drvdata(pdev);
 
 	pci_set_drvdata(pdev, NULL);
+	dw_spi_remove_host(&dwpci->dws);
 	iounmap(dwpci->dws.regs);
 	pci_release_region(pdev, 0);
 	kfree(dwpci);
diff --git a/drivers/spi/mpc52xx_psc_spi.c b/drivers/spi/mpc52xx_psc_spi.c
index f50c81d..0474786 100644
--- a/drivers/spi/mpc52xx_psc_spi.c
+++ b/drivers/spi/mpc52xx_psc_spi.c
@@ -503,7 +503,7 @@
 	return mpc52xx_psc_spi_do_remove(&op->dev);
 }
 
-static struct of_device_id mpc52xx_psc_spi_of_match[] = {
+static const struct of_device_id mpc52xx_psc_spi_of_match[] = {
 	{ .compatible = "fsl,mpc5200-psc-spi", },
 	{ .compatible = "mpc5200-psc-spi", }, /* old */
 	{}
diff --git a/drivers/spi/mpc52xx_spi.c b/drivers/spi/mpc52xx_spi.c
index 45bfe64..6eab465 100644
--- a/drivers/spi/mpc52xx_spi.c
+++ b/drivers/spi/mpc52xx_spi.c
@@ -550,7 +550,7 @@
 	return 0;
 }
 
-static struct of_device_id mpc52xx_spi_match[] __devinitdata = {
+static const struct of_device_id mpc52xx_spi_match[] __devinitconst = {
 	{ .compatible = "fsl,mpc5200-spi", },
 	{}
 };
diff --git a/drivers/spi/spi_imx.c b/drivers/spi/spi_imx.c
index 1893f1e..0ddbbe4 100644
--- a/drivers/spi/spi_imx.c
+++ b/drivers/spi/spi_imx.c
@@ -469,7 +469,7 @@
 	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
 	int gpio = spi_imx->chipselect[spi->chip_select];
 
-	pr_debug("%s: mode %d, %u bpw, %d hz\n", __func__,
+	dev_dbg(&spi->dev, "%s: mode %d, %u bpw, %d hz\n", __func__,
 		 spi->mode, spi->bits_per_word, spi->max_speed_hz);
 
 	if (gpio >= 0)
diff --git a/drivers/spi/spi_mpc8xxx.c b/drivers/spi/spi_mpc8xxx.c
index 1fb2a6e..4f0cc9d 100644
--- a/drivers/spi/spi_mpc8xxx.c
+++ b/drivers/spi/spi_mpc8xxx.c
@@ -365,7 +365,7 @@
 
 	if ((mpc8xxx_spi->spibrg / hz) > 64) {
 		cs->hw_mode |= SPMODE_DIV16;
-		pm = mpc8xxx_spi->spibrg / (hz * 64);
+		pm = (mpc8xxx_spi->spibrg - 1) / (hz * 64) + 1;
 
 		WARN_ONCE(pm > 16, "%s: Requested speed is too low: %d Hz. "
 			  "Will use %d Hz instead.\n", dev_name(&spi->dev),
@@ -373,7 +373,7 @@
 		if (pm > 16)
 			pm = 16;
 	} else
-		pm = mpc8xxx_spi->spibrg / (hz * 4);
+		pm = (mpc8xxx_spi->spibrg - 1) / (hz * 4) + 1;
 	if (pm)
 		pm--;
 
@@ -1328,7 +1328,7 @@
 static int __devinit plat_mpc8xxx_spi_probe(struct platform_device *pdev)
 {
 	struct resource *mem;
-	unsigned int irq;
+	int irq;
 	struct spi_master *master;
 
 	if (!pdev->dev.platform_data)
@@ -1339,7 +1339,7 @@
 		return -EINVAL;
 
 	irq = platform_get_irq(pdev, 0);
-	if (!irq)
+	if (irq <= 0)
 		return -EINVAL;
 
 	master = mpc8xxx_spi_probe(&pdev->dev, mem, irq);
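
The prescaler hunk above switches from a truncating division to a ceiling division ((x - 1) / y + 1), so the derived bit clock lands at or below the requested rate instead of above it. A small sketch with assumed clock numbers, not taken from the patch; the >64 ratio branch behaves the same way with the *64 divisor:

#include <stdio.h>

int main(void)
{
	unsigned int spibrg = 133333333;			/* assumed BRG clock */
	unsigned int hz = 10000000;				/* assumed requested rate */
	unsigned int old_pm = spibrg / (hz * 4);		/* 3 -> ~11.1 MHz, too fast */
	unsigned int new_pm = (spibrg - 1) / (hz * 4) + 1;	/* 4 -> ~8.3 MHz, within the request */

	printf("old pm %u -> %u Hz, new pm %u -> %u Hz\n",
	       old_pm, spibrg / (old_pm * 4), new_pm, spibrg / (new_pm * 4));
	return 0;
}
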
diff --git a/drivers/spi/spi_ppc4xx.c b/drivers/spi/spi_ppc4xx.c
index 140a18d..6d8d402 100644
--- a/drivers/spi/spi_ppc4xx.c
+++ b/drivers/spi/spi_ppc4xx.c
@@ -578,7 +578,7 @@
 	return 0;
 }
 
-static struct of_device_id spi_ppc4xx_of_match[] = {
+static const struct of_device_id spi_ppc4xx_of_match[] = {
 	{ .compatible = "ibm,ppc4xx-spi", },
 	{},
 };
diff --git a/drivers/spi/spi_s3c64xx.c b/drivers/spi/spi_s3c64xx.c
index 88a456d..9736581 100644
--- a/drivers/spi/spi_s3c64xx.c
+++ b/drivers/spi/spi_s3c64xx.c
@@ -28,7 +28,7 @@
 #include <linux/spi/spi.h>
 
 #include <mach/dma.h>
-#include <plat/spi.h>
+#include <plat/s3c64xx-spi.h>
 
 /* Registers and bit-fields */
 
@@ -137,6 +137,7 @@
 /**
  * struct s3c64xx_spi_driver_data - Runtime info holder for SPI driver.
  * @clk: Pointer to the spi clock.
+ * @src_clk: Pointer to the clock used to generate SPI signals.
  * @master: Pointer to the SPI Protocol master.
  * @workqueue: Work queue for the SPI xfer requests.
  * @cntrlr_info: Platform specific data for the controller this driver manages.
@@ -157,10 +158,11 @@
 struct s3c64xx_spi_driver_data {
 	void __iomem                    *regs;
 	struct clk                      *clk;
+	struct clk                      *src_clk;
 	struct platform_device          *pdev;
 	struct spi_master               *master;
 	struct workqueue_struct	        *workqueue;
-	struct s3c64xx_spi_cntrlr_info  *cntrlr_info;
+	struct s3c64xx_spi_info  *cntrlr_info;
 	struct spi_device               *tgl_spi;
 	struct work_struct              work;
 	struct list_head                queue;
@@ -180,7 +182,7 @@
 
 static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
 {
-	struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
+	struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
 	void __iomem *regs = sdd->regs;
 	unsigned long loops;
 	u32 val;
@@ -225,7 +227,7 @@
 				struct spi_device *spi,
 				struct spi_transfer *xfer, int dma_mode)
 {
-	struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
+	struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
 	void __iomem *regs = sdd->regs;
 	u32 modecfg, chcfg;
 
@@ -298,19 +300,20 @@
 		if (sdd->tgl_spi != spi) { /* if last mssg on diff device */
 			/* Deselect the last toggled device */
 			cs = sdd->tgl_spi->controller_data;
-			cs->set_level(spi->mode & SPI_CS_HIGH ? 0 : 1);
+			cs->set_level(cs->line,
+					spi->mode & SPI_CS_HIGH ? 0 : 1);
 		}
 		sdd->tgl_spi = NULL;
 	}
 
 	cs = spi->controller_data;
-	cs->set_level(spi->mode & SPI_CS_HIGH ? 1 : 0);
+	cs->set_level(cs->line, spi->mode & SPI_CS_HIGH ? 1 : 0);
 }
 
 static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
 				struct spi_transfer *xfer, int dma_mode)
 {
-	struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
+	struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
 	void __iomem *regs = sdd->regs;
 	unsigned long val;
 	int ms;
@@ -384,12 +387,11 @@
 	if (sdd->tgl_spi == spi)
 		sdd->tgl_spi = NULL;
 
-	cs->set_level(spi->mode & SPI_CS_HIGH ? 0 : 1);
+	cs->set_level(cs->line, spi->mode & SPI_CS_HIGH ? 0 : 1);
 }
 
 static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
 {
-	struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
 	void __iomem *regs = sdd->regs;
 	u32 val;
 
@@ -435,7 +437,7 @@
 	/* Configure Clock */
 	val = readl(regs + S3C64XX_SPI_CLK_CFG);
 	val &= ~S3C64XX_SPI_PSR_MASK;
-	val |= ((clk_get_rate(sci->src_clk) / sdd->cur_speed / 2 - 1)
+	val |= ((clk_get_rate(sdd->src_clk) / sdd->cur_speed / 2 - 1)
 			& S3C64XX_SPI_PSR_MASK);
 	writel(val, regs + S3C64XX_SPI_CLK_CFG);
 
@@ -558,7 +560,7 @@
 static void handle_msg(struct s3c64xx_spi_driver_data *sdd,
 					struct spi_message *msg)
 {
-	struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
+	struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
 	struct spi_device *spi = msg->spi;
 	struct s3c64xx_spi_csinfo *cs = spi->controller_data;
 	struct spi_transfer *xfer;
@@ -632,8 +634,8 @@
 		S3C64XX_SPI_DEACT(sdd);
 
 		if (status) {
-			dev_err(&spi->dev, "I/O Error: \
-				rx-%d tx-%d res:rx-%c tx-%c len-%d\n",
+			dev_err(&spi->dev, "I/O Error: "
+				"rx-%d tx-%d res:rx-%c tx-%c len-%d\n",
 				xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0,
 				(sdd->state & RXBUSY) ? 'f' : 'p',
 				(sdd->state & TXBUSY) ? 'f' : 'p',
@@ -786,7 +788,7 @@
 {
 	struct s3c64xx_spi_csinfo *cs = spi->controller_data;
 	struct s3c64xx_spi_driver_data *sdd;
-	struct s3c64xx_spi_cntrlr_info *sci;
+	struct s3c64xx_spi_info *sci;
 	struct spi_message *msg;
 	u32 psr, speed;
 	unsigned long flags;
@@ -831,17 +833,17 @@
 	}
 
 	/* Check if we can provide the requested rate */
-	speed = clk_get_rate(sci->src_clk) / 2 / (0 + 1); /* Max possible */
+	speed = clk_get_rate(sdd->src_clk) / 2 / (0 + 1); /* Max possible */
 
 	if (spi->max_speed_hz > speed)
 		spi->max_speed_hz = speed;
 
-	psr = clk_get_rate(sci->src_clk) / 2 / spi->max_speed_hz - 1;
+	psr = clk_get_rate(sdd->src_clk) / 2 / spi->max_speed_hz - 1;
 	psr &= S3C64XX_SPI_PSR_MASK;
 	if (psr == S3C64XX_SPI_PSR_MASK)
 		psr--;
 
-	speed = clk_get_rate(sci->src_clk) / 2 / (psr + 1);
+	speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
 	if (spi->max_speed_hz < speed) {
 		if (psr+1 < S3C64XX_SPI_PSR_MASK) {
 			psr++;
@@ -851,7 +853,7 @@
 		}
 	}
 
-	speed = clk_get_rate(sci->src_clk) / 2 / (psr + 1);
+	speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
 	if (spi->max_speed_hz >= speed)
 		spi->max_speed_hz = speed;
 	else
@@ -867,7 +869,7 @@
 
 static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel)
 {
-	struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
+	struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
 	void __iomem *regs = sdd->regs;
 	unsigned int val;
 
@@ -902,7 +904,7 @@
 {
 	struct resource	*mem_res, *dmatx_res, *dmarx_res;
 	struct s3c64xx_spi_driver_data *sdd;
-	struct s3c64xx_spi_cntrlr_info *sci;
+	struct s3c64xx_spi_info *sci;
 	struct spi_master *master;
 	int ret;
 
@@ -1000,18 +1002,15 @@
 		goto err4;
 	}
 
-	if (sci->src_clk_nr == S3C64XX_SPI_SRCCLK_PCLK)
-		sci->src_clk = sdd->clk;
-	else
-		sci->src_clk = clk_get(&pdev->dev, sci->src_clk_name);
-	if (IS_ERR(sci->src_clk)) {
+	sdd->src_clk = clk_get(&pdev->dev, sci->src_clk_name);
+	if (IS_ERR(sdd->src_clk)) {
 		dev_err(&pdev->dev,
 			"Unable to acquire clock '%s'\n", sci->src_clk_name);
-		ret = PTR_ERR(sci->src_clk);
+		ret = PTR_ERR(sdd->src_clk);
 		goto err5;
 	}
 
-	if (sci->src_clk != sdd->clk && clk_enable(sci->src_clk)) {
+	if (clk_enable(sdd->src_clk)) {
 		dev_err(&pdev->dev, "Couldn't enable clock '%s'\n",
 							sci->src_clk_name);
 		ret = -EBUSY;
@@ -1040,11 +1039,10 @@
 		goto err8;
 	}
 
-	dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d \
-					with %d Slaves attached\n",
+	dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d "
+					"with %d Slaves attached\n",
 					pdev->id, master->num_chipselect);
-	dev_dbg(&pdev->dev, "\tIOmem=[0x%x-0x%x]\
-					\tDMA=[Rx-%d, Tx-%d]\n",
+	dev_dbg(&pdev->dev, "\tIOmem=[0x%x-0x%x]\tDMA=[Rx-%d, Tx-%d]\n",
 					mem_res->end, mem_res->start,
 					sdd->rx_dmach, sdd->tx_dmach);
 
@@ -1053,11 +1051,9 @@
 err8:
 	destroy_workqueue(sdd->workqueue);
 err7:
-	if (sci->src_clk != sdd->clk)
-		clk_disable(sci->src_clk);
+	clk_disable(sdd->src_clk);
 err6:
-	if (sci->src_clk != sdd->clk)
-		clk_put(sci->src_clk);
+	clk_put(sdd->src_clk);
 err5:
 	clk_disable(sdd->clk);
 err4:
@@ -1078,7 +1074,6 @@
 {
 	struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
 	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
-	struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
 	struct resource	*mem_res;
 	unsigned long flags;
 
@@ -1093,11 +1088,8 @@
 
 	destroy_workqueue(sdd->workqueue);
 
-	if (sci->src_clk != sdd->clk)
-		clk_disable(sci->src_clk);
-
-	if (sci->src_clk != sdd->clk)
-		clk_put(sci->src_clk);
+	clk_disable(sdd->src_clk);
+	clk_put(sdd->src_clk);
 
 	clk_disable(sdd->clk);
 	clk_put(sdd->clk);
@@ -1105,7 +1097,8 @@
 	iounmap((void *) sdd->regs);
 
 	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	release_mem_region(mem_res->start, resource_size(mem_res));
+	if (mem_res != NULL)
+		release_mem_region(mem_res->start, resource_size(mem_res));
 
 	platform_set_drvdata(pdev, NULL);
 	spi_master_put(master);
@@ -1118,8 +1111,6 @@
 {
 	struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
 	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
-	struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
-	struct s3c64xx_spi_csinfo *cs;
 	unsigned long flags;
 
 	spin_lock_irqsave(&sdd->lock, flags);
@@ -1130,9 +1121,7 @@
 		msleep(10);
 
 	/* Disable the clock */
-	if (sci->src_clk != sdd->clk)
-		clk_disable(sci->src_clk);
-
+	clk_disable(sdd->src_clk);
 	clk_disable(sdd->clk);
 
 	sdd->cur_speed = 0; /* Output Clock is stopped */
@@ -1144,15 +1133,13 @@
 {
 	struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
 	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
-	struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
+	struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
 	unsigned long flags;
 
 	sci->cfg_gpio(pdev);
 
 	/* Enable the clock */
-	if (sci->src_clk != sdd->clk)
-		clk_enable(sci->src_clk);
-
+	clk_enable(sdd->src_clk);
 	clk_enable(sdd->clk);
 
 	s3c64xx_spi_hwinit(sdd, pdev->id);
diff --git a/drivers/spi/spi_sh_msiof.c b/drivers/spi/spi_sh_msiof.c
index 30973ec..d93b667 100644
--- a/drivers/spi/spi_sh_msiof.c
+++ b/drivers/spi/spi_sh_msiof.c
@@ -20,12 +20,12 @@
 #include <linux/bitmap.h>
 #include <linux/clk.h>
 #include <linux/io.h>
+#include <linux/err.h>
 
 #include <linux/spi/spi.h>
 #include <linux/spi/spi_bitbang.h>
 #include <linux/spi/sh_msiof.h>
 
-#include <asm/spi.h>
 #include <asm/unaligned.h>
 
 struct sh_msiof_spi_priv {
diff --git a/drivers/spi/spi_stmp.c b/drivers/spi/spi_stmp.c
index 2552bb3..fadff76 100644
--- a/drivers/spi/spi_stmp.c
+++ b/drivers/spi/spi_stmp.c
@@ -76,7 +76,7 @@
 			break;						\
 		}							\
 		cpu_relax();						\
-	} while (time_before(end_jiffies, jiffies));			\
+	} while (time_before(jiffies, end_jiffies));			\
 	succeeded;							\
 	})
 
diff --git a/drivers/spi/xilinx_spi.c b/drivers/spi/xilinx_spi.c
index 9f38637..1b47363 100644
--- a/drivers/spi/xilinx_spi.c
+++ b/drivers/spi/xilinx_spi.c
@@ -93,6 +93,26 @@
 	void (*rx_fn) (struct xilinx_spi *);
 };
 
+static void xspi_write32(u32 val, void __iomem *addr)
+{
+	iowrite32(val, addr);
+}
+
+static unsigned int xspi_read32(void __iomem *addr)
+{
+	return ioread32(addr);
+}
+
+static void xspi_write32_be(u32 val, void __iomem *addr)
+{
+	iowrite32be(val, addr);
+}
+
+static unsigned int xspi_read32_be(void __iomem *addr)
+{
+	return ioread32be(addr);
+}
+
 static void xspi_tx8(struct xilinx_spi *xspi)
 {
 	xspi->write_fn(*xspi->tx_ptr, xspi->regs + XSPI_TXD_OFFSET);
@@ -374,11 +394,11 @@
 	xspi->mem = *mem;
 	xspi->irq = irq;
 	if (pdata->little_endian) {
-		xspi->read_fn = ioread32;
-		xspi->write_fn = iowrite32;
+		xspi->read_fn = xspi_read32;
+		xspi->write_fn = xspi_write32;
 	} else {
-		xspi->read_fn = ioread32be;
-		xspi->write_fn = iowrite32be;
+		xspi->read_fn = xspi_read32_be;
+		xspi->write_fn = xspi_write32_be;
 	}
 	xspi->bits_per_word = pdata->bits_per_word;
 	if (xspi->bits_per_word == 8) {
diff --git a/drivers/spi/xilinx_spi_of.c b/drivers/spi/xilinx_spi_of.c
index 71dc3ad..ed34a8d 100644
--- a/drivers/spi/xilinx_spi_of.c
+++ b/drivers/spi/xilinx_spi_of.c
@@ -99,7 +99,7 @@
 	return xilinx_spi_remove(op);
 }
 
-static struct of_device_id xilinx_spi_of_match[] = {
+static const struct of_device_id xilinx_spi_of_match[] = {
 	{ .compatible = "xlnx,xps-spi-2.00.a", },
 	{ .compatible = "xlnx,xps-spi-2.00.b", },
 	{}
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index e9f9954..bbeeb92 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -78,7 +78,7 @@
 MODULE_DESCRIPTION("USB Mass Storage driver for Linux");
 MODULE_LICENSE("GPL");
 
-static unsigned int delay_use = 5;
+static unsigned int delay_use = 1;
 module_param(delay_use, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(delay_use, "seconds to delay before using a new device");
 
diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
index e4e4d43..9ee67d6 100644
--- a/drivers/video/aty/aty128fb.c
+++ b/drivers/video/aty/aty128fb.c
@@ -1931,22 +1931,22 @@
 			 * PowerMac2,2 summer 2000 iMacs
 			 * PowerMac4,1 january 2001 iMacs "flower power"
 			 */
-			if (machine_is_compatible("PowerMac2,1") ||
-			    machine_is_compatible("PowerMac2,2") ||
-			    machine_is_compatible("PowerMac4,1"))
+			if (of_machine_is_compatible("PowerMac2,1") ||
+			    of_machine_is_compatible("PowerMac2,2") ||
+			    of_machine_is_compatible("PowerMac4,1"))
 				default_vmode = VMODE_1024_768_75;
 
 			/* iBook SE */
-			if (machine_is_compatible("PowerBook2,2"))
+			if (of_machine_is_compatible("PowerBook2,2"))
 				default_vmode = VMODE_800_600_60;
 
 			/* PowerBook Firewire (Pismo), iBook Dual USB */
-			if (machine_is_compatible("PowerBook3,1") ||
-			    machine_is_compatible("PowerBook4,1"))
+			if (of_machine_is_compatible("PowerBook3,1") ||
+			    of_machine_is_compatible("PowerBook4,1"))
 				default_vmode = VMODE_1024_768_60;
 
 			/* PowerBook Titanium */
-			if (machine_is_compatible("PowerBook3,2"))
+			if (of_machine_is_compatible("PowerBook3,2"))
 				default_vmode = VMODE_1152_768_60;
 	
 			if (default_cmode > 16) 
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index 1ddeb4c..e45ab8d 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -2439,7 +2439,7 @@
 	 * The Apple iBook1 uses non-standard memory frequencies.
 	 * We detect it and set the frequency manually.
 	 */
-	if (machine_is_compatible("PowerBook2,1")) {
+	if (of_machine_is_compatible("PowerBook2,1")) {
 		par->pll_limits.mclk = 70;
 		par->pll_limits.xclk = 53;
 	}
@@ -2659,7 +2659,7 @@
 		      FBINFO_HWACCEL_YPAN;
 
 #ifdef CONFIG_PMAC_BACKLIGHT
-	if (M64_HAS(G3_PB_1_1) && machine_is_compatible("PowerBook1,1")) {
+	if (M64_HAS(G3_PB_1_1) && of_machine_is_compatible("PowerBook1,1")) {
 		/*
 		 * these bits let the 101 powerbook
 		 * wake up from sleep -- paulus
@@ -2690,9 +2690,9 @@
 				if (M64_HAS(G3_PB_1024x768))
 					/* G3 PowerBook with 1024x768 LCD */
 					default_vmode = VMODE_1024_768_60;
-				else if (machine_is_compatible("iMac"))
+				else if (of_machine_is_compatible("iMac"))
 					default_vmode = VMODE_1024_768_75;
-				else if (machine_is_compatible("PowerBook2,1"))
+				else if (of_machine_is_compatible("PowerBook2,1"))
 					/* iBook with 800x600 LCD */
 					default_vmode = VMODE_800_600_60;
 				else
@@ -3104,7 +3104,7 @@
 	}
 
 	dp = pci_device_to_OF_node(pdev);
-	if (node == dp->node) {
+	if (node == dp->phandle) {
 		struct fb_var_screeninfo *var = &default_var;
 		unsigned int N, P, Q, M, T, R;
 		u32 v_total, h_total;
diff --git a/drivers/video/aty/radeon_backlight.c b/drivers/video/aty/radeon_backlight.c
index 1a056ad..fa1198c 100644
--- a/drivers/video/aty/radeon_backlight.c
+++ b/drivers/video/aty/radeon_backlight.c
@@ -175,9 +175,9 @@
 
 #ifdef CONFIG_PMAC_BACKLIGHT
 	pdata->negative = pdata->negative ||
-		machine_is_compatible("PowerBook4,3") ||
-		machine_is_compatible("PowerBook6,3") ||
-		machine_is_compatible("PowerBook6,5");
+		of_machine_is_compatible("PowerBook4,3") ||
+		of_machine_is_compatible("PowerBook6,3") ||
+		of_machine_is_compatible("PowerBook6,5");
 #endif
 
 	rinfo->info->bl_dev = bd;
diff --git a/drivers/video/pvr2fb.c b/drivers/video/pvr2fb.c
index 53f8f11..f997510 100644
--- a/drivers/video/pvr2fb.c
+++ b/drivers/video/pvr2fb.c
@@ -831,7 +831,7 @@
 	printk(KERN_NOTICE "fb%d: registering with SQ API\n", fb_info->node);
 
 	pvr2fb_map = sq_remap(fb_info->fix.smem_start, fb_info->fix.smem_len,
-			      fb_info->fix.id, pgprot_val(PAGE_SHARED));
+			      fb_info->fix.id, PAGE_SHARED);
 
 	printk(KERN_NOTICE "fb%d: Mapped video memory to SQ addr 0x%lx\n",
 	       fb_info->node, pvr2fb_map);
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
index a69830d..8d7653e 100644
--- a/drivers/video/sh_mobile_lcdcfb.c
+++ b/drivers/video/sh_mobile_lcdcfb.c
@@ -19,6 +19,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/interrupt.h>
 #include <linux/vmalloc.h>
+#include <linux/ioctl.h>
 #include <video/sh_mobile_lcdc.h>
 #include <asm/atomic.h>
 
@@ -106,6 +107,7 @@
 #define LDRCNTR_SRC	0x00010000
 #define LDRCNTR_MRS	0x00000002
 #define LDRCNTR_MRC	0x00000001
+#define LDSR_MRS	0x00000100
 
 struct sh_mobile_lcdc_priv;
 struct sh_mobile_lcdc_chan {
@@ -122,8 +124,8 @@
 	struct scatterlist *sglist;
 	unsigned long frame_end;
 	unsigned long pan_offset;
-	unsigned long new_pan_offset;
 	wait_queue_head_t frame_end_wait;
+	struct completion vsync_completion;
 };
 
 struct sh_mobile_lcdc_priv {
@@ -366,19 +368,8 @@
 		}
 
 		/* VSYNC End */
-		if (ldintr & LDINTR_VES) {
-			unsigned long ldrcntr = lcdc_read(priv, _LDRCNTR);
-			/* Set the source address for the next refresh */
-			lcdc_write_chan_mirror(ch, LDSA1R, ch->dma_handle +
-					       ch->new_pan_offset);
-			if (lcdc_chan_is_sublcd(ch))
-				lcdc_write(ch->lcdc, _LDRCNTR,
-					   ldrcntr ^ LDRCNTR_SRS);
-			else
-				lcdc_write(ch->lcdc, _LDRCNTR,
-					   ldrcntr ^ LDRCNTR_MRS);
-			ch->pan_offset = ch->new_pan_offset;
-		}
+		if (ldintr & LDINTR_VES)
+			complete(&ch->vsync_completion);
 	}
 
 	return IRQ_HANDLED;
@@ -767,25 +758,69 @@
 				     struct fb_info *info)
 {
 	struct sh_mobile_lcdc_chan *ch = info->par;
+	struct sh_mobile_lcdc_priv *priv = ch->lcdc;
+	unsigned long ldrcntr;
+	unsigned long new_pan_offset;
 
-	if (info->var.xoffset == var->xoffset &&
-	    info->var.yoffset == var->yoffset)
-		return 0;	/* No change, do nothing */
-
-	ch->new_pan_offset = (var->yoffset * info->fix.line_length) +
+	new_pan_offset = (var->yoffset * info->fix.line_length) +
 		(var->xoffset * (info->var.bits_per_pixel / 8));
 
-	if (ch->new_pan_offset != ch->pan_offset) {
-		unsigned long ldintr;
-		ldintr = lcdc_read(ch->lcdc, _LDINTR);
-		ldintr |= LDINTR_VEE;
-		lcdc_write(ch->lcdc, _LDINTR, ldintr);
-		sh_mobile_lcdc_deferred_io_touch(info);
-	}
+	if (new_pan_offset == ch->pan_offset)
+		return 0;	/* No change, do nothing */
+
+	ldrcntr = lcdc_read(priv, _LDRCNTR);
+
+	/* Set the source address for the next refresh */
+	lcdc_write_chan_mirror(ch, LDSA1R, ch->dma_handle + new_pan_offset);
+	if (lcdc_chan_is_sublcd(ch))
+		lcdc_write(ch->lcdc, _LDRCNTR, ldrcntr ^ LDRCNTR_SRS);
+	else
+		lcdc_write(ch->lcdc, _LDRCNTR, ldrcntr ^ LDRCNTR_MRS);
+
+	ch->pan_offset = new_pan_offset;
+
+	sh_mobile_lcdc_deferred_io_touch(info);
 
 	return 0;
 }
 
+static int sh_mobile_wait_for_vsync(struct fb_info *info)
+{
+	struct sh_mobile_lcdc_chan *ch = info->par;
+	unsigned long ldintr;
+	int ret;
+
+	/* Enable VSync End interrupt */
+	ldintr = lcdc_read(ch->lcdc, _LDINTR);
+	ldintr |= LDINTR_VEE;
+	lcdc_write(ch->lcdc, _LDINTR, ldintr);
+
+	ret = wait_for_completion_interruptible_timeout(&ch->vsync_completion,
+							msecs_to_jiffies(100));
+	if (!ret)
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+static int sh_mobile_ioctl(struct fb_info *info, unsigned int cmd,
+		       unsigned long arg)
+{
+	int retval;
+
+	switch (cmd) {
+	case FBIO_WAITFORVSYNC:
+		retval = sh_mobile_wait_for_vsync(info);
+		break;
+
+	default:
+		retval = -ENOIOCTLCMD;
+		break;
+	}
+	return retval;
+}
+
+
 static struct fb_ops sh_mobile_lcdc_ops = {
 	.owner          = THIS_MODULE,
 	.fb_setcolreg	= sh_mobile_lcdc_setcolreg,
@@ -795,6 +830,7 @@
 	.fb_copyarea	= sh_mobile_lcdc_copyarea,
 	.fb_imageblit	= sh_mobile_lcdc_imageblit,
 	.fb_pan_display = sh_mobile_fb_pan_display,
+	.fb_ioctl       = sh_mobile_ioctl,
 };
 
 static int sh_mobile_lcdc_set_bpp(struct fb_var_screeninfo *var, int bpp)
@@ -962,8 +998,8 @@
 			goto err1;
 		}
 		init_waitqueue_head(&priv->ch[i].frame_end_wait);
+		init_completion(&priv->ch[i].vsync_completion);
 		priv->ch[j].pan_offset = 0;
-		priv->ch[j].new_pan_offset = 0;
 
 		switch (pdata->ch[i].chan) {
 		case LCDC_CHAN_MAINLCD:
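
With the fb_ioctl hook added above, userspace can block until the next vertical sync on this LCD controller. A hedged userspace sketch; the device path and the fallback define are assumptions, the driver ignores the ioctl argument, and it returns -ETIMEDOUT if no vsync arrives within roughly 100ms:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fb.h>

#ifndef FBIO_WAITFORVSYNC
#define FBIO_WAITFORVSYNC	_IOW('F', 0x20, __u32)	/* common definition if the header lacks it */
#endif

int main(void)
{
	__u32 crtc = 0;				/* argument is not used by this driver */
	int fd = open("/dev/fb0", O_RDWR);	/* assumed framebuffer node */

	if (fd < 0) {
		perror("open /dev/fb0");
		return 1;
	}
	if (ioctl(fd, FBIO_WAITFORVSYNC, &crtc) < 0)
		perror("FBIO_WAITFORVSYNC");
	close(fd);
	return 0;
}
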
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 505be88..369f2ee 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -28,7 +28,7 @@
 struct virtio_balloon
 {
 	struct virtio_device *vdev;
-	struct virtqueue *inflate_vq, *deflate_vq;
+	struct virtqueue *inflate_vq, *deflate_vq, *stats_vq;
 
 	/* Where the ballooning thread waits for config to change. */
 	wait_queue_head_t config_change;
@@ -49,6 +49,10 @@
 	/* The array of pfns we tell the Host about. */
 	unsigned int num_pfns;
 	u32 pfns[256];
+
+	/* Memory statistics */
+	int need_stats_update;
+	struct virtio_balloon_stat stats[VIRTIO_BALLOON_S_NR];
 };
 
 static struct virtio_device_id id_table[] = {
@@ -154,6 +158,72 @@
 	}
 }
 
+static inline void update_stat(struct virtio_balloon *vb, int idx,
+			       u16 tag, u64 val)
+{
+	BUG_ON(idx >= VIRTIO_BALLOON_S_NR);
+	vb->stats[idx].tag = tag;
+	vb->stats[idx].val = val;
+}
+
+#define pages_to_bytes(x) ((u64)(x) << PAGE_SHIFT)
+
+static void update_balloon_stats(struct virtio_balloon *vb)
+{
+	unsigned long events[NR_VM_EVENT_ITEMS];
+	struct sysinfo i;
+	int idx = 0;
+
+	all_vm_events(events);
+	si_meminfo(&i);
+
+	update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_IN,
+				pages_to_bytes(events[PSWPIN]));
+	update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_OUT,
+				pages_to_bytes(events[PSWPOUT]));
+	update_stat(vb, idx++, VIRTIO_BALLOON_S_MAJFLT, events[PGMAJFAULT]);
+	update_stat(vb, idx++, VIRTIO_BALLOON_S_MINFLT, events[PGFAULT]);
+	update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMFREE,
+				pages_to_bytes(i.freeram));
+	update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMTOT,
+				pages_to_bytes(i.totalram));
+}
+
+/*
+ * While most virtqueues communicate guest-initiated requests to the hypervisor,
+ * the stats queue operates in reverse.  The driver initializes the virtqueue
+ * with a single buffer.  From that point forward, all conversations consist of
+ * a hypervisor request (a call to this function) which directs us to refill
+ * the virtqueue with a fresh stats buffer.  Since stats collection can sleep,
+ * we notify our kthread which does the actual work via stats_handle_request().
+ */
+static void stats_request(struct virtqueue *vq)
+{
+	struct virtio_balloon *vb;
+	unsigned int len;
+
+	vb = vq->vq_ops->get_buf(vq, &len);
+	if (!vb)
+		return;
+	vb->need_stats_update = 1;
+	wake_up(&vb->config_change);
+}
+
+static void stats_handle_request(struct virtio_balloon *vb)
+{
+	struct virtqueue *vq;
+	struct scatterlist sg;
+
+	vb->need_stats_update = 0;
+	update_balloon_stats(vb);
+
+	vq = vb->stats_vq;
+	sg_init_one(&sg, vb->stats, sizeof(vb->stats));
+	if (vq->vq_ops->add_buf(vq, &sg, 1, 0, vb) < 0)
+		BUG();
+	vq->vq_ops->kick(vq);
+}
+
 static void virtballoon_changed(struct virtio_device *vdev)
 {
 	struct virtio_balloon *vb = vdev->priv;
@@ -190,8 +260,11 @@
 		try_to_freeze();
 		wait_event_interruptible(vb->config_change,
 					 (diff = towards_target(vb)) != 0
+					 || vb->need_stats_update
 					 || kthread_should_stop()
 					 || freezing(current));
+		if (vb->need_stats_update)
+			stats_handle_request(vb);
 		if (diff > 0)
 			fill_balloon(vb, diff);
 		else if (diff < 0)
@@ -204,10 +277,10 @@
 static int virtballoon_probe(struct virtio_device *vdev)
 {
 	struct virtio_balloon *vb;
-	struct virtqueue *vqs[2];
-	vq_callback_t *callbacks[] = { balloon_ack, balloon_ack };
-	const char *names[] = { "inflate", "deflate" };
-	int err;
+	struct virtqueue *vqs[3];
+	vq_callback_t *callbacks[] = { balloon_ack, balloon_ack, stats_request };
+	const char *names[] = { "inflate", "deflate", "stats" };
+	int err, nvqs;
 
 	vdev->priv = vb = kmalloc(sizeof(*vb), GFP_KERNEL);
 	if (!vb) {
@@ -219,14 +292,31 @@
 	vb->num_pages = 0;
 	init_waitqueue_head(&vb->config_change);
 	vb->vdev = vdev;
+	vb->need_stats_update = 0;
 
-	/* We expect two virtqueues. */
-	err = vdev->config->find_vqs(vdev, 2, vqs, callbacks, names);
+	/* We expect two virtqueues: inflate and deflate,
+	 * and optionally stat. */
+	nvqs = virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ) ? 3 : 2;
+	err = vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names);
 	if (err)
 		goto out_free_vb;
 
 	vb->inflate_vq = vqs[0];
 	vb->deflate_vq = vqs[1];
+	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
+		struct scatterlist sg;
+		vb->stats_vq = vqs[2];
+
+		/*
+		 * Prime this virtqueue with one buffer so the hypervisor can
+		 * use it to signal us later.
+		 */
+		sg_init_one(&sg, vb->stats, sizeof vb->stats);
+		if (vb->stats_vq->vq_ops->add_buf(vb->stats_vq,
+						  &sg, 1, 0, vb) < 0)
+			BUG();
+		vb->stats_vq->vq_ops->kick(vb->stats_vq);
+	}
 
 	vb->thread = kthread_run(balloon, vb, "vballoon");
 	if (IS_ERR(vb->thread)) {
@@ -264,7 +354,10 @@
 	kfree(vb);
 }
 
-static unsigned int features[] = { VIRTIO_BALLOON_F_MUST_TELL_HOST };
+static unsigned int features[] = {
+	VIRTIO_BALLOON_F_MUST_TELL_HOST,
+	VIRTIO_BALLOON_F_STATS_VQ,
+};
 
 static struct virtio_driver virtio_balloon_driver = {
 	.feature_table = features,
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index 28d9cf7..1d5191f 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -702,7 +702,7 @@
 	.name		= "virtio-pci",
 	.id_table	= virtio_pci_id_table,
 	.probe		= virtio_pci_probe,
-	.remove		= virtio_pci_remove,
+	.remove		= __devexit_p(virtio_pci_remove),
 #ifdef CONFIG_PM
 	.suspend	= virtio_pci_suspend,
 	.resume		= virtio_pci_resume,
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index fbd2ecd..0db906b 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -21,6 +21,24 @@
 #include <linux/virtio_config.h>
 #include <linux/device.h>
 
+/* virtio guest is communicating with a virtual "device" that actually runs on
+ * a host processor.  Memory barriers are used to control SMP effects. */
+#ifdef CONFIG_SMP
+/* Where possible, use SMP barriers which are more lightweight than mandatory
+ * barriers, because mandatory barriers control MMIO effects on accesses
+ * through relaxed memory I/O windows (which virtio does not use). */
+#define virtio_mb() smp_mb()
+#define virtio_rmb() smp_rmb()
+#define virtio_wmb() smp_wmb()
+#else
+/* We must force memory ordering even if guest is UP since host could be
+ * running on another CPU, but SMP barriers are defined to barrier() in that
+ * configuration. So fall back to mandatory barriers instead. */
+#define virtio_mb() mb()
+#define virtio_rmb() rmb()
+#define virtio_wmb() wmb()
+#endif
+
 #ifdef DEBUG
 /* For development, we want to crash whenever the ring is screwed. */
 #define BAD_RING(_vq, fmt, args...)				\
@@ -36,10 +54,9 @@
 			panic("%s:in_use = %i\n",		\
 			      (_vq)->vq.name, (_vq)->in_use);	\
 		(_vq)->in_use = __LINE__;			\
-		mb();						\
 	} while (0)
 #define END_USE(_vq) \
-	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; mb(); } while(0)
+	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
 #else
 #define BAD_RING(_vq, fmt, args...)				\
 	do {							\
@@ -221,13 +238,13 @@
 	START_USE(vq);
 	/* Descriptors and available array need to be set before we expose the
 	 * new available array entries. */
-	wmb();
+	virtio_wmb();
 
 	vq->vring.avail->idx += vq->num_added;
 	vq->num_added = 0;
 
 	/* Need to update avail index before checking if we should notify */
-	mb();
+	virtio_mb();
 
 	if (!(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY))
 		/* Prod other side to tell it about changes. */
@@ -286,7 +303,7 @@
 	}
 
 	/* Only get used array entries after they have been exposed by host. */
-	rmb();
+	virtio_rmb();
 
 	i = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].id;
 	*len = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].len;
@@ -324,7 +341,7 @@
 	/* We optimistically turn back on interrupts, then check if there was
 	 * more to do. */
 	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
-	mb();
+	virtio_mb();
 	if (unlikely(more_used(vq))) {
 		END_USE(vq);
 		return false;
@@ -334,6 +351,30 @@
 	return true;
 }
 
+static void *vring_detach_unused_buf(struct virtqueue *_vq)
+{
+	struct vring_virtqueue *vq = to_vvq(_vq);
+	unsigned int i;
+	void *buf;
+
+	START_USE(vq);
+
+	for (i = 0; i < vq->vring.num; i++) {
+		if (!vq->data[i])
+			continue;
+		/* detach_buf clears data, so grab it now. */
+		buf = vq->data[i];
+		detach_buf(vq, i);
+		END_USE(vq);
+		return buf;
+	}
+	/* That should have freed everything. */
+	BUG_ON(vq->num_free != vq->vring.num);
+
+	END_USE(vq);
+	return NULL;
+}
+
 irqreturn_t vring_interrupt(int irq, void *_vq)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
@@ -360,6 +401,7 @@
 	.kick = vring_kick,
 	.disable_cb = vring_disable_cb,
 	.enable_cb = vring_enable_cb,
+	.detach_unused_buf = vring_detach_unused_buf,
 };
 
 struct virtqueue *vring_new_virtqueue(unsigned int num,
@@ -406,8 +448,11 @@
 	/* Put everything in free lists. */
 	vq->num_free = num;
 	vq->free_head = 0;
-	for (i = 0; i < num-1; i++)
+	for (i = 0; i < num-1; i++) {
 		vq->vring.desc[i].next = i+1;
+		vq->data[i] = NULL;
+	}
+	vq->data[i] = NULL;
 
 	return &vq->vq;
 }
diff --git a/fs/proc/proc_devtree.c b/fs/proc/proc_devtree.c
index 123257b..f8650dc 100644
--- a/fs/proc/proc_devtree.c
+++ b/fs/proc/proc_devtree.c
@@ -10,16 +10,19 @@
 #include <linux/seq_file.h>
 #include <linux/stat.h>
 #include <linux/string.h>
+#include <linux/of.h>
+#include <linux/module.h>
 #include <asm/prom.h>
 #include <asm/uaccess.h>
 #include "internal.h"
 
-#ifndef HAVE_ARCH_DEVTREE_FIXUPS
 static inline void set_node_proc_entry(struct device_node *np,
 				       struct proc_dir_entry *de)
 {
-}
+#ifdef HAVE_ARCH_DEVTREE_FIXUPS
+	np->pde = de;
 #endif
+}
 
 static struct proc_dir_entry *proc_device_tree;
 
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 77b8be8..6f3ebb6 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -76,6 +76,27 @@
 #define xfs_buf_deallocate(bp) \
 	kmem_zone_free(xfs_buf_zone, (bp));
 
+static inline int
+xfs_buf_is_vmapped(
+	struct xfs_buf	*bp)
+{
+	/*
+	 * Return true if the buffer is vmapped.
+	 *
+	 * The XBF_MAPPED flag is set if the buffer should be mapped, but the
+	 * code is clever enough to know it doesn't have to map a single page,
+	 * so the check has to be both for XBF_MAPPED and bp->b_page_count > 1.
+	 */
+	return (bp->b_flags & XBF_MAPPED) && bp->b_page_count > 1;
+}
+
+static inline int
+xfs_buf_vmap_len(
+	struct xfs_buf	*bp)
+{
+	return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
+}
+
 /*
  *	Page Region interfaces.
  *
@@ -314,7 +335,7 @@
 	if (bp->b_flags & (_XBF_PAGE_CACHE|_XBF_PAGES)) {
 		uint		i;
 
-		if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
+		if (xfs_buf_is_vmapped(bp))
 			free_address(bp->b_addr - bp->b_offset);
 
 		for (i = 0; i < bp->b_page_count; i++) {
@@ -1107,6 +1128,9 @@
 
 	xfs_buf_ioerror(bp, -error);
 
+	if (!error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
+		invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
+
 	do {
 		struct page	*page = bvec->bv_page;
 
@@ -1216,6 +1240,10 @@
 
 submit_io:
 	if (likely(bio->bi_size)) {
+		if (xfs_buf_is_vmapped(bp)) {
+			flush_kernel_vmap_range(bp->b_addr,
+						xfs_buf_vmap_len(bp));
+		}
 		submit_bio(rw, bio);
 		if (size)
 			goto next_chunk;
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 3cd9ccd..54508cc 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -242,6 +242,8 @@
 struct acpi_device_wakeup_flags {
 	u8 valid:1;		/* Can successfully enable wakeup? */
 	u8 run_wake:1;		/* Run-Wake GPE devices */
+	u8 always_enabled:1;	/* Run-wake devices that are always enabled */
+	u8 notifier_present:1;  /* Wake-up notify handler has been installed */
 };
 
 struct acpi_device_wakeup_state {
@@ -256,6 +258,7 @@
 	struct acpi_device_wakeup_state state;
 	struct acpi_device_wakeup_flags flags;
 	int prepare_count;
+	int run_wake_count;
 };
 
 /* Device */
@@ -386,6 +389,9 @@
 struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle);
 #define DEVICE_ACPI_HANDLE(dev) ((acpi_handle)((dev)->archdata.acpi_handle))
 
+int acpi_enable_wakeup_device_power(struct acpi_device *dev, int state);
+int acpi_disable_wakeup_device_power(struct acpi_device *dev);
+
 #ifdef CONFIG_PM_SLEEP
 int acpi_pm_device_sleep_state(struct device *, int *);
 int acpi_pm_device_sleep_wake(struct device *, bool);
diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
index f4906f6..3a4767c 100644
--- a/include/acpi/acpi_drivers.h
+++ b/include/acpi/acpi_drivers.h
@@ -104,6 +104,7 @@
 
 struct pci_bus *pci_acpi_scan_root(struct acpi_device *device, int domain,
 				   int bus);
+void pci_acpi_crs_quirks(void);
 
 /* --------------------------------------------------------------------------
                                     Processor
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 86e9735..3988f93 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -281,11 +281,11 @@
 /*
  * GPE Interfaces
  */
-acpi_status acpi_set_gpe_type(acpi_handle gpe_device, u32 gpe_number, u8 type);
+acpi_status acpi_set_gpe(acpi_handle gpe_device, u32 gpe_number, u8 action);
 
-acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number);
+acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 type);
 
-acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number);
+acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 type);
 
 acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags);
 
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index 153f12d..73af408 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -668,15 +668,16 @@
 
 /*
  * GPE info flags - Per GPE
- * +-+-+-+---+---+-+
- * |7|6|5|4:3|2:1|0|
- * +-+-+-+---+---+-+
- *  | | |  |   |  |
- *  | | |  |   |  +--- Interrupt type: Edge or Level Triggered
- *  | | |  |   +--- Type: Wake-only, Runtime-only, or wake/runtime
+ * +-+-+-+---+-+-+-+
+ * |7|6|5|4:3|2|1|0|
+ * +-+-+-+---+-+-+-+
+ *  | | |  |  | | |
+ *  | | |  |  | | +--- Interrupt type: Edge or Level Triggered
+ *  | | |  |  | +--- GPE can wake the system
+ *  | | |  |  +--- Unused
  *  | | |  +--- Type of dispatch -- to method, handler, or none
- *  | | +--- Enabled for runtime?
- *  | +--- Enabled for wake?
+ *  | | +--- Unused
+ *  | +--- Unused
  *  +--- Unused
  */
 #define ACPI_GPE_XRUPT_TYPE_MASK        (u8) 0x01
@@ -687,22 +688,13 @@
 #define ACPI_GPE_TYPE_WAKE_RUN          (u8) 0x06
 #define ACPI_GPE_TYPE_WAKE              (u8) 0x02
 #define ACPI_GPE_TYPE_RUNTIME           (u8) 0x04	/* Default */
+#define ACPI_GPE_CAN_WAKE		(u8) 0x02
 
 #define ACPI_GPE_DISPATCH_MASK          (u8) 0x18
 #define ACPI_GPE_DISPATCH_HANDLER       (u8) 0x08
 #define ACPI_GPE_DISPATCH_METHOD        (u8) 0x10
 #define ACPI_GPE_DISPATCH_NOT_USED      (u8) 0x00	/* Default */
 
-#define ACPI_GPE_RUN_ENABLE_MASK        (u8) 0x20
-#define ACPI_GPE_RUN_ENABLED            (u8) 0x20
-#define ACPI_GPE_RUN_DISABLED           (u8) 0x00	/* Default */
-
-#define ACPI_GPE_WAKE_ENABLE_MASK       (u8) 0x40
-#define ACPI_GPE_WAKE_ENABLED           (u8) 0x40
-#define ACPI_GPE_WAKE_DISABLED          (u8) 0x00	/* Default */
-
-#define ACPI_GPE_ENABLE_MASK            (u8) 0x60	/* Both run/wake */
-
 /*
  * Flags for GPE and Lock interfaces
  */
diff --git a/include/crypto/md5.h b/include/crypto/md5.h
new file mode 100644
index 0000000..65f299b
--- /dev/null
+++ b/include/crypto/md5.h
@@ -0,0 +1,17 @@
+#ifndef _CRYPTO_MD5_H
+#define _CRYPTO_MD5_H
+
+#include <linux/types.h>
+
+#define MD5_DIGEST_SIZE		16
+#define MD5_HMAC_BLOCK_SIZE	64
+#define MD5_BLOCK_WORDS		16
+#define MD5_HASH_WORDS		4
+
+struct md5_state {
+	u32 hash[MD5_HASH_WORDS];
+	u32 block[MD5_BLOCK_WORDS];
+	u64 byte_count;
+};
+
+#endif
diff --git a/include/crypto/pcrypt.h b/include/crypto/pcrypt.h
new file mode 100644
index 0000000..d7d8bd8
--- /dev/null
+++ b/include/crypto/pcrypt.h
@@ -0,0 +1,51 @@
+/*
+ * pcrypt - Parallel crypto engine.
+ *
+ * Copyright (C) 2009 secunet Security Networks AG
+ * Copyright (C) 2009 Steffen Klassert <steffen.klassert@secunet.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _CRYPTO_PCRYPT_H
+#define _CRYPTO_PCRYPT_H
+
+#include <linux/crypto.h>
+#include <linux/kernel.h>
+#include <linux/padata.h>
+
+struct pcrypt_request {
+	struct padata_priv	padata;
+	void			*data;
+	void			*__ctx[] CRYPTO_MINALIGN_ATTR;
+};
+
+static inline void *pcrypt_request_ctx(struct pcrypt_request *req)
+{
+	return req->__ctx;
+}
+
+static inline
+struct padata_priv *pcrypt_request_padata(struct pcrypt_request *req)
+{
+	return &req->padata;
+}
+
+static inline
+struct pcrypt_request *pcrypt_padata_request(struct padata_priv *padata)
+{
+	return container_of(padata, struct pcrypt_request, padata);
+}
+
+#endif
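
Note: a minimal sketch of how the helpers above fit together. The my_* names are hypothetical, and the sketch assumes the request was allocated with room for the private context behind it (the __ctx[] flexible array); it only illustrates the container_of relationship between padata_priv, pcrypt_request and the request context.

#include <crypto/pcrypt.h>

struct my_req_ctx {
	int status;
};

/* Serialization side: recover the pcrypt request from the padata object. */
static void my_serial(struct padata_priv *padata)
{
	struct pcrypt_request *preq = pcrypt_padata_request(padata);
	struct my_req_ctx *ctx = pcrypt_request_ctx(preq);

	/* status stashed in padata->info by the parallel side (a convention of this sketch) */
	ctx->status = padata->info;
}
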
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 8709365..b1344ec 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -501,7 +501,7 @@
 	void (*hiddev_report_event) (struct hid_device *, struct hid_report *);
 
 	/* handler for raw output data, used by hidraw */
-	int (*hid_output_raw_report) (struct hid_device *, __u8 *, size_t);
+	int (*hid_output_raw_report) (struct hid_device *, __u8 *, size_t, unsigned char);
 
 	/* debugging support via debugfs */
 	unsigned short debug;
@@ -663,7 +663,7 @@
 
 /* Applications from HID Usage Tables 4/8/99 Version 1.1 */
 /* We ignore a few input applications that are not widely used */
-#define IS_INPUT_APPLICATION(a) (((a >= 0x00010000) && (a <= 0x00010008)) || (a == 0x00010080) || (a == 0x000c0001) || (a == 0x000d0002))
+#define IS_INPUT_APPLICATION(a) (((a >= 0x00010000) && (a <= 0x00010008)) || (a == 0x00010080) || (a == 0x000c0001) || ((a >= 0x000d0002) && (a <= 0x000d0006)))
 
 /* HID core API */
 
@@ -690,6 +690,7 @@
 int hidinput_find_field(struct hid_device *hid, unsigned int type, unsigned int code, struct hid_field **field);
 void hid_output_report(struct hid_report *report, __u8 *data);
 struct hid_device *hid_allocate_device(void);
+struct hid_report *hid_register_report(struct hid_device *device, unsigned type, unsigned id);
 int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size);
 int hid_check_keys_pressed(struct hid_device *hid);
 int hid_connect(struct hid_device *hid, unsigned int connect_mask);
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index ab2cc20..74152c0 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -17,6 +17,12 @@
 static inline void flush_kernel_dcache_page(struct page *page)
 {
 }
+static inline void flush_kernel_vmap_range(void *vaddr, int size)
+{
+}
+static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
+{
+}
 #endif
 
 #include <asm/kmap_types.h>
diff --git a/include/linux/input.h b/include/linux/input.h
index 663208a..f44ee91 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -598,6 +598,48 @@
 
 #define KEY_CAMERA_FOCUS	0x210
 
+#define BTN_TRIGGER_HAPPY		0x2c0
+#define BTN_TRIGGER_HAPPY1		0x2c0
+#define BTN_TRIGGER_HAPPY2		0x2c1
+#define BTN_TRIGGER_HAPPY3		0x2c2
+#define BTN_TRIGGER_HAPPY4		0x2c3
+#define BTN_TRIGGER_HAPPY5		0x2c4
+#define BTN_TRIGGER_HAPPY6		0x2c5
+#define BTN_TRIGGER_HAPPY7		0x2c6
+#define BTN_TRIGGER_HAPPY8		0x2c7
+#define BTN_TRIGGER_HAPPY9		0x2c8
+#define BTN_TRIGGER_HAPPY10		0x2c9
+#define BTN_TRIGGER_HAPPY11		0x2ca
+#define BTN_TRIGGER_HAPPY12		0x2cb
+#define BTN_TRIGGER_HAPPY13		0x2cc
+#define BTN_TRIGGER_HAPPY14		0x2cd
+#define BTN_TRIGGER_HAPPY15		0x2ce
+#define BTN_TRIGGER_HAPPY16		0x2cf
+#define BTN_TRIGGER_HAPPY17		0x2d0
+#define BTN_TRIGGER_HAPPY18		0x2d1
+#define BTN_TRIGGER_HAPPY19		0x2d2
+#define BTN_TRIGGER_HAPPY20		0x2d3
+#define BTN_TRIGGER_HAPPY21		0x2d4
+#define BTN_TRIGGER_HAPPY22		0x2d5
+#define BTN_TRIGGER_HAPPY23		0x2d6
+#define BTN_TRIGGER_HAPPY24		0x2d7
+#define BTN_TRIGGER_HAPPY25		0x2d8
+#define BTN_TRIGGER_HAPPY26		0x2d9
+#define BTN_TRIGGER_HAPPY27		0x2da
+#define BTN_TRIGGER_HAPPY28		0x2db
+#define BTN_TRIGGER_HAPPY29		0x2dc
+#define BTN_TRIGGER_HAPPY30		0x2dd
+#define BTN_TRIGGER_HAPPY31		0x2de
+#define BTN_TRIGGER_HAPPY32		0x2df
+#define BTN_TRIGGER_HAPPY33		0x2e0
+#define BTN_TRIGGER_HAPPY34		0x2e1
+#define BTN_TRIGGER_HAPPY35		0x2e2
+#define BTN_TRIGGER_HAPPY36		0x2e3
+#define BTN_TRIGGER_HAPPY37		0x2e4
+#define BTN_TRIGGER_HAPPY38		0x2e5
+#define BTN_TRIGGER_HAPPY39		0x2e6
+#define BTN_TRIGGER_HAPPY40		0x2e7
+
 /* We avoid low common keys in module aliases so they don't get huge. */
 #define KEY_MIN_INTERESTING	KEY_MUTE
 #define KEY_MAX			0x2ff
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 7129504..dda9841 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -112,6 +112,7 @@
 
 extern int request_resource(struct resource *root, struct resource *new);
 extern int release_resource(struct resource *new);
+void release_child_resources(struct resource *new);
 extern void reserve_region_with_split(struct resource *root,
 			     resource_size_t start, resource_size_t end,
 			     const char *name);
@@ -120,8 +121,10 @@
 extern int allocate_resource(struct resource *root, struct resource *new,
 			     resource_size_t size, resource_size_t min,
 			     resource_size_t max, resource_size_t align,
-			     void (*alignf)(void *, struct resource *,
-					    resource_size_t, resource_size_t),
+			     resource_size_t (*alignf)(void *,
+						       const struct resource *,
+						       resource_size_t,
+						       resource_size_t),
 			     void *alignf_data);
 int adjust_resource(struct resource *res, resource_size_t start,
 		    resource_size_t size);
diff --git a/include/linux/mtd/sh_flctl.h b/include/linux/mtd/sh_flctl.h
index e77c1ce..ab77609 100644
--- a/include/linux/mtd/sh_flctl.h
+++ b/include/linux/mtd/sh_flctl.h
@@ -51,6 +51,8 @@
 #define _4ECCCNTEN	(0x1 << 24)
 #define _4ECCEN		(0x1 << 23)
 #define _4ECCCORRECT	(0x1 << 22)
+#define SHBUSSEL	(0x1 << 20)
+#define SEL_16BIT	(0x1 << 19)
 #define SNAND_E		(0x1 << 18)	/* SNAND (0=512 1=2048)*/
 #define QTSEL_E		(0x1 << 17)
 #define ENDIAN		(0x1 << 16)	/* 1 = little endian */
@@ -96,6 +98,7 @@
 struct sh_flctl {
 	struct mtd_info		mtd;
 	struct nand_chip	chip;
+	struct platform_device	*pdev;
 	void __iomem		*reg;
 
 	uint8_t	done_buff[2048 + 64];	/* max size 2048 + 64 */
diff --git a/include/linux/of.h b/include/linux/of.h
index e7facd8..f6d9cbc 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -19,6 +19,11 @@
 #include <linux/bitops.h>
 #include <linux/kref.h>
 #include <linux/mod_devicetable.h>
+#include <linux/spinlock.h>
+
+#include <asm/byteorder.h>
+
+#ifdef CONFIG_OF
 
 typedef u32 phandle;
 typedef u32 ihandle;
@@ -39,10 +44,7 @@
 struct device_node {
 	const char *name;
 	const char *type;
-	phandle	node;
-#if !defined(CONFIG_SPARC)
-	phandle linux_phandle;
-#endif
+	phandle phandle;
 	char	*full_name;
 
 	struct	property *properties;
@@ -63,6 +65,11 @@
 #endif
 };
 
+/* Pointer for first entry in chain of all nodes. */
+extern struct device_node *allnodes;
+extern struct device_node *of_chosen;
+extern rwlock_t devtree_lock;
+
 static inline int of_node_check_flag(struct device_node *n, unsigned long flag)
 {
 	return test_bit(flag, &n->_flags);
@@ -73,12 +80,6 @@
 	set_bit(flag, &n->_flags);
 }
 
-static inline void
-set_node_proc_entry(struct device_node *dn, struct proc_dir_entry *de)
-{
-	dn->pde = de;
-}
-
 extern struct device_node *of_find_all_nodes(struct device_node *prev);
 
 #if defined(CONFIG_SPARC)
@@ -101,26 +102,36 @@
  */
 
 /* Helper to read a big number; size is in cells (not bytes) */
-static inline u64 of_read_number(const u32 *cell, int size)
+static inline u64 of_read_number(const __be32 *cell, int size)
 {
 	u64 r = 0;
 	while (size--)
-		r = (r << 32) | *(cell++);
+		r = (r << 32) | be32_to_cpu(*(cell++));
 	return r;
 }
 
 /* Like of_read_number, but we want an unsigned long result */
-#ifdef CONFIG_PPC32
-static inline unsigned long of_read_ulong(const u32 *cell, int size)
+static inline unsigned long of_read_ulong(const __be32 *cell, int size)
 {
-	return cell[size-1];
+	/* toss away upper bits if unsigned long is smaller than u64 */
+	return of_read_number(cell, size);
 }
-#else
-#define of_read_ulong(cell, size)	of_read_number(cell, size)
-#endif
 
 #include <asm/prom.h>
 
+/* Default #address and #size cells.  Allow arch asm/prom.h to override */
+#if !defined(OF_ROOT_NODE_ADDR_CELLS_DEFAULT)
+#define OF_ROOT_NODE_ADDR_CELLS_DEFAULT 1
+#define OF_ROOT_NODE_SIZE_CELLS_DEFAULT 1
+#endif
+
+/* Default string compare functions.  Allow arch asm/prom.h to override */

+#if !defined(of_compat_cmp)
+#define of_compat_cmp(s1, s2, l)	strncasecmp((s1), (s2), (l))
+#define of_prop_cmp(s1, s2)		strcmp((s1), (s2))
+#define of_node_cmp(s1, s2)		strcasecmp((s1), (s2))
+#endif
+
 /* flag descriptions */
 #define OF_DYNAMIC	1 /* node and properties were allocated via kmalloc */
 #define OF_DETACHED	2 /* node has been detached from the device tree */
@@ -187,4 +198,19 @@
 	const char *list_name, const char *cells_name, int index,
 	struct device_node **out_node, const void **out_args);
 
+extern int of_machine_is_compatible(const char *compat);
+
+extern int prom_add_property(struct device_node* np, struct property* prop);
+extern int prom_remove_property(struct device_node *np, struct property *prop);
+extern int prom_update_property(struct device_node *np,
+				struct property *newprop,
+				struct property *oldprop);
+
+#if defined(CONFIG_OF_DYNAMIC)
+/* For updating the device tree at runtime */
+extern void of_attach_node(struct device_node *);
+extern void of_detach_node(struct device_node *);
+#endif
+
+#endif /* CONFIG_OF */
 #endif /* _LINUX_OF_H */
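
Note: a small illustrative sketch of the byte-order-aware helper above. The node, the "reg" property and its two-cell encoding are assumptions about a particular device tree, not something this header guarantees.

#include <linux/of.h>

static u64 example_read_reg_base(struct device_node *np)
{
	/* assumed: "reg" holds one 64-bit value encoded as two be32 cells */
	const __be32 *reg = of_get_property(np, "reg", NULL);

	if (!reg)
		return 0;

	return of_read_number(reg, 2);	/* combines the two cells, MSB first */
}
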
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index 41d432b..a1ca92c 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -42,45 +42,62 @@
  * ends when size is 0
  */
 struct boot_param_header {
-	u32	magic;			/* magic word OF_DT_HEADER */
-	u32	totalsize;		/* total size of DT block */
-	u32	off_dt_struct;		/* offset to structure */
-	u32	off_dt_strings;		/* offset to strings */
-	u32	off_mem_rsvmap;		/* offset to memory reserve map */
-	u32	version;		/* format version */
-	u32	last_comp_version;	/* last compatible version */
+	__be32	magic;			/* magic word OF_DT_HEADER */
+	__be32	totalsize;		/* total size of DT block */
+	__be32	off_dt_struct;		/* offset to structure */
+	__be32	off_dt_strings;		/* offset to strings */
+	__be32	off_mem_rsvmap;		/* offset to memory reserve map */
+	__be32	version;		/* format version */
+	__be32	last_comp_version;	/* last compatible version */
 	/* version 2 fields below */
-	u32	boot_cpuid_phys;	/* Physical CPU id we're booting on */
+	__be32	boot_cpuid_phys;	/* Physical CPU id we're booting on */
 	/* version 3 fields below */
-	u32	dt_strings_size;	/* size of the DT strings block */
+	__be32	dt_strings_size;	/* size of the DT strings block */
 	/* version 17 fields below */
-	u32	dt_struct_size;		/* size of the DT structure block */
+	__be32	dt_struct_size;		/* size of the DT structure block */
 };
 
+/* TBD: Temporary export of fdt globals - remove when code fully merged */
+extern int __initdata dt_root_addr_cells;
+extern int __initdata dt_root_size_cells;
+extern struct boot_param_header *initial_boot_params;
+
 /* For scanning the flat device-tree at boot time */
-extern int __init of_scan_flat_dt(int (*it)(unsigned long node,
-					    const char *uname, int depth,
-					    void *data),
-				  void *data);
-extern void __init *of_get_flat_dt_prop(unsigned long node, const char *name,
-					unsigned long *size);
-extern int __init of_flat_dt_is_compatible(unsigned long node,
-					   const char *name);
-extern unsigned long __init of_get_flat_dt_root(void);
+extern char *find_flat_dt_string(u32 offset);
+extern int of_scan_flat_dt(int (*it)(unsigned long node, const char *uname,
+				     int depth, void *data),
+			   void *data);
+extern void *of_get_flat_dt_prop(unsigned long node, const char *name,
+				 unsigned long *size);
+extern int of_flat_dt_is_compatible(unsigned long node, const char *name);
+extern unsigned long of_get_flat_dt_root(void);
+extern void early_init_dt_scan_chosen_arch(unsigned long node);
+extern int early_init_dt_scan_chosen(unsigned long node, const char *uname,
+				     int depth, void *data);
+extern void early_init_dt_check_for_initrd(unsigned long node);
+extern int early_init_dt_scan_memory(unsigned long node, const char *uname,
+				     int depth, void *data);
+extern void early_init_dt_add_memory_arch(u64 base, u64 size);
+extern u64 early_init_dt_alloc_memory_arch(u64 size, u64 align);
+extern u64 dt_mem_next_cell(int s, __be32 **cellp);
+
+/*
+ * If BLK_DEV_INITRD, the fdt early init code will call this function,
+ * to be provided by the arch code. start and end are specified as
+ * physical addresses.
+ */
+#ifdef CONFIG_BLK_DEV_INITRD
+extern void early_init_dt_setup_initrd_arch(unsigned long start,
+					    unsigned long end);
+#endif
+
+/* Early flat tree scan hooks */
+extern int early_init_dt_scan_root(unsigned long node, const char *uname,
+				   int depth, void *data);
 
 /* Other Prototypes */
-extern void finish_device_tree(void);
 extern void unflatten_device_tree(void);
 extern void early_init_devtree(void *);
-extern int machine_is_compatible(const char *compat);
-extern void print_properties(struct device_node *node);
-extern int prom_n_intr_cells(struct device_node* np);
-extern void prom_get_irq_senses(unsigned char *senses, int off, int max);
-extern int prom_add_property(struct device_node* np, struct property* prop);
-extern int prom_remove_property(struct device_node *np, struct property *prop);
-extern int prom_update_property(struct device_node *np,
-				struct property *newprop,
-				struct property *oldprop);
 
 #endif /* __ASSEMBLY__ */
 #endif /* _LINUX_OF_FDT_H */
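
Note: as a usage sketch of of_scan_flat_dt() (the compatible string and function names are made up), an early-boot caller supplies an iterator that returns non-zero to stop the walk:

#include <linux/init.h>
#include <linux/of_fdt.h>

static int __init example_scan(unsigned long node, const char *uname,
			       int depth, void *data)
{
	if (!of_flat_dt_is_compatible(node, "acme,board-ctrl"))
		return 0;			/* keep scanning */

	*(unsigned long *)data = node;
	return 1;				/* found it, stop the scan */
}

static unsigned long __init example_find_ctrl(void)
{
	unsigned long node = 0;

	of_scan_flat_dt(example_scan, &node);
	return node;
}
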
diff --git a/include/linux/padata.h b/include/linux/padata.h
new file mode 100644
index 0000000..51611da
--- /dev/null
+++ b/include/linux/padata.h
@@ -0,0 +1,88 @@
+/*
+ * padata.h - header for the padata parallelization interface
+ *
+ * Copyright (C) 2008, 2009 secunet Security Networks AG
+ * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef PADATA_H
+#define PADATA_H
+
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+
+struct padata_priv {
+	struct list_head	list;
+	struct parallel_data	*pd;
+	int			cb_cpu;
+	int			seq_nr;
+	int			info;
+	void                    (*parallel)(struct padata_priv *padata);
+	void                    (*serial)(struct padata_priv *padata);
+};
+
+struct padata_list {
+	struct list_head        list;
+	spinlock_t              lock;
+};
+
+struct padata_queue {
+	struct padata_list	parallel;
+	struct padata_list	reorder;
+	struct padata_list	serial;
+	struct work_struct	pwork;
+	struct work_struct	swork;
+	struct parallel_data    *pd;
+	atomic_t		num_obj;
+	int			cpu_index;
+};
+
+struct parallel_data {
+	struct padata_instance	*pinst;
+	struct padata_queue	*queue;
+	atomic_t		seq_nr;
+	atomic_t		reorder_objects;
+	atomic_t                refcnt;
+	unsigned int		max_seq_nr;
+	cpumask_var_t		cpumask;
+	spinlock_t              lock;
+};
+
+struct padata_instance {
+	struct notifier_block   cpu_notifier;
+	struct workqueue_struct *wq;
+	struct parallel_data	*pd;
+	cpumask_var_t           cpumask;
+	struct mutex		lock;
+	u8			flags;
+#define	PADATA_INIT		1
+#define	PADATA_RESET		2
+};
+
+extern struct padata_instance *padata_alloc(const struct cpumask *cpumask,
+					    struct workqueue_struct *wq);
+extern void padata_free(struct padata_instance *pinst);
+extern int padata_do_parallel(struct padata_instance *pinst,
+			      struct padata_priv *padata, int cb_cpu);
+extern void padata_do_serial(struct padata_priv *padata);
+extern int padata_set_cpumask(struct padata_instance *pinst,
+			      cpumask_var_t cpumask);
+extern int padata_add_cpu(struct padata_instance *pinst, int cpu);
+extern int padata_remove_cpu(struct padata_instance *pinst, int cpu);
+extern void padata_start(struct padata_instance *pinst);
+extern void padata_stop(struct padata_instance *pinst);
+#endif
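
Note: a minimal usage sketch of this interface, assuming a caller-provided workqueue and a padata_priv embedded in the caller's object; the instance itself would be set up once with padata_alloc() and padata_start(). Every object submitted through padata_do_parallel() has to be completed with padata_do_serial().

#include <linux/kernel.h>
#include <linux/padata.h>

struct my_obj {
	struct padata_priv padata;
	/* ... payload ... */
};

static void my_parallel(struct padata_priv *padata)
{
	struct my_obj *obj = container_of(padata, struct my_obj, padata);

	/* expensive per-object work, possibly running on another cpu */

	padata_do_serial(&obj->padata);		/* hand the object back for ordering */
}

static void my_serial(struct padata_priv *padata)
{
	/* objects arrive here in submission order, on the requested cb_cpu */
}

static int my_submit(struct padata_instance *pinst, struct my_obj *obj, int cb_cpu)
{
	obj->padata.parallel = my_parallel;
	obj->padata.serial = my_serial;

	/* cb_cpu must be in the instance's cpumask;
	 * returns -EINPROGRESS once the object has been queued */
	return padata_do_parallel(pinst, &obj->padata, cb_cpu);
}
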
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h
index 93a7c08f..c8b6473 100644
--- a/include/linux/pci-acpi.h
+++ b/include/linux/pci-acpi.h
@@ -11,6 +11,13 @@
 #include <linux/acpi.h>
 
 #ifdef CONFIG_ACPI
+extern acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev,
+						 struct pci_bus *pci_bus);
+extern acpi_status pci_acpi_remove_bus_pm_notifier(struct acpi_device *dev);
+extern acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev,
+					     struct pci_dev *pci_dev);
+extern acpi_status pci_acpi_remove_pm_notifier(struct acpi_device *dev);
+
 static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev)
 {
 	struct pci_bus *pbus = pdev->bus;
diff --git a/include/linux/pci.h b/include/linux/pci.h
index c1968f4..e19a696 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -187,6 +187,33 @@
 	PCI_BUS_FLAGS_NO_MMRBC = (__force pci_bus_flags_t) 2,
 };
 
+/* Based on the PCI Hotplug Spec, but some values are made up by us */
+enum pci_bus_speed {
+	PCI_SPEED_33MHz			= 0x00,
+	PCI_SPEED_66MHz			= 0x01,
+	PCI_SPEED_66MHz_PCIX		= 0x02,
+	PCI_SPEED_100MHz_PCIX		= 0x03,
+	PCI_SPEED_133MHz_PCIX		= 0x04,
+	PCI_SPEED_66MHz_PCIX_ECC	= 0x05,
+	PCI_SPEED_100MHz_PCIX_ECC	= 0x06,
+	PCI_SPEED_133MHz_PCIX_ECC	= 0x07,
+	PCI_SPEED_66MHz_PCIX_266	= 0x09,
+	PCI_SPEED_100MHz_PCIX_266	= 0x0a,
+	PCI_SPEED_133MHz_PCIX_266	= 0x0b,
+	AGP_UNKNOWN			= 0x0c,
+	AGP_1X				= 0x0d,
+	AGP_2X				= 0x0e,
+	AGP_4X				= 0x0f,
+	AGP_8X				= 0x10,
+	PCI_SPEED_66MHz_PCIX_533	= 0x11,
+	PCI_SPEED_100MHz_PCIX_533	= 0x12,
+	PCI_SPEED_133MHz_PCIX_533	= 0x13,
+	PCIE_SPEED_2_5GT		= 0x14,
+	PCIE_SPEED_5_0GT		= 0x15,
+	PCIE_SPEED_8_0GT		= 0x16,
+	PCI_SPEED_UNKNOWN		= 0xff,
+};
+
 struct pci_cap_saved_state {
 	struct hlist_node next;
 	char cap_nr;
@@ -239,6 +266,7 @@
 					   configuration space */
 	unsigned int	pme_support:5;	/* Bitmask of states from which PME#
 					   can be generated */
+	unsigned int	pme_interrupt:1;
 	unsigned int	d1_support:1;	/* Low power state D1 is supported */
 	unsigned int	d2_support:1;	/* Low power state D2 is supported */
 	unsigned int	no_d1d2:1;	/* Only allow D0 and D3 */
@@ -275,7 +303,8 @@
 	unsigned int	msix_enabled:1;
 	unsigned int	ari_enabled:1;	/* ARI forwarding */
 	unsigned int	is_managed:1;
-	unsigned int	is_pcie:1;
+	unsigned int	is_pcie:1;	/* Obsolete. Will be removed.
+					   Use pci_is_pcie() instead */
 	unsigned int    needs_freset:1; /* Dev requires fundamental reset */
 	unsigned int	state_saved:1;
 	unsigned int	is_physfn:1;
@@ -335,9 +364,26 @@
 	hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
 }
 
-#ifndef PCI_BUS_NUM_RESOURCES
-#define PCI_BUS_NUM_RESOURCES	16
-#endif
+/*
+ * The first PCI_BRIDGE_RESOURCE_NUM PCI bus resources (those that correspond
+ * to P2P or CardBus bridge windows) go in a table.  Additional ones (for
+ * buses below host bridges or subtractive decode bridges) go in the list.
+ * Use pci_bus_for_each_resource() to iterate through all the resources.
+ */
+
+/*
+ * PCI_SUBTRACTIVE_DECODE means the bridge forwards the window implicitly
+ * and there's no way to program the bridge with the details of the window.
+ * This does not apply to ACPI _CRS windows, even with the _DEC subtractive-
+ * decode bit set, because they are explicit and can be programmed with _SRS.
+ */
+#define PCI_SUBTRACTIVE_DECODE	0x1
+
+struct pci_bus_resource {
+	struct list_head list;
+	struct resource *res;
+	unsigned int flags;
+};
 
 #define PCI_REGION_FLAG_MASK	0x0fU	/* These bits of resource flags tell us the PCI region flags */
 
@@ -348,8 +394,8 @@
 	struct list_head devices;	/* list of devices on this bus */
 	struct pci_dev	*self;		/* bridge device as seen by parent */
 	struct list_head slots;		/* list of slots on this bus */
-	struct resource	*resource[PCI_BUS_NUM_RESOURCES];
-					/* address space routed to this bus */
+	struct resource *resource[PCI_BRIDGE_RESOURCE_NUM];
+	struct list_head resources;	/* address space routed to this bus */
 
 	struct pci_ops	*ops;		/* configuration access functions */
 	void		*sysdata;	/* hook for sys-specific extension */
@@ -359,6 +405,8 @@
 	unsigned char	primary;	/* number of primary bridge */
 	unsigned char	secondary;	/* number of secondary bridge */
 	unsigned char	subordinate;	/* max number of subordinate buses */
+	unsigned char	max_bus_speed;	/* enum pci_bus_speed */
+	unsigned char	cur_bus_speed;	/* enum pci_bus_speed */
 
 	char		name[48];
 
@@ -563,7 +611,8 @@
 char *pcibios_setup(char *str);
 
 /* Used only when drivers/pci/setup.c is used */
-void pcibios_align_resource(void *, struct resource *, resource_size_t,
+resource_size_t pcibios_align_resource(void *, const struct resource *,
+				resource_size_t,
 				resource_size_t);
 void pcibios_update_irq(struct pci_dev *, int irq);
 
@@ -589,6 +638,7 @@
 			       struct pci_ops *ops, void *sysdata);
 struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
 				int busnr);
+void pcie_update_link_speed(struct pci_bus *bus, u16 link_status);
 struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
 				 const char *name,
 				 struct hotplug_slot *hotplug);
@@ -615,12 +665,6 @@
 
 /* Generic PCI functions exported to card drivers */
 
-#ifdef CONFIG_PCI_LEGACY
-struct pci_dev __deprecated *pci_find_device(unsigned int vendor,
-					     unsigned int device,
-					     struct pci_dev *from);
-#endif /* CONFIG_PCI_LEGACY */
-
 enum pci_lost_interrupt_reason {
 	PCI_LOST_IRQ_NO_INFORMATION = 0,
 	PCI_LOST_IRQ_DISABLE_MSI,
@@ -750,11 +794,19 @@
 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state);
 void pci_pme_active(struct pci_dev *dev, bool enable);
-int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable);
+int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
+		      bool runtime, bool enable);
 int pci_wake_from_d3(struct pci_dev *dev, bool enable);
 pci_power_t pci_target_state(struct pci_dev *dev);
 int pci_prepare_to_sleep(struct pci_dev *dev);
 int pci_back_from_sleep(struct pci_dev *dev);
+bool pci_dev_run_wake(struct pci_dev *dev);
+
+static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
+				  bool enable)
+{
+	return __pci_enable_wake(dev, state, false, enable);
+}
 
 /* For use by arch with custom probe code */
 void set_pcie_port_type(struct pci_dev *pdev);
@@ -776,6 +828,7 @@
 void pci_bus_size_bridges(struct pci_bus *bus);
 int pci_claim_resource(struct pci_dev *, int);
 void pci_assign_unassigned_resources(void);
+void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge);
 void pdev_enable_device(struct pci_dev *);
 void pdev_sort_resources(struct pci_dev *, struct resource_list *);
 int pci_enable_resources(struct pci_dev *, int mask);
@@ -793,12 +846,23 @@
 void pci_release_selected_regions(struct pci_dev *, int);
 
 /* drivers/pci/bus.c */
+void pci_bus_add_resource(struct pci_bus *bus, struct resource *res, unsigned int flags);
+struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n);
+void pci_bus_remove_resources(struct pci_bus *bus);
+
+#define pci_bus_for_each_resource(bus, res, i)				\
+	for (i = 0;							\
+	    (res = pci_bus_resource_n(bus, i)) || i < PCI_BRIDGE_RESOURCE_NUM; \
+	     i++)
+
 int __must_check pci_bus_alloc_resource(struct pci_bus *bus,
 			struct resource *res, resource_size_t size,
 			resource_size_t align, resource_size_t min,
 			unsigned int type_mask,
-			void (*alignf)(void *, struct resource *,
-				resource_size_t, resource_size_t),
+			resource_size_t (*alignf)(void *,
+						  const struct resource *,
+						  resource_size_t,
+						  resource_size_t),
 			void *alignf_data);
 void pci_enable_bridges(struct pci_bus *bus);
 
@@ -977,13 +1041,6 @@
 _PCI_NOP_ALL(read, *)
 _PCI_NOP_ALL(write,)
 
-static inline struct pci_dev *pci_find_device(unsigned int vendor,
-					      unsigned int device,
-					      struct pci_dev *from)
-{
-	return NULL;
-}
-
 static inline struct pci_dev *pci_get_device(unsigned int vendor,
 					     unsigned int device,
 					     struct pci_dev *from)
@@ -1241,8 +1298,12 @@
 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend,			\
 			suspend##vendor##device##hook, vendor, device, hook)
 
-
+#ifdef CONFIG_PCI_QUIRKS
 void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
+#else
+static inline void pci_fixup_device(enum pci_fixup_pass pass,
+				    struct pci_dev *dev) {}
+#endif
 
 void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen);
 void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr);
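
Note: with bus resources now split between the fixed table and the resources list, a sketch of walking all of them with the new iterator (the function is hypothetical; %pR printing of a resource follows the existing kernel convention):

#include <linux/kernel.h>
#include <linux/pci.h>

static void example_dump_bus_windows(struct pci_bus *bus)
{
	struct resource *res;
	int i;

	pci_bus_for_each_resource(bus, res, i) {
		if (!res)
			continue;	/* unused slots in the fixed table are NULL */
		printk(KERN_DEBUG "%s: window %d %pR\n", bus->name, i, res);
	}
}
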
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
index 652ba79..5d09cba 100644
--- a/include/linux/pci_hotplug.h
+++ b/include/linux/pci_hotplug.h
@@ -28,26 +28,6 @@
 #ifndef _PCI_HOTPLUG_H
 #define _PCI_HOTPLUG_H
 
-
-/* These values come from the PCI Hotplug Spec */
-enum pci_bus_speed {
-	PCI_SPEED_33MHz			= 0x00,
-	PCI_SPEED_66MHz			= 0x01,
-	PCI_SPEED_66MHz_PCIX		= 0x02,
-	PCI_SPEED_100MHz_PCIX		= 0x03,
-	PCI_SPEED_133MHz_PCIX		= 0x04,
-	PCI_SPEED_66MHz_PCIX_ECC	= 0x05,
-	PCI_SPEED_100MHz_PCIX_ECC	= 0x06,
-	PCI_SPEED_133MHz_PCIX_ECC	= 0x07,
-	PCI_SPEED_66MHz_PCIX_266	= 0x09,
-	PCI_SPEED_100MHz_PCIX_266	= 0x0a,
-	PCI_SPEED_133MHz_PCIX_266	= 0x0b,
-	PCI_SPEED_66MHz_PCIX_533	= 0x11,
-	PCI_SPEED_100MHz_PCIX_533	= 0x12,
-	PCI_SPEED_133MHz_PCIX_533	= 0x13,
-	PCI_SPEED_UNKNOWN		= 0xff,
-};
-
 /* These values come from the PCI Express Spec */
 enum pcie_link_width {
 	PCIE_LNK_WIDTH_RESRV	= 0x00,
@@ -61,12 +41,6 @@
 	PCIE_LNK_WIDTH_UNKNOWN  = 0xFF,
 };
 
-enum pcie_link_speed {
-	PCIE_2_5GB		= 0x14,
-	PCIE_5_0GB		= 0x15,
-	PCIE_LNK_SPEED_UNKNOWN	= 0xFF,
-};
-
 /**
  * struct hotplug_slot_ops -the callbacks that the hotplug pci core can use
  * @owner: The module owner of this structure
@@ -89,12 +63,6 @@
 * @get_adapter_status: Called to see if an adapter is present in the slot or not.
  *	If this field is NULL, the value passed in the struct hotplug_slot_info
  *	will be used when this value is requested by a user.
- * @get_max_bus_speed: Called to get the max bus speed for a slot.
- *	If this field is NULL, the value passed in the struct hotplug_slot_info
- *	will be used when this value is requested by a user.
- * @get_cur_bus_speed: Called to get the current bus speed for a slot.
- *	If this field is NULL, the value passed in the struct hotplug_slot_info
- *	will be used when this value is requested by a user.
  *
  * The table of function pointers that is passed to the hotplug pci core by a
  * hotplug pci driver.  These functions are called by the hotplug pci core when
@@ -112,17 +80,14 @@
 	int (*get_attention_status)	(struct hotplug_slot *slot, u8 *value);
 	int (*get_latch_status)		(struct hotplug_slot *slot, u8 *value);
 	int (*get_adapter_status)	(struct hotplug_slot *slot, u8 *value);
-	int (*get_max_bus_speed)	(struct hotplug_slot *slot, enum pci_bus_speed *value);
-	int (*get_cur_bus_speed)	(struct hotplug_slot *slot, enum pci_bus_speed *value);
 };
 
 /**
  * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
- * @power: if power is enabled or not (1/0)
+ * @power_status: if power is enabled or not (1/0)
  * @attention_status: if the attention light is enabled or not (1/0)
  * @latch_status: if the latch (if any) is open or closed (1/0)
- * @adapter_present: if there is a pci board present in the slot or not (1/0)
- * @address: (domain << 16 | bus << 8 | dev)
+ * @adapter_status: if there is a pci board present in the slot or not (1/0)
  *
  * Used to notify the hotplug pci core of the status of a specific slot.
  */
@@ -131,8 +96,6 @@
 	u8	attention_status;
 	u8	latch_status;
 	u8	adapter_status;
-	enum pci_bus_speed	max_bus_speed;
-	enum pci_bus_speed	cur_bus_speed;
 };
 
 /**
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index cca8a04..0be8243 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2417,6 +2417,9 @@
 #define PCI_DEVICE_ID_INTEL_82840_HB	0x1a21
 #define PCI_DEVICE_ID_INTEL_82845_HB	0x1a30
 #define PCI_DEVICE_ID_INTEL_IOAT	0x1a38
+#define PCI_DEVICE_ID_INTEL_CPT_SMBUS	0x1c22
+#define PCI_DEVICE_ID_INTEL_CPT_LPC1	0x1c42
+#define PCI_DEVICE_ID_INTEL_CPT_LPC2	0x1c43
 #define PCI_DEVICE_ID_INTEL_82801AA_0	0x2410
 #define PCI_DEVICE_ID_INTEL_82801AA_1	0x2411
 #define PCI_DEVICE_ID_INTEL_82801AA_3	0x2413
diff --git a/include/linux/pfkeyv2.h b/include/linux/pfkeyv2.h
index 228b0b6..0b80c80 100644
--- a/include/linux/pfkeyv2.h
+++ b/include/linux/pfkeyv2.h
@@ -315,6 +315,7 @@
 #define SADB_X_EALG_AES_GCM_ICV12	19
 #define SADB_X_EALG_AES_GCM_ICV16	20
 #define SADB_X_EALG_CAMELLIACBC		22
+#define SADB_X_EALG_NULL_AES_GMAC	23
 #define SADB_EALG_MAX                   253 /* last EALG */
 /* private allocations should use 249-255 (RFC2407) */
 #define SADB_X_EALG_SERPENTCBC  252     /* draft-ietf-ipsec-ciph-aes-cbc-00 */
diff --git a/include/linux/raid_class.h b/include/linux/raid_class.h
index 6b537f1..31e1ff6 100644
--- a/include/linux/raid_class.h
+++ b/include/linux/raid_class.h
@@ -32,6 +32,7 @@
 	RAID_LEVEL_0,
 	RAID_LEVEL_1,
 	RAID_LEVEL_10,
+	RAID_LEVEL_1E,
 	RAID_LEVEL_3,
 	RAID_LEVEL_4,
 	RAID_LEVEL_5,
diff --git a/include/linux/sh_intc.h b/include/linux/sh_intc.h
index 4ef246f..51d288d 100644
--- a/include/linux/sh_intc.h
+++ b/include/linux/sh_intc.h
@@ -45,7 +45,7 @@
 #define INTC_SMP(stride, nr)
 #endif
 
-struct intc_desc {
+struct intc_hw_desc {
 	struct intc_vect *vectors;
 	unsigned int nr_vectors;
 	struct intc_group *groups;
@@ -56,29 +56,40 @@
 	unsigned int nr_prio_regs;
 	struct intc_sense_reg *sense_regs;
 	unsigned int nr_sense_regs;
-	char *name;
 	struct intc_mask_reg *ack_regs;
 	unsigned int nr_ack_regs;
 };
 
 #define _INTC_ARRAY(a) a, sizeof(a)/sizeof(*a)
+#define INTC_HW_DESC(vectors, groups, mask_regs,	\
+		     prio_regs,	sense_regs, ack_regs)	\
+{							\
+	_INTC_ARRAY(vectors), _INTC_ARRAY(groups),	\
+	_INTC_ARRAY(mask_regs), _INTC_ARRAY(prio_regs),	\
+	_INTC_ARRAY(sense_regs), _INTC_ARRAY(ack_regs),	\
+}
+
+struct intc_desc {
+	char *name;
+	intc_enum force_enable;
+	intc_enum force_disable;
+	struct intc_hw_desc hw;
+};
+
 #define DECLARE_INTC_DESC(symbol, chipname, vectors, groups,		\
 	mask_regs, prio_regs, sense_regs)				\
 struct intc_desc symbol __initdata = {					\
-	_INTC_ARRAY(vectors), _INTC_ARRAY(groups),			\
-	_INTC_ARRAY(mask_regs), _INTC_ARRAY(prio_regs),			\
-	_INTC_ARRAY(sense_regs),					\
-	chipname,							\
+	.name = chipname,						\
+	.hw = INTC_HW_DESC(vectors, groups, mask_regs,			\
+			   prio_regs, sense_regs, NULL),		\
 }
 
 #define DECLARE_INTC_DESC_ACK(symbol, chipname, vectors, groups,	\
 	mask_regs, prio_regs, sense_regs, ack_regs)			\
 struct intc_desc symbol __initdata = {					\
-	_INTC_ARRAY(vectors), _INTC_ARRAY(groups),			\
-	_INTC_ARRAY(mask_regs), _INTC_ARRAY(prio_regs),			\
-	_INTC_ARRAY(sense_regs),					\
-	chipname,							\
-	_INTC_ARRAY(ack_regs),						\
+	.name = chipname,						\
+	.hw = INTC_HW_DESC(vectors, groups, mask_regs,			\
+			   prio_regs, sense_regs, ack_regs),		\
 }
 
 void __init register_intc_controller(struct intc_desc *desc);
diff --git a/include/linux/spi/dw_spi.h b/include/linux/spi/dw_spi.h
index 51b3e77..cc813f9 100644
--- a/include/linux/spi/dw_spi.h
+++ b/include/linux/spi/dw_spi.h
@@ -90,6 +90,7 @@
 	unsigned long		paddr;
 	u32			iolen;
 	int			irq;
+	u32			fifo_len;	/* depth of the FIFO buffer */
 	u32			max_freq;	/* max bus freq supported */
 
 	u16			bus_num;
@@ -171,6 +172,10 @@
 {
 	if (cs > dws->num_cs)
 		return;
+
+	if (dws->cs_control)
+		dws->cs_control(1);
+
 	dw_writel(dws, ser, 1 << cs);
 }
 
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 057a2e0..f508c65 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -51,6 +51,9 @@
  *	This re-enables callbacks; it returns "false" if there are pending
  *	buffers in the queue, to detect a possible race between the driver
  *	checking for more work, and enabling callbacks.
+ * @detach_unused_buf: detach first unused buffer
+ * 	vq: the struct virtqueue we're talking about.
+ * 	Returns NULL or the "data" token handed to add_buf
  *
  * Locking rules are straightforward: the driver is responsible for
  * locking.  No two operations may be invoked simultaneously, with the exception
@@ -71,6 +74,7 @@
 
 	void (*disable_cb)(struct virtqueue *vq);
 	bool (*enable_cb)(struct virtqueue *vq);
+	void *(*detach_unused_buf)(struct virtqueue *vq);
 };
 
 /**
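
Note: a sketch of how a driver might use the new operation during teardown. The assumption here is a virtqueue whose data tokens were kmalloc()ed buffers; for net or block devices the token would be an skb or request instead.

#include <linux/slab.h>
#include <linux/virtio.h>

static void example_drain_unused(struct virtqueue *vq)
{
	void *buf;

	/* each returned pointer is a data token previously handed to add_buf */
	while ((buf = vq->vq_ops->detach_unused_buf(vq)) != NULL)
		kfree(buf);
}
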
diff --git a/include/linux/virtio_balloon.h b/include/linux/virtio_balloon.h
index 1418f04..a50ecd1 100644
--- a/include/linux/virtio_balloon.h
+++ b/include/linux/virtio_balloon.h
@@ -7,6 +7,7 @@
 
 /* The feature bitmap for virtio balloon */
 #define VIRTIO_BALLOON_F_MUST_TELL_HOST	0 /* Tell before reclaiming pages */
+#define VIRTIO_BALLOON_F_STATS_VQ	1 /* Memory Stats virtqueue */
 
 /* Size of a PFN in the balloon interface. */
 #define VIRTIO_BALLOON_PFN_SHIFT 12
@@ -18,4 +19,18 @@
 	/* Number of pages we've actually got in balloon. */
 	__le32 actual;
 };
+
+#define VIRTIO_BALLOON_S_SWAP_IN  0   /* Amount of memory swapped in */
+#define VIRTIO_BALLOON_S_SWAP_OUT 1   /* Amount of memory swapped out */
+#define VIRTIO_BALLOON_S_MAJFLT   2   /* Number of major faults */
+#define VIRTIO_BALLOON_S_MINFLT   3   /* Number of minor faults */
+#define VIRTIO_BALLOON_S_MEMFREE  4   /* Total amount of free memory */
+#define VIRTIO_BALLOON_S_MEMTOT   5   /* Total amount of memory */
+#define VIRTIO_BALLOON_S_NR       6
+
+struct virtio_balloon_stat {
+	u16 tag;
+	u64 val;
+} __attribute__((packed));
+
 #endif /* _LINUX_VIRTIO_BALLOON_H */
diff --git a/include/linux/virtio_blk.h b/include/linux/virtio_blk.h
index fd294c5..e52029e 100644
--- a/include/linux/virtio_blk.h
+++ b/include/linux/virtio_blk.h
@@ -15,6 +15,7 @@
 #define VIRTIO_BLK_F_BLK_SIZE	6	/* Block size of disk is available*/
 #define VIRTIO_BLK_F_SCSI	7	/* Supports scsi command passthru */
 #define VIRTIO_BLK_F_FLUSH	9	/* Cache flush command support */
+#define VIRTIO_BLK_F_TOPOLOGY	10	/* Topology information is available */
 
 struct virtio_blk_config {
 	/* The capacity (in 512-byte sectors). */
@@ -29,8 +30,20 @@
 		__u8 heads;
 		__u8 sectors;
 	} geometry;
+
 	/* block size of device (if VIRTIO_BLK_F_BLK_SIZE) */
 	__u32 blk_size;
+
+	/* the next 4 entries are guarded by VIRTIO_BLK_F_TOPOLOGY  */
+	/* exponent for physical block per logical block. */
+	__u8 physical_block_exp;
+	/* alignment offset in logical blocks. */
+	__u8 alignment_offset;
+	/* minimum I/O size without performance penalty in logical blocks. */
+	__u16 min_io_size;
+	/* optimal sustained I/O size in logical blocks. */
+	__u32 opt_io_size;
+
 } __attribute__((packed));
 
 /*
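
Note: the exponent field above encodes the physical/logical block-size ratio as a power of two. A hedged sketch, assuming VIRTIO_BLK_F_BLK_SIZE and VIRTIO_BLK_F_TOPOLOGY were both offered so that blk_size is valid:

#include <linux/virtio_blk.h>

static u32 example_physical_block_size(const struct virtio_blk_config *cfg)
{
	/* physical block size = logical block size * 2^physical_block_exp */
	return cfg->blk_size << cfg->physical_block_exp;
}
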
diff --git a/include/linux/virtio_console.h b/include/linux/virtio_console.h
index fe88517..ae4f039 100644
--- a/include/linux/virtio_console.h
+++ b/include/linux/virtio_console.h
@@ -3,19 +3,45 @@
 #include <linux/types.h>
 #include <linux/virtio_ids.h>
 #include <linux/virtio_config.h>
-/* This header, excluding the #ifdef __KERNEL__ part, is BSD licensed so
- * anyone can use the definitions to implement compatible drivers/servers. */
+/*
+ * This header, excluding the #ifdef __KERNEL__ part, is BSD licensed so
+ * anyone can use the definitions to implement compatible drivers/servers.
+ *
+ * Copyright (C) Red Hat, Inc., 2009, 2010
+ */
 
 /* Feature bits */
 #define VIRTIO_CONSOLE_F_SIZE	0	/* Does host provide console size? */
+#define VIRTIO_CONSOLE_F_MULTIPORT 1	/* Does host provide multiple ports? */
 
 struct virtio_console_config {
 	/* columns of the screens */
 	__u16 cols;
 	/* rows of the screens */
 	__u16 rows;
+	/* max. number of ports this device can hold */
+	__u32 max_nr_ports;
+	/* number of ports added so far */
+	__u32 nr_ports;
 } __attribute__((packed));
 
+/*
+ * A message that's passed between the Host and the Guest for a
+ * particular port.
+ */
+struct virtio_console_control {
+	__u32 id;		/* Port number */
+	__u16 event;		/* The kind of control event (see below) */
+	__u16 value;		/* Extra information for the key */
+};
+
+/* Some events for control messages */
+#define VIRTIO_CONSOLE_PORT_READY	0
+#define VIRTIO_CONSOLE_CONSOLE_PORT	1
+#define VIRTIO_CONSOLE_RESIZE		2
+#define VIRTIO_CONSOLE_PORT_OPEN	3
+#define VIRTIO_CONSOLE_PORT_NAME	4
+#define VIRTIO_CONSOLE_PORT_REMOVE	5
 
 #ifdef __KERNEL__
 int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int));
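
Note: an illustration of the control channel defined above. The helper is hypothetical, and the 1/0 open/closed encoding of value is an assumption based on the event name; a guest-side driver would fill the structure like this before queueing it to the host:

#include <linux/virtio_console.h>

static void example_fill_port_open(struct virtio_console_control *cpkt, u32 id)
{
	cpkt->id = id;				/* port number */
	cpkt->event = VIRTIO_CONSOLE_PORT_OPEN;
	cpkt->value = 1;			/* assumed: 1 = open, 0 = closed */
}
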
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 7c44499..d80b6db 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -348,7 +348,8 @@
 			    struct scsi_sense_hdr *);
 extern int scsi_test_unit_ready(struct scsi_device *sdev, int timeout,
 				int retries, struct scsi_sense_hdr *sshdr);
-extern unsigned char *scsi_get_vpd_page(struct scsi_device *, u8 page);
+extern int scsi_get_vpd_page(struct scsi_device *, u8 page, unsigned char *buf,
+			     int buf_len);
 extern int scsi_device_set_state(struct scsi_device *sdev,
 				 enum scsi_device_state state);
 extern struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
diff --git a/include/scsi/scsi_transport_sas.h b/include/scsi/scsi_transport_sas.h
index 61ad359..ffeebc3 100644
--- a/include/scsi/scsi_transport_sas.h
+++ b/include/scsi/scsi_transport_sas.h
@@ -107,6 +107,8 @@
 	struct sas_rphy		rphy;
 	/* flags */
 	unsigned		ready_led_meaning:1;
+	unsigned		tlr_supported:1;
+	unsigned		tlr_enabled:1;
 	/* parameters */
 	u16			I_T_nexus_loss_timeout;
 	u16			initiator_response_timeout;
@@ -181,6 +183,11 @@
 extern void sas_phy_delete(struct sas_phy *);
 extern int scsi_is_sas_phy(const struct device *);
 
+unsigned int sas_tlr_supported(struct scsi_device *);
+unsigned int sas_is_tlr_enabled(struct scsi_device *);
+void sas_disable_tlr(struct scsi_device *);
+void sas_enable_tlr(struct scsi_device *);
+
 extern struct sas_rphy *sas_end_device_alloc(struct sas_port *);
 extern struct sas_rphy *sas_expander_alloc(struct sas_port *, enum sas_device_type);
 void sas_rphy_free(struct sas_rphy *);
diff --git a/include/video/sh_mobile_lcdc.h b/include/video/sh_mobile_lcdc.h
index 2882054..2cc893f 100644
--- a/include/video/sh_mobile_lcdc.h
+++ b/include/video/sh_mobile_lcdc.h
@@ -34,6 +34,8 @@
 #define LCDC_FLAGS_HSCNT (1 << 3) /* Disable HSYNC during VBLANK */
 #define LCDC_FLAGS_DWCNT (1 << 4) /* Disable dotclock during blanking */
 
+#define FBIO_WAITFORVSYNC _IOW('F', 0x20, __u32)
+
 struct sh_mobile_lcdc_sys_bus_cfg {
 	unsigned long ldmt2r;
 	unsigned long ldmt3r;
diff --git a/init/Kconfig b/init/Kconfig
index d95ca7c..1510e17 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1262,4 +1262,8 @@
 config PREEMPT_NOTIFIERS
 	bool
 
+config PADATA
+	depends on SMP
+	bool
+
 source "kernel/Kconfig.locks"
diff --git a/kernel/Makefile b/kernel/Makefile
index 864ff75..6aebdeb 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -100,6 +100,7 @@
 obj-$(CONFIG_PERF_EVENTS) += perf_event.o
 obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
 obj-$(CONFIG_USER_RETURN_NOTIFIER) += user-return-notifier.o
+obj-$(CONFIG_PADATA) += padata.o
 
 ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
 # According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is
diff --git a/kernel/padata.c b/kernel/padata.c
new file mode 100644
index 0000000..6f9bcb8
--- /dev/null
+++ b/kernel/padata.c
@@ -0,0 +1,690 @@
+/*
+ * padata.c - generic interface to process data streams in parallel
+ *
+ * Copyright (C) 2008, 2009 secunet Security Networks AG
+ * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/module.h>
+#include <linux/cpumask.h>
+#include <linux/err.h>
+#include <linux/cpu.h>
+#include <linux/padata.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/rcupdate.h>
+
+#define MAX_SEQ_NR INT_MAX - NR_CPUS
+#define MAX_OBJ_NUM 10000 * NR_CPUS
+
+static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
+{
+	int cpu, target_cpu;
+
+	target_cpu = cpumask_first(pd->cpumask);
+	for (cpu = 0; cpu < cpu_index; cpu++)
+		target_cpu = cpumask_next(target_cpu, pd->cpumask);
+
+	return target_cpu;
+}
+
+static int padata_cpu_hash(struct padata_priv *padata)
+{
+	int cpu_index;
+	struct parallel_data *pd;
+
+	pd =  padata->pd;
+
+	/*
+	 * Hash the sequence numbers to the cpus by taking
+	 * seq_nr mod. number of cpus in use.
+	 */
+	cpu_index =  padata->seq_nr % cpumask_weight(pd->cpumask);
+
+	return padata_index_to_cpu(pd, cpu_index);
+}
+
+static void padata_parallel_worker(struct work_struct *work)
+{
+	struct padata_queue *queue;
+	struct parallel_data *pd;
+	struct padata_instance *pinst;
+	LIST_HEAD(local_list);
+
+	local_bh_disable();
+	queue = container_of(work, struct padata_queue, pwork);
+	pd = queue->pd;
+	pinst = pd->pinst;
+
+	spin_lock(&queue->parallel.lock);
+	list_replace_init(&queue->parallel.list, &local_list);
+	spin_unlock(&queue->parallel.lock);
+
+	while (!list_empty(&local_list)) {
+		struct padata_priv *padata;
+
+		padata = list_entry(local_list.next,
+				    struct padata_priv, list);
+
+		list_del_init(&padata->list);
+
+		padata->parallel(padata);
+	}
+
+	local_bh_enable();
+}
+
+/*
+ * padata_do_parallel - padata parallelization function
+ *
+ * @pinst: padata instance
+ * @padata: object to be parallelized
+ * @cb_cpu: cpu the serialization callback function will run on,
+ *          must be in the cpumask of padata.
+ *
+ * The parallelization callback function will run with BHs off.
+ * Note: Every object which is parallelized by padata_do_parallel
+ * must be seen by padata_do_serial.
+ */
+int padata_do_parallel(struct padata_instance *pinst,
+		       struct padata_priv *padata, int cb_cpu)
+{
+	int target_cpu, err;
+	struct padata_queue *queue;
+	struct parallel_data *pd;
+
+	rcu_read_lock_bh();
+
+	pd = rcu_dereference(pinst->pd);
+
+	err = 0;
+	if (!(pinst->flags & PADATA_INIT))
+		goto out;
+
+	err =  -EBUSY;
+	if ((pinst->flags & PADATA_RESET))
+		goto out;
+
+	if (atomic_read(&pd->refcnt) >= MAX_OBJ_NUM)
+		goto out;
+
+	err = -EINVAL;
+	if (!cpumask_test_cpu(cb_cpu, pd->cpumask))
+		goto out;
+
+	err = -EINPROGRESS;
+	atomic_inc(&pd->refcnt);
+	padata->pd = pd;
+	padata->cb_cpu = cb_cpu;
+
+	if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
+		atomic_set(&pd->seq_nr, -1);
+
+	padata->seq_nr = atomic_inc_return(&pd->seq_nr);
+
+	target_cpu = padata_cpu_hash(padata);
+	queue = per_cpu_ptr(pd->queue, target_cpu);
+
+	spin_lock(&queue->parallel.lock);
+	list_add_tail(&padata->list, &queue->parallel.list);
+	spin_unlock(&queue->parallel.lock);
+
+	queue_work_on(target_cpu, pinst->wq, &queue->pwork);
+
+out:
+	rcu_read_unlock_bh();
+
+	return err;
+}
+EXPORT_SYMBOL(padata_do_parallel);
+
+static struct padata_priv *padata_get_next(struct parallel_data *pd)
+{
+	int cpu, num_cpus, empty, calc_seq_nr;
+	int seq_nr, next_nr, overrun, next_overrun;
+	struct padata_queue *queue, *next_queue;
+	struct padata_priv *padata;
+	struct padata_list *reorder;
+
+	empty = 0;
+	next_nr = -1;
+	next_overrun = 0;
+	next_queue = NULL;
+
+	num_cpus = cpumask_weight(pd->cpumask);
+
+	for_each_cpu(cpu, pd->cpumask) {
+		queue = per_cpu_ptr(pd->queue, cpu);
+		reorder = &queue->reorder;
+
+		/*
+		 * Calculate the seq_nr of the object that should be
+		 * next in this queue.
+		 */
+		overrun = 0;
+		calc_seq_nr = (atomic_read(&queue->num_obj) * num_cpus)
+			       + queue->cpu_index;
+
+		if (unlikely(calc_seq_nr > pd->max_seq_nr)) {
+			calc_seq_nr = calc_seq_nr - pd->max_seq_nr - 1;
+			overrun = 1;
+		}
+
+		if (!list_empty(&reorder->list)) {
+			padata = list_entry(reorder->list.next,
+					    struct padata_priv, list);
+
+			seq_nr  = padata->seq_nr;
+			BUG_ON(calc_seq_nr != seq_nr);
+		} else {
+			seq_nr = calc_seq_nr;
+			empty++;
+		}
+
+		if (next_nr < 0 || seq_nr < next_nr
+		    || (next_overrun && !overrun)) {
+			next_nr = seq_nr;
+			next_overrun = overrun;
+			next_queue = queue;
+		}
+	}
+
+	padata = NULL;
+
+	if (empty == num_cpus)
+		goto out;
+
+	reorder = &next_queue->reorder;
+
+	if (!list_empty(&reorder->list)) {
+		padata = list_entry(reorder->list.next,
+				    struct padata_priv, list);
+
+		if (unlikely(next_overrun)) {
+			for_each_cpu(cpu, pd->cpumask) {
+				queue = per_cpu_ptr(pd->queue, cpu);
+				atomic_set(&queue->num_obj, 0);
+			}
+		}
+
+		spin_lock(&reorder->lock);
+		list_del_init(&padata->list);
+		atomic_dec(&pd->reorder_objects);
+		spin_unlock(&reorder->lock);
+
+		atomic_inc(&next_queue->num_obj);
+
+		goto out;
+	}
+
+	if (next_nr % num_cpus == next_queue->cpu_index) {
+		padata = ERR_PTR(-ENODATA);
+		goto out;
+	}
+
+	padata = ERR_PTR(-EINPROGRESS);
+out:
+	return padata;
+}
+
+static void padata_reorder(struct parallel_data *pd)
+{
+	struct padata_priv *padata;
+	struct padata_queue *queue;
+	struct padata_instance *pinst = pd->pinst;
+
+try_again:
+	if (!spin_trylock_bh(&pd->lock))
+		goto out;
+
+	while (1) {
+		padata = padata_get_next(pd);
+
+		if (!padata || PTR_ERR(padata) == -EINPROGRESS)
+			break;
+
+		if (PTR_ERR(padata) == -ENODATA) {
+			spin_unlock_bh(&pd->lock);
+			goto out;
+		}
+
+		queue = per_cpu_ptr(pd->queue, padata->cb_cpu);
+
+		spin_lock(&queue->serial.lock);
+		list_add_tail(&padata->list, &queue->serial.list);
+		spin_unlock(&queue->serial.lock);
+
+		queue_work_on(padata->cb_cpu, pinst->wq, &queue->swork);
+	}
+
+	spin_unlock_bh(&pd->lock);
+
+	if (atomic_read(&pd->reorder_objects))
+		goto try_again;
+
+out:
+	return;
+}
+
+static void padata_serial_worker(struct work_struct *work)
+{
+	struct padata_queue *queue;
+	struct parallel_data *pd;
+	LIST_HEAD(local_list);
+
+	local_bh_disable();
+	queue = container_of(work, struct padata_queue, swork);
+	pd = queue->pd;
+
+	spin_lock(&queue->serial.lock);
+	list_replace_init(&queue->serial.list, &local_list);
+	spin_unlock(&queue->serial.lock);
+
+	while (!list_empty(&local_list)) {
+		struct padata_priv *padata;
+
+		padata = list_entry(local_list.next,
+				    struct padata_priv, list);
+
+		list_del_init(&padata->list);
+
+		padata->serial(padata);
+		atomic_dec(&pd->refcnt);
+	}
+	local_bh_enable();
+}
+
+/*
+ * padata_do_serial - padata serialization function
+ *
+ * @padata: object to be serialized.
+ *
+ * padata_do_serial must be called for every parallelized object.
+ * The serialization callback function will run with BHs off.
+ */
+void padata_do_serial(struct padata_priv *padata)
+{
+	int cpu;
+	struct padata_queue *queue;
+	struct parallel_data *pd;
+
+	pd = padata->pd;
+
+	cpu = get_cpu();
+	queue = per_cpu_ptr(pd->queue, cpu);
+
+	spin_lock(&queue->reorder.lock);
+	atomic_inc(&pd->reorder_objects);
+	list_add_tail(&padata->list, &queue->reorder.list);
+	spin_unlock(&queue->reorder.lock);
+
+	put_cpu();
+
+	padata_reorder(pd);
+}
+EXPORT_SYMBOL(padata_do_serial);
+
+static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
+					     const struct cpumask *cpumask)
+{
+	int cpu, cpu_index, num_cpus;
+	struct padata_queue *queue;
+	struct parallel_data *pd;
+
+	cpu_index = 0;
+
+	pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
+	if (!pd)
+		goto err;
+
+	pd->queue = alloc_percpu(struct padata_queue);
+	if (!pd->queue)
+		goto err_free_pd;
+
+	if (!alloc_cpumask_var(&pd->cpumask, GFP_KERNEL))
+		goto err_free_queue;
+
+	for_each_possible_cpu(cpu) {
+		queue = per_cpu_ptr(pd->queue, cpu);
+
+		queue->pd = pd;
+
+		if (cpumask_test_cpu(cpu, cpumask)
+		    && cpumask_test_cpu(cpu, cpu_active_mask)) {
+			queue->cpu_index = cpu_index;
+			cpu_index++;
+		} else
+			queue->cpu_index = -1;
+
+		INIT_LIST_HEAD(&queue->reorder.list);
+		INIT_LIST_HEAD(&queue->parallel.list);
+		INIT_LIST_HEAD(&queue->serial.list);
+		spin_lock_init(&queue->reorder.lock);
+		spin_lock_init(&queue->parallel.lock);
+		spin_lock_init(&queue->serial.lock);
+
+		INIT_WORK(&queue->pwork, padata_parallel_worker);
+		INIT_WORK(&queue->swork, padata_serial_worker);
+		atomic_set(&queue->num_obj, 0);
+	}
+
+	cpumask_and(pd->cpumask, cpumask, cpu_active_mask);
+
+	num_cpus = cpumask_weight(pd->cpumask);
+	pd->max_seq_nr = (MAX_SEQ_NR / num_cpus) * num_cpus - 1;
+
+	atomic_set(&pd->seq_nr, -1);
+	atomic_set(&pd->reorder_objects, 0);
+	atomic_set(&pd->refcnt, 0);
+	pd->pinst = pinst;
+	spin_lock_init(&pd->lock);
+
+	return pd;
+
+err_free_queue:
+	free_percpu(pd->queue);
+err_free_pd:
+	kfree(pd);
+err:
+	return NULL;
+}
+
+static void padata_free_pd(struct parallel_data *pd)
+{
+	free_cpumask_var(pd->cpumask);
+	free_percpu(pd->queue);
+	kfree(pd);
+}
+
+static void padata_replace(struct padata_instance *pinst,
+			   struct parallel_data *pd_new)
+{
+	struct parallel_data *pd_old = pinst->pd;
+
+	pinst->flags |= PADATA_RESET;
+
+	rcu_assign_pointer(pinst->pd, pd_new);
+
+	synchronize_rcu();
+
+	while (atomic_read(&pd_old->refcnt) != 0)
+		yield();
+
+	flush_workqueue(pinst->wq);
+
+	padata_free_pd(pd_old);
+
+	pinst->flags &= ~PADATA_RESET;
+}
+
+/*
+ * padata_set_cpumask - set the cpumask that padata should use
+ *
+ * @pinst: padata instance
+ * @cpumask: the cpumask to use
+ */
+int padata_set_cpumask(struct padata_instance *pinst,
+			cpumask_var_t cpumask)
+{
+	struct parallel_data *pd;
+	int err = 0;
+
+	might_sleep();
+
+	mutex_lock(&pinst->lock);
+
+	pd = padata_alloc_pd(pinst, cpumask);
+	if (!pd) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	cpumask_copy(pinst->cpumask, cpumask);
+
+	padata_replace(pinst, pd);
+
+out:
+	mutex_unlock(&pinst->lock);
+
+	return err;
+}
+EXPORT_SYMBOL(padata_set_cpumask);
+
+static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
+{
+	struct parallel_data *pd;
+
+	if (cpumask_test_cpu(cpu, cpu_active_mask)) {
+		pd = padata_alloc_pd(pinst, pinst->cpumask);
+		if (!pd)
+			return -ENOMEM;
+
+		padata_replace(pinst, pd);
+	}
+
+	return 0;
+}
+
+/*
+ * padata_add_cpu - add a cpu to the padata cpumask
+ *
+ * @pinst: padata instance
+ * @cpu: cpu to add
+ */
+int padata_add_cpu(struct padata_instance *pinst, int cpu)
+{
+	int err;
+
+	might_sleep();
+
+	mutex_lock(&pinst->lock);
+
+	cpumask_set_cpu(cpu, pinst->cpumask);
+	err = __padata_add_cpu(pinst, cpu);
+
+	mutex_unlock(&pinst->lock);
+
+	return err;
+}
+EXPORT_SYMBOL(padata_add_cpu);
+
+static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
+{
+	struct parallel_data *pd;
+
+	if (cpumask_test_cpu(cpu, cpu_online_mask)) {
+		pd = padata_alloc_pd(pinst, pinst->cpumask);
+		if (!pd)
+			return -ENOMEM;
+
+		padata_replace(pinst, pd);
+	}
+
+	return 0;
+}
+
+/*
+ * padata_remove_cpu - remove a cpu from the padata cpumask
+ *
+ * @pinst: padata instance
+ * @cpu: cpu to remove
+ */
+int padata_remove_cpu(struct padata_instance *pinst, int cpu)
+{
+	int err;
+
+	might_sleep();
+
+	mutex_lock(&pinst->lock);
+
+	cpumask_clear_cpu(cpu, pinst->cpumask);
+	err = __padata_remove_cpu(pinst, cpu);
+
+	mutex_unlock(&pinst->lock);
+
+	return err;
+}
+EXPORT_SYMBOL(padata_remove_cpu);
+
+/*
+ * padata_start - start the parallel processing
+ *
+ * @pinst: padata instance to start
+ */
+void padata_start(struct padata_instance *pinst)
+{
+	might_sleep();
+
+	mutex_lock(&pinst->lock);
+	pinst->flags |= PADATA_INIT;
+	mutex_unlock(&pinst->lock);
+}
+EXPORT_SYMBOL(padata_start);
+
+/*
+ * padata_stop - stop the parallel processing
+ *
+ * @pinst: padata instance to stop
+ */
+void padata_stop(struct padata_instance *pinst)
+{
+	might_sleep();
+
+	mutex_lock(&pinst->lock);
+	pinst->flags &= ~PADATA_INIT;
+	mutex_unlock(&pinst->lock);
+}
+EXPORT_SYMBOL(padata_stop);
+
+static int __cpuinit padata_cpu_callback(struct notifier_block *nfb,
+					 unsigned long action, void *hcpu)
+{
+	int err;
+	struct padata_instance *pinst;
+	int cpu = (unsigned long)hcpu;
+
+	pinst = container_of(nfb, struct padata_instance, cpu_notifier);
+
+	switch (action) {
+	case CPU_ONLINE:
+	case CPU_ONLINE_FROZEN:
+		if (!cpumask_test_cpu(cpu, pinst->cpumask))
+			break;
+		mutex_lock(&pinst->lock);
+		err = __padata_add_cpu(pinst, cpu);
+		mutex_unlock(&pinst->lock);
+		if (err)
+			return NOTIFY_BAD;
+		break;
+
+	case CPU_DOWN_PREPARE:
+	case CPU_DOWN_PREPARE_FROZEN:
+		if (!cpumask_test_cpu(cpu, pinst->cpumask))
+			break;
+		mutex_lock(&pinst->lock);
+		err = __padata_remove_cpu(pinst, cpu);
+		mutex_unlock(&pinst->lock);
+		if (err)
+			return NOTIFY_BAD;
+		break;
+
+	case CPU_UP_CANCELED:
+	case CPU_UP_CANCELED_FROZEN:
+		if (!cpumask_test_cpu(cpu, pinst->cpumask))
+			break;
+		mutex_lock(&pinst->lock);
+		__padata_remove_cpu(pinst, cpu);
+		mutex_unlock(&pinst->lock);
+
+	case CPU_DOWN_FAILED:
+	case CPU_DOWN_FAILED_FROZEN:
+		if (!cpumask_test_cpu(cpu, pinst->cpumask))
+			break;
+		mutex_lock(&pinst->lock);
+		__padata_add_cpu(pinst, cpu);
+		mutex_unlock(&pinst->lock);
+	}
+
+	return NOTIFY_OK;
+}
+
+/*
+ * padata_alloc - allocate and initialize a padata instance
+ *
+ * @cpumask: cpumask that padata uses for parallelization
+ * @wq: workqueue to use for the allocated padata instance
+ */
+struct padata_instance *padata_alloc(const struct cpumask *cpumask,
+				     struct workqueue_struct *wq)
+{
+	int err;
+	struct padata_instance *pinst;
+	struct parallel_data *pd;
+
+	pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL);
+	if (!pinst)
+		goto err;
+
+	pd = padata_alloc_pd(pinst, cpumask);
+	if (!pd)
+		goto err_free_inst;
+
+	rcu_assign_pointer(pinst->pd, pd);
+
+	pinst->wq = wq;
+
+	cpumask_copy(pinst->cpumask, cpumask);
+
+	pinst->flags = 0;
+
+	pinst->cpu_notifier.notifier_call = padata_cpu_callback;
+	pinst->cpu_notifier.priority = 0;
+	err = register_hotcpu_notifier(&pinst->cpu_notifier);
+	if (err)
+		goto err_free_pd;
+
+	mutex_init(&pinst->lock);
+
+	return pinst;
+
+err_free_pd:
+	padata_free_pd(pd);
+err_free_inst:
+	kfree(pinst);
+err:
+	return NULL;
+}
+EXPORT_SYMBOL(padata_alloc);
+
+/*
+ * padata_free - free a padata instance
+ *
+ * @pinst: padata instance to free
+ */
+void padata_free(struct padata_instance *pinst)
+{
+	padata_stop(pinst);
+
+	synchronize_rcu();
+
+	while (atomic_read(&pinst->pd->refcnt) != 0)
+		yield();
+
+	unregister_hotcpu_notifier(&pinst->cpu_notifier);
+	padata_free_pd(pinst->pd);
+	kfree(pinst);
+}
+EXPORT_SYMBOL(padata_free);
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 91e09d3..4c9cffc 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -222,3 +222,8 @@
 	  and the bus type drivers of the buses the devices are on are
 	  responsible for the actual handling of the autosuspend requests and
 	  wake-up events.
+
+config PM_OPS
+	bool
+	depends on PM_SLEEP || PM_RUNTIME
+	default y
diff --git a/kernel/resource.c b/kernel/resource.c
index af96c1e..24e9e60 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -188,6 +188,36 @@
 	return -EINVAL;
 }
 
+static void __release_child_resources(struct resource *r)
+{
+	struct resource *tmp, *p;
+	resource_size_t size;
+
+	p = r->child;
+	r->child = NULL;
+	while (p) {
+		tmp = p;
+		p = p->sibling;
+
+		tmp->parent = NULL;
+		tmp->sibling = NULL;
+		__release_child_resources(tmp);
+
+		printk(KERN_DEBUG "release child resource %pR\n", tmp);
+		/* need to restore size, and keep flags */
+		size = resource_size(tmp);
+		tmp->start = 0;
+		tmp->end = size - 1;
+	}
+}
+
+void release_child_resources(struct resource *r)
+{
+	write_lock(&resource_lock);
+	__release_child_resources(r);
+	write_unlock(&resource_lock);
+}
+
 /**
  * request_resource - request and reserve an I/O or memory resource
  * @root: root resource descriptor
@@ -303,8 +333,10 @@
 static int find_resource(struct resource *root, struct resource *new,
 			 resource_size_t size, resource_size_t min,
 			 resource_size_t max, resource_size_t align,
-			 void (*alignf)(void *, struct resource *,
-					resource_size_t, resource_size_t),
+			 resource_size_t (*alignf)(void *,
+						   const struct resource *,
+						   resource_size_t,
+						   resource_size_t),
 			 void *alignf_data)
 {
 	struct resource *this = root->child;
@@ -330,7 +362,7 @@
 			tmp.end = max;
 		tmp.start = ALIGN(tmp.start, align);
 		if (alignf)
-			alignf(alignf_data, &tmp, size, align);
+			tmp.start = alignf(alignf_data, &tmp, size, align);
 		if (tmp.start < tmp.end && tmp.end - tmp.start >= size - 1) {
 			new->start = tmp.start;
 			new->end = tmp.start + size - 1;
@@ -358,8 +390,10 @@
 int allocate_resource(struct resource *root, struct resource *new,
 		      resource_size_t size, resource_size_t min,
 		      resource_size_t max, resource_size_t align,
-		      void (*alignf)(void *, struct resource *,
-				     resource_size_t, resource_size_t),
+		      resource_size_t (*alignf)(void *,
+						const struct resource *,
+						resource_size_t,
+						resource_size_t),
 		      void *alignf_data)
 {
 	int err;
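
For callers, the visible change is the alignf prototype: the hook now takes a const resource and returns the (possibly adjusted) start address instead of modifying the resource in place. A sketch of a conforming callback, not taken from any in-tree user (the 1 MiB boundary is an arbitrary example):

#include <linux/ioport.h>
#include <linux/kernel.h>

/* Bump every candidate range up to a 1 MiB boundary. */
static resource_size_t example_align(void *data, const struct resource *res,
				     resource_size_t size,
				     resource_size_t align)
{
	return ALIGN(res->start, 0x100000);
}

/*
 * Illustrative call: carve 'size' bytes out of the iomem tree using the
 * callback above.  Error handling is omitted for brevity.
 */
static int example_claim(struct resource *new, resource_size_t size)
{
	return allocate_resource(&iomem_resource, new, size, 0,
				 (resource_size_t)~0, 0x100000,
				 example_align, NULL);
}
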
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 25c3ed5..d62e3cd 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -355,7 +355,7 @@
 config DEBUG_KMEMLEAK
 	bool "Kernel memory leak detector"
 	depends on DEBUG_KERNEL && EXPERIMENTAL && !MEMORY_HOTPLUG && \
-		(X86 || ARM || PPC || S390)
+		(X86 || ARM || PPC || S390 || SUPERH)
 
 	select DEBUG_FS if SYSFS
 	select STACKTRACE if STACKTRACE_SUPPORT
diff --git a/mm/Kconfig b/mm/Kconfig
index 17b8947..d34c2b9 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -195,7 +195,7 @@
 config NR_QUICK
 	int
 	depends on QUICKLIST
-	default "2" if SUPERH || AVR32
+	default "2" if AVR32
 	default "1"
 
 config VIRT_TO_BUS
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index fc6ec1e..280529a 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -313,10 +313,21 @@
 	return hidp_queue_report(session, buf, rsize);
 }
 
-static int hidp_output_raw_report(struct hid_device *hid, unsigned char *data, size_t count)
+static int hidp_output_raw_report(struct hid_device *hid, unsigned char *data, size_t count,
+		unsigned char report_type)
 {
-	if (hidp_send_ctrl_message(hid->driver_data,
-			HIDP_TRANS_SET_REPORT | HIDP_DATA_RTYPE_FEATURE,
+	switch (report_type) {
+	case HID_FEATURE_REPORT:
+		report_type = HIDP_TRANS_SET_REPORT | HIDP_DATA_RTYPE_FEATURE;
+		break;
+	case HID_OUTPUT_REPORT:
+		report_type = HIDP_TRANS_DATA | HIDP_DATA_RTYPE_OUPUT;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (hidp_send_ctrl_message(hid->driver_data, report_type,
 			data, count))
 		return -ENOMEM;
 	return count;
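
With the extra report_type argument, the transport layer now picks the HIDP transaction itself; a caller only states whether the buffer is a feature or an output report. A rough sketch of such a call, assuming the hid_output_raw_report callback on struct hid_device gained the same fourth argument in this series (the report contents are made up):

#include <linux/hid.h>

static int example_send_feature(struct hid_device *hid)
{
	/* Hypothetical feature report: ID 0x02 followed by two data bytes. */
	unsigned char buf[] = { 0x02, 0x01, 0x00 };

	/* Routed by hidp to SET_REPORT(feature) on the control channel. */
	return hid->hid_output_raw_report(hid, buf, sizeof(buf),
					  HID_FEATURE_REPORT);
}
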
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index 743c013..8b4d6e3 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -125,6 +125,22 @@
 		.sadb_alg_maxbits = 256
 	}
 },
+{
+	.name = "rfc4543(gcm(aes))",
+
+	.uinfo = {
+		.aead = {
+			.icv_truncbits = 128,
+		}
+	},
+
+	.desc = {
+		.sadb_alg_id = SADB_X_EALG_NULL_AES_GMAC,
+		.sadb_alg_ivlen = 8,
+		.sadb_alg_minbits = 128,
+		.sadb_alg_maxbits = 256
+	}
+},
 };
 
 static struct xfrm_algo_desc aalg_list[] = {
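
The new aead_list entry is AES-GMAC as specified by RFC 4543, i.e. GCM used for authentication only with NULL encryption, which is why it is keyed by SADB_X_EALG_NULL_AES_GMAC and advertises a full 128-bit ICV, an 8-byte IV and 128-256 bit keys.
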
diff --git a/scripts/.gitignore b/scripts/.gitignore
index 52cab46a..c5d5db5 100644
--- a/scripts/.gitignore
+++ b/scripts/.gitignore
@@ -6,5 +6,4 @@
 pnmtologo
 bin2c
 unifdef
-binoffset
 ihex2fw
diff --git a/scripts/binoffset.c b/scripts/binoffset.c
deleted file mode 100644
index 1a2e39b..0000000
--- a/scripts/binoffset.c
+++ /dev/null
@@ -1,163 +0,0 @@
-/***************************************************************************
- * binoffset.c
- * (C) 2002 Randy Dunlap <rdunlap@xenotime.net>
-
-#   This program is free software; you can redistribute it and/or modify
-#   it under the terms of the GNU General Public License as published by
-#   the Free Software Foundation; either version 2 of the License, or
-#   (at your option) any later version.
-#
-#   This program is distributed in the hope that it will be useful,
-#   but WITHOUT ANY WARRANTY; without even the implied warranty of
-#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-#   GNU General Public License for more details.
-#
-#   You should have received a copy of the GNU General Public License
-#   along with this program; if not, write to the Free Software
-#   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-# binoffset.c:
-# - searches a (binary) file for a specified (binary) pattern
-# - returns the offset of the located pattern or ~0 if not found
-# - exits with exit status 0 normally or non-0 if pattern is not found
-#   or any other error occurs.
-
-****************************************************************/
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <errno.h>
-#include <unistd.h>
-#include <fcntl.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <sys/mman.h>
-
-#define VERSION		"0.1"
-#define BUF_SIZE	(16 * 1024)
-#define PAT_SIZE	100
-
-char		*progname;
-char		*inputname;
-int		inputfd;
-unsigned int	bix;			/* buf index */
-unsigned char	patterns [PAT_SIZE] = {0}; /* byte-sized pattern array */
-int		pat_len;		/* actual number of pattern bytes */
-unsigned char	*madr;			/* mmap address */
-size_t		filesize;
-int		num_matches = 0;
-off_t		firstloc = 0;
-
-void usage (void)
-{
-	fprintf (stderr, "%s ver. %s\n", progname, VERSION);
-	fprintf (stderr, "usage:  %s filename pattern_bytes\n",
-			progname);
-	fprintf (stderr, "        [prints location of pattern_bytes in file]\n");
-	exit (1);
-}
-
-void get_pattern (int pat_count, char *pats [])
-{
-	int ix, err, tmp;
-
-#ifdef DEBUG
-	fprintf (stderr,"get_pattern: count = %d\n", pat_count);
-	for (ix = 0; ix < pat_count; ix++)
-		fprintf (stderr, "  pat # %d:  [%s]\n", ix, pats[ix]);
-#endif
-
-	for (ix = 0; ix < pat_count; ix++) {
-		tmp = 0;
-		err = sscanf (pats[ix], "%5i", &tmp);
-		if (err != 1 || tmp > 0xff) {
-			fprintf (stderr, "pattern or value error in pattern # %d [%s]\n",
-					ix, pats[ix]);
-			usage ();
-		}
-		patterns [ix] = tmp;
-	}
-	pat_len = pat_count;
-}
-
-void search_pattern (void)
-{
-	for (bix = 0; bix < filesize; bix++) {
-		if (madr[bix] == patterns[0]) {
-			if (memcmp (&madr[bix], patterns, pat_len) == 0) {
-				if (num_matches == 0)
-					firstloc = bix;
-				num_matches++;
-			}
-		}
-	}
-}
-
-#ifdef NOTDEF
-size_t get_filesize (int fd)
-{
-	off_t end_off = lseek (fd, 0, SEEK_END);
-	lseek (fd, 0, SEEK_SET);
-	return (size_t) end_off;
-}
-#endif
-
-size_t get_filesize (int fd)
-{
-	int err;
-	struct stat stat;
-
-	err = fstat (fd, &stat);
-	fprintf (stderr, "filesize: %ld\n", err < 0 ? (long)err : stat.st_size);
-	if (err < 0)
-		return err;
-	return (size_t) stat.st_size;
-}
-
-int main (int argc, char *argv [])
-{
-	progname = argv[0];
-
-	if (argc < 3)
-		usage ();
-
-	get_pattern (argc - 2, argv + 2);
-
-	inputname = argv[1];
-
-	inputfd = open (inputname, O_RDONLY);
-	if (inputfd == -1) {
-		fprintf (stderr, "%s: cannot open '%s'\n",
-				progname, inputname);
-		exit (3);
-	}
-
-	filesize = get_filesize (inputfd);
-
-	madr = mmap (0, filesize, PROT_READ, MAP_PRIVATE, inputfd, 0);
-	if (madr == MAP_FAILED) {
-		fprintf (stderr, "mmap error = %d\n", errno);
-		close (inputfd);
-		exit (4);
-	}
-
-	search_pattern ();
-
-	if (munmap (madr, filesize))
-		fprintf (stderr, "munmap error = %d\n", errno);
-
-	if (close (inputfd))
-		fprintf (stderr, "%s: error %d closing '%s'\n",
-				progname, errno, inputname);
-
-	fprintf (stderr, "number of pattern matches = %d\n", num_matches);
-	if (num_matches == 0)
-		firstloc = ~0;
-	printf ("%ld\n", firstloc);
-	fprintf (stderr, "%ld\n", firstloc);
-
-	exit (num_matches ? 0 : 2);
-}
-
-/* end binoffset.c */
diff --git a/scripts/extract-ikconfig b/scripts/extract-ikconfig
index de233ff..37f30d3 100755
--- a/scripts/extract-ikconfig
+++ b/scripts/extract-ikconfig
@@ -1,92 +1,53 @@
 #!/bin/sh
-# extracts .config info from a [b]zImage file
-# uses: binoffset (new), dd, zcat, strings, grep
-# $arg1 is [b]zImage filename
+# ----------------------------------------------------------------------
+# extract-ikconfig - Extract the .config file from a kernel image
+#
+# This will only work when the kernel was compiled with CONFIG_IKCONFIG.
+#
+# The obscure use of the "tr" filter is to work around older versions of
+# "grep" that report the byte offset of the line instead of the pattern.
+#
+# (c) 2009, Dick Streefland <dick@streefland.net>
+# Licensed under the terms of the GNU General Public License.
+# ----------------------------------------------------------------------
 
-binoffset="./scripts/binoffset"
-test -e $binoffset || cc -o $binoffset ./scripts/binoffset.c || exit 1
+gz1='\037\213\010'
+gz2='01'
+cf1='IKCFG_ST\037\213\010'
+cf2='0123456789'
 
-IKCFG_ST="0x49 0x4b 0x43 0x46 0x47 0x5f 0x53 0x54"
-IKCFG_ED="0x49 0x4b 0x43 0x46 0x47 0x5f 0x45 0x44"
-dump_config() {
-    file="$1"
-
-    start=`$binoffset $file $IKCFG_ST 2>/dev/null`
-    [ "$?" != "0" ] && start="-1"
-    if [ "$start" -eq "-1" ]; then
-	return
-    fi
-    end=`$binoffset $file $IKCFG_ED 2>/dev/null`
-    [ "$?" != "0" ] && end="-1"
-    if [ "$end" -eq "-1" ]; then
-	return
-    fi
-
-    start=`expr $start + 8`
-    size=`expr $end - $start`
-
-    dd if="$file" ibs=1 skip="$start" count="$size" 2>/dev/null | zcat
-
-    clean_up
-    exit 0
-}
-
-
-usage()
+dump_config()
 {
-	echo "  usage: extract-ikconfig [b]zImage_filename"
-}
-
-clean_up()
-{
-	if [ "$TMPFILE" != "" ]; then
-		rm -f $TMPFILE
+	if	pos=`tr "$cf1\n$cf2" "\n$cf2=" < "$1" | grep -abo "^$cf2"`
+	then
+		pos=${pos%%:*}
+		tail -c+$(($pos+8)) "$1" | zcat -q
+		exit 0
 	fi
 }
 
-if [ $# -lt 1 ]
+# Check invocation:
+me=${0##*/}
+img=$1
+if	[ $# -ne 1 -o ! -s "$img" ]
 then
-	usage
-	exit 1
+	echo "Usage: $me <kernel-image>" >&2
+	exit 2
 fi
 
-TMPFILE=`mktemp -t ikconfig-XXXXXX` || exit 1
-image="$1"
+# Initial attempt for uncompressed images or objects:
+dump_config "$img"
 
-# vmlinux: Attempt to dump the configuration from the file directly
-dump_config "$image"
+# That didn't work, so decompress and try again:
+tmp=/tmp/ikconfig$$
+trap "rm -f $tmp" 0
+for	pos in `tr "$gz1\n$gz2" "\n$gz2=" < "$img" | grep -abo "^$gz2"`
+do
+	pos=${pos%%:*}
+	tail -c+$pos "$img" | zcat 2> /dev/null > $tmp
+	dump_config $tmp
+done
 
-GZHDR1="0x1f 0x8b 0x08 0x00"
-GZHDR2="0x1f 0x8b 0x08 0x08"
-
-ELFHDR="0x7f 0x45 0x4c 0x46"
-
-# vmlinux.gz: Check for a compressed images
-off=`$binoffset "$image" $GZHDR1 2>/dev/null`
-[ "$?" != "0" ] && off="-1"
-if [ "$off" -eq "-1" ]; then
-	off=`$binoffset "$image" $GZHDR2 2>/dev/null`
-	[ "$?" != "0" ] && off="-1"
-fi
-if [ "$off" -eq "0" ]; then
-	zcat <"$image" >"$TMPFILE"
-	dump_config "$TMPFILE"
-elif [ "$off" -ne "-1" ]; then
-	(dd ibs="$off" skip=1 count=0 && dd bs=512k) <"$image" 2>/dev/null | \
-		zcat >"$TMPFILE"
-	dump_config "$TMPFILE"
-
-# check if this is simply an ELF file
-else
-	off=`$binoffset "$image" $ELFHDR 2>/dev/null`
-	[ "$?" != "0" ] && off="-1"
-	if [ "$off" -eq "0" ]; then
-		dump_config "$image"
-	fi
-fi
-
-echo "ERROR: Unable to extract kernel configuration information."
-echo "       This kernel image may not have the config info."
-
-clean_up
+# Bail out:
+echo "$me: Cannot find kernel config." >&2
 exit 1
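
Invocation is unchanged in shape, a single image argument, e.g. scripts/extract-ikconfig vmlinux > .config (a bzImage works too); the rewrite drops the binoffset helper entirely and instead locates the gzip and IKCFG_ST signatures with tr and grep, so it still requires a kernel built with CONFIG_IKCONFIG.
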
diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
index 999e8a7..186c466 100644
--- a/scripts/kconfig/Makefile
+++ b/scripts/kconfig/Makefile
@@ -30,8 +30,17 @@
 	$(Q)mkdir -p include/generated
 	$< -s $(Kconfig)
 
+# If no path is given, look for the file in the object tree
+ifdef LSMOD
+LSMOD_F := $(LSMOD)
+ifeq ($(findstring /,$(LSMOD)),)
+  LSMOD_F := $(objtree)/$(LSMOD)
+endif
+endif
+
 localmodconfig: $(obj)/streamline_config.pl $(obj)/conf
-	$(Q)perl $< $(srctree) $(Kconfig) > .tmp.config
+	$(Q)mkdir -p include/generated
+	$(Q)perl $< $(srctree) $(Kconfig) $(LSMOD_F) > .tmp.config
 	$(Q)if [ -f .config ]; then 				\
 			cmp -s .tmp.config .config ||		\
 			(mv -f .config .config.old.1;		\
@@ -45,7 +54,8 @@
 	$(Q)rm -f .tmp.config
 
 localyesconfig: $(obj)/streamline_config.pl $(obj)/conf
-	$(Q)perl $< $(srctree) $(Kconfig) > .tmp.config
+	$(Q)mkdir -p include/generated
+	$(Q)perl $< $(srctree) $(Kconfig) $(LSMOD_F) > .tmp.config
 	$(Q)sed -i s/=m/=y/ .tmp.config
 	$(Q)if [ -f .config ]; then 				\
 			cmp -s .tmp.config .config ||		\
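
The new LSMOD variable lets localmodconfig and localyesconfig work from a saved module list (or an alternative lsmod executable) instead of the running system, for example make LSMOD=/tmp/lsmod.txt localmodconfig with an illustrative path; a name without a slash is resolved relative to $(objtree), and the result is passed to streamline_config.pl as its third argument.
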
diff --git a/scripts/kconfig/streamline_config.pl b/scripts/kconfig/streamline_config.pl
index 0d80082..afbd54a 100644
--- a/scripts/kconfig/streamline_config.pl
+++ b/scripts/kconfig/streamline_config.pl
@@ -113,6 +113,7 @@
 # Get the build source and top level Kconfig file (passed in)
 my $ksource = $ARGV[0];
 my $kconfig = $ARGV[1];
+my $lsmod_file = $ARGV[2];
 
 my @makefiles = `find $ksource -name Makefile`;
 my %depends;
@@ -121,6 +122,8 @@
 my %objects;
 my $var;
 my $cont = 0;
+my $iflevel = 0;
+my @ifdeps;
 
 # prevent recursion
 my %read_kconfigs;
@@ -146,6 +149,15 @@
 	    $state = "NEW";
 	    $config = $1;
 
+	    for (my $i = 0; $i < $iflevel; $i++) {
+		if ($i) {
+		    $depends{$config} .= " " . $ifdeps[$i];
+		} else {
+		    $depends{$config} = $ifdeps[$i];
+		}
+		$state = "DEP";
+	    }
+
 	# collect the depends for the config
 	} elsif ($state eq "NEW" && /^\s*depends\s+on\s+(.*)$/) {
 	    $state = "DEP";
@@ -166,6 +178,21 @@
 	    # note if the config has a prompt
 	    $prompt{$config} = 1;
 
+	# Check for if statements
+	} elsif (/^if\s+(.*\S)\s*$/) {
+	    my $deps = $1;
+	    # strip leading and trailing non-identifier characters
+	    $deps =~ s/^[^a-zA-Z0-9_]*//;
+	    $deps =~ s/[^a-zA-Z0-9_]*$//;
+
+	    my @deps = split /[^a-zA-Z0-9_]+/, $deps;
+
+	    $ifdeps[$iflevel++] = join ':', @deps;
+
+	} elsif (/^endif/) {
+
+	    $iflevel-- if ($iflevel);
+
 	# stop on "help"
 	} elsif (/^\s*help\s*$/) {
 	    $state = "NONE";
@@ -237,8 +264,36 @@
 
 my %modules;
 
-# see what modules are loaded on this system
-open(LIN,"/sbin/lsmod|") || die "Cant lsmod";
+if (defined($lsmod_file)) {
+    if ( ! -f $lsmod_file) {
+	die "$lsmod_file not found";
+    }
+    if ( -x $lsmod_file) {
+	# the file is executable, run it
+	open(LIN, "$lsmod_file|");
+    } else {
+	# Just read the contents
+	open(LIN, "$lsmod_file");
+    }
+} else {
+
+    # see what modules are loaded on this system
+    my $lsmod;
+
+    foreach $dir ( ("/sbin", "/bin", "/usr/sbin", "/usr/bin") ) {
+	if ( -x "$dir/lsmod" ) {
+	    $lsmod = "$dir/lsmod";
+	    last;
+	}
+    }
+    if (!defined($lsmod)) {
+	# fall back to a bare "lsmod" and let $PATH resolve it
+	$lsmod = "lsmod";
+    }
+
+    open(LIN,"$lsmod|") || die "Can not call lsmod with $lsmod";
+}
+
 while (<LIN>) {
 	next if (/^Module/);  # Skip the first line.
 	if (/^(\S+)/) {
diff --git a/security/integrity/ima/ima_iint.c b/security/integrity/ima/ima_iint.c
index 0d83edc..2d4d05d 100644
--- a/security/integrity/ima/ima_iint.c
+++ b/security/integrity/ima/ima_iint.c
@@ -63,12 +63,11 @@
 	spin_lock(&ima_iint_lock);
 	rc = radix_tree_insert(&ima_iint_store, (unsigned long)inode, iint);
 	spin_unlock(&ima_iint_lock);
+	radix_tree_preload_end();
 out:
 	if (rc < 0)
 		kmem_cache_free(iint_cache, iint);
 
-	radix_tree_preload_end();
-
 	return rc;
 }
 
diff --git a/security/selinux/ss/ebitmap.c b/security/selinux/ss/ebitmap.c
index 68c7348..04b6145 100644
--- a/security/selinux/ss/ebitmap.c
+++ b/security/selinux/ss/ebitmap.c
@@ -128,7 +128,7 @@
 			cmap_idx = delta / NETLBL_CATMAP_MAPSIZE;
 			cmap_sft = delta % NETLBL_CATMAP_MAPSIZE;
 			c_iter->bitmap[cmap_idx]
-				|= e_iter->maps[cmap_idx] << cmap_sft;
+				|= e_iter->maps[i] << cmap_sft;
 		}
 		e_iter = e_iter->next;
 	}
diff --git a/sound/aoa/fabrics/layout.c b/sound/aoa/fabrics/layout.c
index 586965f..7a437da 100644
--- a/sound/aoa/fabrics/layout.c
+++ b/sound/aoa/fabrics/layout.c
@@ -768,7 +768,7 @@
 				"required property %s not present\n", propname);
 			return -ENODEV;
 		}
-		if (*ref != codec->node->linux_phandle) {
+		if (*ref != codec->node->phandle) {
 			printk(KERN_INFO "snd-aoa-fabric-layout: "
 				"%s doesn't match!\n", propname);
 			return -ENODEV;
diff --git a/sound/ppc/awacs.c b/sound/ppc/awacs.c
index 2e15646..b366793 100644
--- a/sound/ppc/awacs.c
+++ b/sound/ppc/awacs.c
@@ -751,8 +751,8 @@
 
 static void snd_pmac_awacs_resume(struct snd_pmac *chip)
 {
-	if (machine_is_compatible("PowerBook3,1")
-	    || machine_is_compatible("PowerBook3,2")) {
+	if (of_machine_is_compatible("PowerBook3,1")
+	    || of_machine_is_compatible("PowerBook3,2")) {
 		msleep(100);
 		snd_pmac_awacs_write_reg(chip, 1,
 			chip->awacs_reg[1] & ~MASK_PAROUT);
@@ -780,16 +780,16 @@
 }
 #endif /* CONFIG_PM */
 
-#define IS_PM7500 (machine_is_compatible("AAPL,7500") \
-		|| machine_is_compatible("AAPL,8500") \
-		|| machine_is_compatible("AAPL,9500"))
-#define IS_PM5500 (machine_is_compatible("AAPL,e411"))
-#define IS_BEIGE (machine_is_compatible("AAPL,Gossamer"))
-#define IS_IMAC1 (machine_is_compatible("PowerMac2,1"))
-#define IS_IMAC2 (machine_is_compatible("PowerMac2,2") \
-		|| machine_is_compatible("PowerMac4,1"))
-#define IS_G4AGP (machine_is_compatible("PowerMac3,1"))
-#define IS_LOMBARD (machine_is_compatible("PowerBook1,1"))
+#define IS_PM7500 (of_machine_is_compatible("AAPL,7500") \
+		|| of_machine_is_compatible("AAPL,8500") \
+		|| of_machine_is_compatible("AAPL,9500"))
+#define IS_PM5500 (of_machine_is_compatible("AAPL,e411"))
+#define IS_BEIGE (of_machine_is_compatible("AAPL,Gossamer"))
+#define IS_IMAC1 (of_machine_is_compatible("PowerMac2,1"))
+#define IS_IMAC2 (of_machine_is_compatible("PowerMac2,2") \
+		|| of_machine_is_compatible("PowerMac4,1"))
+#define IS_G4AGP (of_machine_is_compatible("PowerMac3,1"))
+#define IS_LOMBARD (of_machine_is_compatible("PowerBook1,1"))
 
 static int imac1, imac2;
 
diff --git a/sound/ppc/burgundy.c b/sound/ppc/burgundy.c
index 0accfe4..1f72e1c 100644
--- a/sound/ppc/burgundy.c
+++ b/sound/ppc/burgundy.c
@@ -582,7 +582,7 @@
 static void snd_pmac_burgundy_update_automute(struct snd_pmac *chip, int do_notify)
 {
 	if (chip->auto_mute) {
-		int imac = machine_is_compatible("iMac");
+		int imac = of_machine_is_compatible("iMac");
 		int reg, oreg;
 		reg = oreg = snd_pmac_burgundy_rcb(chip,
 				MASK_ADDR_BURGUNDY_MORE_OUTPUTENABLES);
@@ -620,7 +620,7 @@
  */
 int __devinit snd_pmac_burgundy_init(struct snd_pmac *chip)
 {
-	int imac = machine_is_compatible("iMac");
+	int imac = of_machine_is_compatible("iMac");
 	int i, err;
 
 	/* Checks to see the chip is alive and kicking */
diff --git a/sound/ppc/pmac.c b/sound/ppc/pmac.c
index 7bc492e..8508117 100644
--- a/sound/ppc/pmac.c
+++ b/sound/ppc/pmac.c
@@ -922,11 +922,11 @@
 	}
 
 	/* it seems the Pismo & iBook can't byte-swap in hardware. */
-	if (machine_is_compatible("PowerBook3,1") ||
-	    machine_is_compatible("PowerBook2,1"))
+	if (of_machine_is_compatible("PowerBook3,1") ||
+	    of_machine_is_compatible("PowerBook2,1"))
 		chip->can_byte_swap = 0 ;
 
-	if (machine_is_compatible("PowerBook2,1"))
+	if (of_machine_is_compatible("PowerBook2,1"))
 		chip->can_duplex = 0;
 }
 
@@ -959,11 +959,11 @@
 	chip->control_mask = MASK_IEPC | MASK_IEE | 0x11; /* default */
 
 	/* check machine type */
-	if (machine_is_compatible("AAPL,3400/2400")
-	    || machine_is_compatible("AAPL,3500"))
+	if (of_machine_is_compatible("AAPL,3400/2400")
+	    || of_machine_is_compatible("AAPL,3500"))
 		chip->is_pbook_3400 = 1;
-	else if (machine_is_compatible("PowerBook1,1")
-		 || machine_is_compatible("AAPL,PowerBook1998"))
+	else if (of_machine_is_compatible("PowerBook1,1")
+		 || of_machine_is_compatible("AAPL,PowerBook1998"))
 		chip->is_pbook_G3 = 1;
 	chip->node = of_find_node_by_name(NULL, "awacs");
 	sound = of_node_get(chip->node);
@@ -1033,8 +1033,8 @@
 	}
 	if (of_device_is_compatible(sound, "tumbler")) {
 		chip->model = PMAC_TUMBLER;
-		chip->can_capture = machine_is_compatible("PowerMac4,2")
-				|| machine_is_compatible("PowerBook4,1");
+		chip->can_capture = of_machine_is_compatible("PowerMac4,2")
+				|| of_machine_is_compatible("PowerBook4,1");
 		chip->can_duplex = 0;
 		// chip->can_byte_swap = 0; /* FIXME: check this */
 		chip->num_freqs = ARRAY_SIZE(tumbler_freqs);
diff --git a/sound/soc/fsl/efika-audio-fabric.c b/sound/soc/fsl/efika-audio-fabric.c
index 3326e2a..1a5b8e0 100644
--- a/sound/soc/fsl/efika-audio-fabric.c
+++ b/sound/soc/fsl/efika-audio-fabric.c
@@ -55,7 +55,7 @@
 	struct platform_device *pdev;
 	int rc;
 
-	if (!machine_is_compatible("bplan,efika"))
+	if (!of_machine_is_compatible("bplan,efika"))
 		return -ENODEV;
 
 	card.platform = &mpc5200_audio_dma_platform;
diff --git a/sound/soc/fsl/pcm030-audio-fabric.c b/sound/soc/fsl/pcm030-audio-fabric.c
index b928ef7..6644cba 100644
--- a/sound/soc/fsl/pcm030-audio-fabric.c
+++ b/sound/soc/fsl/pcm030-audio-fabric.c
@@ -55,7 +55,7 @@
 	struct platform_device *pdev;
 	int rc;
 
-	if (!machine_is_compatible("phytec,pcm030"))
+	if (!of_machine_is_compatible("phytec,pcm030"))
 		return -ENODEV;
 
 	card.platform = &mpc5200_audio_dma_platform;