Merge branch 'for-next' of git://git.samba.org/sfrench/cifs-2.6

Pull CIFS fixes from Steve French:
 "Two minor cifs fixes and a minor documentation cleanup for cifs.txt"

* 'for-next' of git://git.samba.org/sfrench/cifs-2.6:
  cifs: update cifs.txt and remove some outdated infos
  cifs: Avoid calling unlock_page() twice in cifs_readpage() when using fscache
  cifs: Do not take a reference to the page in cifs_readpage_worker()
diff --git a/Documentation/scsi/ChangeLog.megaraid_sas b/Documentation/scsi/ChangeLog.megaraid_sas
index cc92ca8..6edaa65 100644
--- a/Documentation/scsi/ChangeLog.megaraid_sas
+++ b/Documentation/scsi/ChangeLog.megaraid_sas
@@ -1,3 +1,13 @@
+Release Date    : Sat. Aug 31, 2013 17:00:00 PST 2013 -
+			(emaild-id:megaraidlinux@lsi.com)
+			Adam Radford
+			Kashyap Desai
+			Sumit Saxena
+Current Version : 06.700.06.00-rc1
+Old Version     : 06.600.18.00-rc1
+    1. Add High Availability clustering support using shared Logical Disks.
+    2. Version and Changelog update.
+-------------------------------------------------------------------------------
 Release Date    : Wed. May 15, 2013 17:00:00 PST 2013 -
 			(emaild-id:megaraidlinux@lsi.com)
 			Adam Radford
diff --git a/arch/mips/dec/ioasic-irq.c b/arch/mips/dec/ioasic-irq.c
index 824e08c..4b3e3a4 100644
--- a/arch/mips/dec/ioasic-irq.c
+++ b/arch/mips/dec/ioasic-irq.c
@@ -51,6 +51,14 @@
 	.irq_unmask = unmask_ioasic_irq,
 };
 
+void clear_ioasic_dma_irq(unsigned int irq)
+{
+	u32 sir;
+
+	sir = ~(1 << (irq - ioasic_irq_base));
+	ioasic_write(IO_REG_SIR, sir);
+}
+
 static struct irq_chip ioasic_dma_irq_type = {
 	.name = "IO-ASIC-DMA",
 	.irq_ack = ack_ioasic_irq,
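
For reference, the new clear_ioasic_dma_irq() acknowledges a DMA interrupt by writing the SIR (system interrupt request) register with every bit set except the one belonging to the IRQ; since the write is done without a prior read, this presumably relies on bits written as 1 leaving the corresponding request lines unchanged. A standalone sketch of the mask arithmetic, with hypothetical base and IRQ numbers:

	#include <stdint.h>
	#include <stdio.h>

	/* Sketch of the SIR mask computed by clear_ioasic_dma_irq().
	 * ioasic_irq_base and irq are hypothetical example values here;
	 * on real hardware the base is whatever init_ioasic_irqs() set.
	 */
	int main(void)
	{
		unsigned int ioasic_irq_base = 40;	/* hypothetical */
		unsigned int irq = 43;			/* hypothetical DMA IRQ */
		uint32_t sir = ~(1u << (irq - ioasic_irq_base));

		/* Prints 0xfffffff7: only bit 3, this IRQ's request bit,
		 * is zero in the value written to IO_REG_SIR. */
		printf("SIR write value: %#x\n", (unsigned int)sir);
		return 0;
	}
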
diff --git a/arch/mips/dec/time.c b/arch/mips/dec/time.c
index 56ebc7f..1914e56 100644
--- a/arch/mips/dec/time.c
+++ b/arch/mips/dec/time.c
@@ -125,12 +125,16 @@
 
 void __init plat_time_init(void)
 {
+	int ioasic_clock = 0;
 	u32 start, end;
 	int i = HZ / 8;
 
 	/* Set up the rate of periodic DS1287 interrupts. */
 	ds1287_set_base_clock(HZ);
 
+	/* On some I/O ASIC systems we have the I/O ASIC's counter.  */
+	if (IOASIC)
+		ioasic_clock = dec_ioasic_clocksource_init() == 0;
 	if (cpu_has_counter) {
 		ds1287_timer_state();
 		while (!ds1287_timer_state())
@@ -147,9 +151,21 @@
 		mips_hpt_frequency = (end - start) * 8;
 		printk(KERN_INFO "MIPS counter frequency %dHz\n",
 			mips_hpt_frequency);
-	} else if (IOASIC)
-		/* For pre-R4k systems we use the I/O ASIC's counter.  */
-		dec_ioasic_clocksource_init();
+
+		/*
+		 * All R4k DECstations suffer from the CP0 Count erratum,
+		 * so we can't use the timer as both a clock source and a
+		 * clock event at the same time.  An accurate wall clock is
+		 * more important than a high-precision interval timer, so
+		 * if there's no I/O ASIC counter available to serve as a
+		 * clock source, use the timer as a clock source only, and
+		 * not as a clock event.
+		 */
+		if (!ioasic_clock) {
+			init_r4k_clocksource();
+			mips_hpt_frequency = 0;
+		}
+	}
 
 	ds1287_clockevent_init(dec_interrupt[DEC_IRQ_RTC]);
 }
diff --git a/arch/mips/include/asm/dec/ioasic.h b/arch/mips/include/asm/dec/ioasic.h
index 98badd6..a6e505a 100644
--- a/arch/mips/include/asm/dec/ioasic.h
+++ b/arch/mips/include/asm/dec/ioasic.h
@@ -31,8 +31,10 @@
 	return ioasic_base[reg / 4];
 }
 
+extern void clear_ioasic_dma_irq(unsigned int irq);
+
 extern void init_ioasic_irqs(int base);
 
-extern void dec_ioasic_clocksource_init(void);
+extern int dec_ioasic_clocksource_init(void);
 
 #endif /* __ASM_DEC_IOASIC_H */
diff --git a/arch/mips/kernel/csrc-ioasic.c b/arch/mips/kernel/csrc-ioasic.c
index 87e88fe..6cbbf6e 100644
--- a/arch/mips/kernel/csrc-ioasic.c
+++ b/arch/mips/kernel/csrc-ioasic.c
@@ -37,7 +37,7 @@
 	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
-void __init dec_ioasic_clocksource_init(void)
+int __init dec_ioasic_clocksource_init(void)
 {
 	unsigned int freq;
 	u32 start, end;
@@ -56,8 +56,14 @@
 	end = dec_ioasic_hpt_read(&clocksource_dec);
 
 	freq = (end - start) * 8;
+
+	/* An early revision of the I/O ASIC didn't have the counter.  */
+	if (!freq)
+		return -ENXIO;
+
 	printk(KERN_INFO "I/O ASIC clock frequency %dHz\n", freq);
 
 	clocksource_dec.rating = 200 + freq / 10000000;
 	clocksource_register_hz(&clocksource_dec, freq);
+	return 0;
 }
diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c
index c2e5d74..5969f1e 100644
--- a/arch/mips/kernel/smp-cmp.c
+++ b/arch/mips/kernel/smp-cmp.c
@@ -99,7 +99,9 @@
 
 	c->core = (read_c0_ebase() >> 1) & 0x1ff;
 #if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
-	c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE;
+	if (cpu_has_mipsmt)
+		c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) &
+			TCBIND_CURVPE;
 #endif
 #ifdef CONFIG_MIPS_MT_SMTC
 	c->tc_id  = (read_c0_tcbind() & TCBIND_CURTC) >> TCBIND_CURTC_SHIFT;
@@ -177,9 +179,16 @@
 	}
 
 	if (cpu_has_mipsmt) {
-		unsigned int nvpe, mvpconf0 = read_c0_mvpconf0();
+		unsigned int nvpe = 1;
+#ifdef CONFIG_MIPS_MT_SMP
+		unsigned int mvpconf0 = read_c0_mvpconf0();
+
+		nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
+#elif defined(CONFIG_MIPS_MT_SMTC)
+		unsigned int mvpconf0 = read_c0_mvpconf0();
 
 		nvpe = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
+#endif
 		smp_num_siblings = nvpe;
 	}
 	pr_info("Detected %i available secondary CPU(s)\n", ncpu);
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index faf84c5..59b2b3c 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -1368,7 +1368,7 @@
 }
 static DEVICE_ATTR_RW(ntcs);
 
-static struct attribute vpe_attrs[] = {
+static struct attribute *vpe_attrs[] = {
 	&dev_attr_kill.attr,
 	&dev_attr_ntcs.attr,
 	NULL,
diff --git a/block/partitions/efi.c b/block/partitions/efi.c
index 1a5ec9a..1eb09ee 100644
--- a/block/partitions/efi.c
+++ b/block/partitions/efi.c
@@ -186,6 +186,7 @@
  */
 static int is_pmbr_valid(legacy_mbr *mbr, sector_t total_sectors)
 {
+	uint32_t sz = 0;
 	int i, part = 0, ret = 0; /* invalid by default */
 
 	if (!mbr || le16_to_cpu(mbr->signature) != MSDOS_MBR_SIGNATURE)
@@ -216,12 +217,15 @@
 	/*
 	 * Protective MBRs take up the lesser of the whole disk
 	 * or 2 TiB (32bit LBA), ignoring the rest of the disk.
+	 * Some partitioning programs, nonetheless, choose to set
+	 * the size to the maximum 32-bit limitation, disregarding
+	 * the disk size.
 	 *
 	 * Hybrid MBRs do not necessarily comply with this.
 	 */
 	if (ret == GPT_MBR_PROTECTIVE) {
-		if (le32_to_cpu(mbr->partition_record[part].size_in_lba) !=
-		    min((uint32_t) total_sectors - 1, 0xFFFFFFFF))
+		sz = le32_to_cpu(mbr->partition_record[part].size_in_lba);
+		if (sz != (uint32_t) total_sectors - 1 && sz != 0xFFFFFFFF)
 			ret = 0;
 	}
 done:
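
The rewritten check is deliberately looser than the min() comparison it replaces: a protective MBR is now accepted if its size field covers the whole disk ((uint32_t) total_sectors - 1) or is pinned at the 32-bit maximum 0xFFFFFFFF, rather than only the smaller of the two. A standalone sketch of the accepted-size predicate, using a hypothetical disk size:

	#include <stdint.h>
	#include <stdio.h>

	/* Sketch of the relaxed protective-MBR size check above. */
	static int pmbr_size_ok(uint32_t size_in_lba, uint64_t total_sectors)
	{
		return size_in_lba == (uint32_t)total_sectors - 1 ||
		       size_in_lba == 0xFFFFFFFF;
	}

	int main(void)
	{
		uint64_t total = 1953525168ULL;	/* hypothetical ~1 TB disk */

		printf("%d\n", pmbr_size_ok(0xFFFFFFFF, total));	/* 1: pinned at max */
		printf("%d\n", pmbr_size_ok(1953525167U, total));	/* 1: whole disk */
		printf("%d\n", pmbr_size_ok(123456U, total));		/* 0: rejected */
		return 0;
	}
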
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index d2b34fb..b6ded17 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -48,6 +48,7 @@
 	struct evdev *evdev;
 	struct list_head node;
 	int clkid;
+	bool revoked;
 	unsigned int bufsize;
 	struct input_event buffer[];
 };
@@ -164,6 +165,9 @@
 	struct input_event event;
 	bool wakeup = false;
 
+	if (client->revoked)
+		return;
+
 	event.time = ktime_to_timeval(client->clkid == CLOCK_MONOTONIC ?
 				      mono : real);
 
@@ -240,7 +244,7 @@
 	if (retval)
 		return retval;
 
-	if (!evdev->exist)
+	if (!evdev->exist || client->revoked)
 		retval = -ENODEV;
 	else
 		retval = input_flush_device(&evdev->handle, file);
@@ -429,7 +433,7 @@
 	if (retval)
 		return retval;
 
-	if (!evdev->exist) {
+	if (!evdev->exist || client->revoked) {
 		retval = -ENODEV;
 		goto out;
 	}
@@ -482,7 +486,7 @@
 		return -EINVAL;
 
 	for (;;) {
-		if (!evdev->exist)
+		if (!evdev->exist || client->revoked)
 			return -ENODEV;
 
 		if (client->packet_head == client->tail &&
@@ -511,7 +515,7 @@
 		if (!(file->f_flags & O_NONBLOCK)) {
 			error = wait_event_interruptible(evdev->wait,
 					client->packet_head != client->tail ||
-					!evdev->exist);
+					!evdev->exist || client->revoked);
 			if (error)
 				return error;
 		}
@@ -529,7 +533,11 @@
 
 	poll_wait(file, &evdev->wait, wait);
 
-	mask = evdev->exist ? POLLOUT | POLLWRNORM : POLLHUP | POLLERR;
+	if (evdev->exist && !client->revoked)
+		mask = POLLOUT | POLLWRNORM;
+	else
+		mask = POLLHUP | POLLERR;
+
 	if (client->packet_head != client->tail)
 		mask |= POLLIN | POLLRDNORM;
 
@@ -795,6 +803,17 @@
 	return 0;
 }
 
+static int evdev_revoke(struct evdev *evdev, struct evdev_client *client,
+			struct file *file)
+{
+	client->revoked = true;
+	evdev_ungrab(evdev, client);
+	input_flush_device(&evdev->handle, file);
+	wake_up_interruptible(&evdev->wait);
+
+	return 0;
+}
+
 static long evdev_do_ioctl(struct file *file, unsigned int cmd,
 			   void __user *p, int compat_mode)
 {
@@ -857,6 +876,12 @@
 		else
 			return evdev_ungrab(evdev, client);
 
+	case EVIOCREVOKE:
+		if (p)
+			return -EINVAL;
+		else
+			return evdev_revoke(evdev, client, file);
+
 	case EVIOCSCLOCKID:
 		if (copy_from_user(&i, p, sizeof(unsigned int)))
 			return -EFAULT;
@@ -1002,7 +1027,7 @@
 	if (retval)
 		return retval;
 
-	if (!evdev->exist) {
+	if (!evdev->exist || client->revoked) {
 		retval = -ENODEV;
 		goto out;
 	}
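
The net effect is that a revoked client keeps a valid file descriptor but is cut off from the device: reads and writes fail with -ENODEV, poll() reports POLLHUP, and any grab it held is dropped. A minimal userspace sketch, assuming a kernel carrying this patch so that EVIOCREVOKE is defined in <linux/input.h> (the device path is hypothetical):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/input.h>

	int main(void)
	{
		/* hypothetical device node */
		int fd = open("/dev/input/event0", O_RDWR);
		if (fd < 0) {
			perror("open");
			return 1;
		}

		/* The argument must be NULL/0; anything else is -EINVAL. */
		if (ioctl(fd, EVIOCREVOKE, NULL) < 0)
			perror("EVIOCREVOKE");

		/* From here on, reads fail with -ENODEV and poll() reports
		 * POLLHUP, but the fd itself stays open and close() still
		 * works as usual. */
		close(fd);
		return 0;
	}
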
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index 1542751..f5aa4b0 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -1343,7 +1343,7 @@
 static int invalidate_fastmap(struct ubi_device *ubi,
 			      struct ubi_fastmap_layout *fm)
 {
-	int ret, i;
+	int ret;
 	struct ubi_vid_hdr *vh;
 
 	ret = erase_block(ubi, fm->e[0]->pnum);
@@ -1360,9 +1360,6 @@
 	vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
 	ret = ubi_io_write_vid_hdr(ubi, fm->e[0]->pnum, vh);
 
-	for (i = 0; i < fm->used_blocks; i++)
-		ubi_wl_put_fm_peb(ubi, fm->e[i], i, fm->to_be_tortured[i]);
-
 	return ret;
 }
 
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 5df49d3..c95bfb1 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1069,6 +1069,9 @@
 		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
 			dbg_wl("no WL needed: min used EC %d, max free EC %d",
 			       e1->ec, e2->ec);
+
+			/* Give the unused PEB back */
+			wl_tree_add(e2, &ubi->free);
 			goto out_cancel;
 		}
 		self_check_in_wl_tree(ubi, e1, &ubi->used);
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index 3d86ffe..94edc9c 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -725,6 +725,7 @@
 {
 	struct net_device *dev = dev_id;
 
+	clear_ioasic_dma_irq(irq);
 	printk(KERN_ERR "%s: DMA error\n", dev->name);
 	return IRQ_HANDLED;
 }
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 36a9e60..96d6b2e 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -732,6 +732,7 @@
 	tristate "Samsung Laptop driver"
 	depends on X86
 	depends on RFKILL || RFKILL = n
+	depends on ACPI_VIDEO || ACPI_VIDEO = n
 	depends on BACKLIGHT_CLASS_DEVICE
 	select LEDS_CLASS
 	select NEW_LEDS
@@ -764,7 +765,7 @@
 
 config SAMSUNG_Q10
 	tristate "Samsung Q10 Extras"
-	depends on SERIO_I8042
+	depends on ACPI
 	select BACKLIGHT_CLASS_DEVICE
 	---help---
 	  This driver provides support for backlight control on Samsung Q10
diff --git a/drivers/platform/x86/amilo-rfkill.c b/drivers/platform/x86/amilo-rfkill.c
index 6296f07..da36b5e 100644
--- a/drivers/platform/x86/amilo-rfkill.c
+++ b/drivers/platform/x86/amilo-rfkill.c
@@ -85,6 +85,13 @@
 	{
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+			DMI_MATCH(DMI_BOARD_NAME, "AMILO L1310"),
+		},
+		.driver_data = (void *)&amilo_a1655_rfkill_ops
+	},
+	{
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
 			DMI_MATCH(DMI_BOARD_NAME, "AMILO M7440"),
 		},
 		.driver_data = (void *)&amilo_m7440_rfkill_ops
diff --git a/drivers/platform/x86/classmate-laptop.c b/drivers/platform/x86/classmate-laptop.c
index 36e5e6c..6dfa8d3 100644
--- a/drivers/platform/x86/classmate-laptop.c
+++ b/drivers/platform/x86/classmate-laptop.c
@@ -590,7 +590,7 @@
 	inputdev = dev_get_drvdata(&acpi->dev);
 	accel = dev_get_drvdata(&inputdev->dev);
 
-	r = strict_strtoul(buf, 0, &sensitivity);
+	r = kstrtoul(buf, 0, &sensitivity);
 	if (r)
 		return r;
 
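
strict_strtoul() has been deprecated in favor of the kstrto*() helpers, which reject trailing garbage and overflow and return 0 or a -errno that can be handed straight back to the sysfs writer. A hedged sketch of the resulting store-method pattern (the attribute and its hardware hook are hypothetical):

	#include <linux/kernel.h>	/* kstrtoul() */
	#include <linux/device.h>

	/* Hypothetical sysfs store helper following the pattern above. */
	static ssize_t sensitivity_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
	{
		unsigned long sensitivity;
		int r;

		r = kstrtoul(buf, 0, &sensitivity);	/* base 0: auto-detect */
		if (r)
			return r;

		/* ... apply the value to the hardware here ... */
		return count;
	}
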
diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
index 475cc52..eaa78ed 100644
--- a/drivers/platform/x86/compal-laptop.c
+++ b/drivers/platform/x86/compal-laptop.c
@@ -425,7 +425,8 @@
 	struct compal_data *data = dev_get_drvdata(dev);
 	long val;
 	int err;
-	err = strict_strtol(buf, 10, &val);
+
+	err = kstrtol(buf, 10, &val);
 	if (err)
 		return err;
 	if (val < 0)
@@ -463,7 +464,8 @@
 	struct compal_data *data = dev_get_drvdata(dev);
 	long val;
 	int err;
-	err = strict_strtol(buf, 10, &val);
+
+	err = kstrtol(buf, 10, &val);
 	if (err)
 		return err;
 	if (val < 0 || val > 255)
@@ -1081,7 +1083,6 @@
 	hwmon_device_unregister(data->hwmon_dev);
 	power_supply_unregister(&data->psy);
 
-	platform_set_drvdata(pdev, NULL);
 	kfree(data);
 
 	sysfs_remove_group(&pdev->dev.kobj, &compal_attribute_group);
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index d6970f4..1c86fa0 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -725,7 +725,7 @@
 					   (void *) HPWMI_WWAN);
 		if (!wwan_rfkill) {
 			err = -ENOMEM;
-			goto register_gps_error;
+			goto register_bluetooth_error;
 		}
 		rfkill_init_sw_state(wwan_rfkill,
 				     hp_wmi_get_sw_state(HPWMI_WWAN));
@@ -733,7 +733,7 @@
 				    hp_wmi_get_hw_state(HPWMI_WWAN));
 		err = rfkill_register(wwan_rfkill);
 		if (err)
-			goto register_wwan_err;
+			goto register_wwan_error;
 	}
 
 	if (wireless & 0x8) {
@@ -743,7 +743,7 @@
 						(void *) HPWMI_GPS);
 		if (!gps_rfkill) {
 			err = -ENOMEM;
-			goto register_bluetooth_error;
+			goto register_wwan_error;
 		}
 		rfkill_init_sw_state(gps_rfkill,
 				     hp_wmi_get_sw_state(HPWMI_GPS));
@@ -755,16 +755,16 @@
 	}
 
 	return 0;
-register_wwan_err:
-	rfkill_destroy(wwan_rfkill);
-	wwan_rfkill = NULL;
-	if (gps_rfkill)
-		rfkill_unregister(gps_rfkill);
 register_gps_error:
 	rfkill_destroy(gps_rfkill);
 	gps_rfkill = NULL;
 	if (bluetooth_rfkill)
 		rfkill_unregister(bluetooth_rfkill);
+register_wwan_error:
+	rfkill_destroy(wwan_rfkill);
+	wwan_rfkill = NULL;
+	if (gps_rfkill)
+		rfkill_unregister(gps_rfkill);
 register_bluetooth_error:
 	rfkill_destroy(bluetooth_rfkill);
 	bluetooth_rfkill = NULL;
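
The relabeling restores the usual kernel unwind convention: error labels sit in reverse order of successful setup, so a failure at any step falls through the teardown of the steps completed before it. A minimal sketch of the idiom with hypothetical setup/teardown helpers:

	/* hypothetical helpers */
	int setup_bluetooth(void); void teardown_bluetooth(void);
	int setup_wwan(void);      void teardown_wwan(void);
	int setup_gps(void);

	/* Sketch of reverse-order error unwinding, as restored above. */
	static int setup_all(void)
	{
		int err;

		err = setup_bluetooth();
		if (err)
			goto err_bluetooth;
		err = setup_wwan();
		if (err)
			goto err_wwan;
		err = setup_gps();
		if (err)
			goto err_gps;
		return 0;

	err_gps:		/* gps failed: undo wwan, then bluetooth */
		teardown_wwan();
	err_wwan:		/* wwan failed: undo bluetooth */
		teardown_bluetooth();
	err_bluetooth:
		return err;
	}
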
diff --git a/drivers/platform/x86/intel-rst.c b/drivers/platform/x86/intel-rst.c
index 9385afd..41b740c 100644
--- a/drivers/platform/x86/intel-rst.c
+++ b/drivers/platform/x86/intel-rst.c
@@ -193,17 +193,6 @@
 	},
 };
 
-static int irst_init(void)
-{
-	return acpi_bus_register_driver(&irst_driver);
-}
-
-static void irst_exit(void)
-{
-	acpi_bus_unregister_driver(&irst_driver);
-}
-
-module_init(irst_init);
-module_exit(irst_exit);
+module_acpi_driver(irst_driver);
 
 MODULE_DEVICE_TABLE(acpi, irst_ids);
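
module_acpi_driver() removes exactly the init/exit boilerplate deleted here. For reference, the macro (built on module_driver() in <linux/acpi.h>) expands to roughly the pair it replaces; a paraphrased sketch:

	/* Rough expansion of module_acpi_driver(irst_driver): the macro
	 * generates the same register/unregister pair that was open-coded
	 * before, with compiler-generated function names.
	 */
	static int __init irst_driver_init(void)
	{
		return acpi_bus_register_driver(&irst_driver);
	}
	module_init(irst_driver_init);

	static void __exit irst_driver_exit(void)
	{
		acpi_bus_unregister_driver(&irst_driver);
	}
	module_exit(irst_driver_exit);
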
diff --git a/drivers/platform/x86/intel-smartconnect.c b/drivers/platform/x86/intel-smartconnect.c
index f74e93d..52259dc 100644
--- a/drivers/platform/x86/intel-smartconnect.c
+++ b/drivers/platform/x86/intel-smartconnect.c
@@ -74,17 +74,6 @@
 	},
 };
 
-static int smartconnect_init(void)
-{
-	return acpi_bus_register_driver(&smartconnect_driver);
-}
-
-static void smartconnect_exit(void)
-{
-	acpi_bus_unregister_driver(&smartconnect_driver);
-}
-
-module_init(smartconnect_init);
-module_exit(smartconnect_exit);
+module_acpi_driver(smartconnect_driver);
 
 MODULE_DEVICE_TABLE(acpi, smartconnect_ids);
diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c
index f59683a..6b18aba 100644
--- a/drivers/platform/x86/intel_mid_powerbtn.c
+++ b/drivers/platform/x86/intel_mid_powerbtn.c
@@ -128,7 +128,6 @@
 
 	free_irq(irq, input);
 	input_unregister_device(input);
-	platform_set_drvdata(pdev, NULL);
 
 	return 0;
 }
diff --git a/drivers/platform/x86/intel_mid_thermal.c b/drivers/platform/x86/intel_mid_thermal.c
index 81c491e..93fab8b 100644
--- a/drivers/platform/x86/intel_mid_thermal.c
+++ b/drivers/platform/x86/intel_mid_thermal.c
@@ -542,7 +542,6 @@
 	}
 
 	kfree(pinfo);
-	platform_set_drvdata(pdev, NULL);
 
 	/* Stop the ADC */
 	return configure_adc(0);
diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
index 984253d..10d12b2 100644
--- a/drivers/platform/x86/panasonic-laptop.c
+++ b/drivers/platform/x86/panasonic-laptop.c
@@ -643,23 +643,6 @@
 	return result;
 }
 
-static int __init acpi_pcc_init(void)
-{
-	int result = 0;
-
-	if (acpi_disabled)
-		return -ENODEV;
-
-	result = acpi_bus_register_driver(&acpi_pcc_driver);
-	if (result < 0) {
-		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
-				  "Error registering hotkey driver\n"));
-		return -ENODEV;
-	}
-
-	return 0;
-}
-
 static int acpi_pcc_hotkey_remove(struct acpi_device *device)
 {
 	struct pcc_acpi *pcc = acpi_driver_data(device);
@@ -679,10 +662,4 @@
 	return 0;
 }
 
-static void __exit acpi_pcc_exit(void)
-{
-	acpi_bus_unregister_driver(&acpi_pcc_driver);
-}
-
-module_init(acpi_pcc_init);
-module_exit(acpi_pcc_exit);
+module_acpi_driver(acpi_pcc_driver);
diff --git a/drivers/platform/x86/samsung-q10.c b/drivers/platform/x86/samsung-q10.c
index 4430b8c..cae7098 100644
--- a/drivers/platform/x86/samsung-q10.c
+++ b/drivers/platform/x86/samsung-q10.c
@@ -14,16 +14,12 @@
 #include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/backlight.h>
-#include <linux/i8042.h>
 #include <linux/dmi.h>
+#include <acpi/acpi_drivers.h>
 
-#define SAMSUNGQ10_BL_MAX_INTENSITY      255
-#define SAMSUNGQ10_BL_DEFAULT_INTENSITY  185
+#define SAMSUNGQ10_BL_MAX_INTENSITY 7
 
-#define SAMSUNGQ10_BL_8042_CMD           0xbe
-#define SAMSUNGQ10_BL_8042_DATA          { 0x89, 0x91 }
-
-static int samsungq10_bl_brightness;
+static acpi_handle ec_handle;
 
 static bool force;
 module_param(force, bool, 0);
@@ -33,21 +29,26 @@
 static int samsungq10_bl_set_intensity(struct backlight_device *bd)
 {
 
-	int brightness = bd->props.brightness;
-	unsigned char c[3] = SAMSUNGQ10_BL_8042_DATA;
+	acpi_status status;
+	int i;
 
-	c[2] = (unsigned char)brightness;
-	i8042_lock_chip();
-	i8042_command(c, (0x30 << 8) | SAMSUNGQ10_BL_8042_CMD);
-	i8042_unlock_chip();
-	samsungq10_bl_brightness = brightness;
+	for (i = 0; i < SAMSUNGQ10_BL_MAX_INTENSITY; i++) {
+		status = acpi_evaluate_object(ec_handle, "_Q63", NULL, NULL);
+		if (ACPI_FAILURE(status))
+			return -EIO;
+	}
+	for (i = 0; i < bd->props.brightness; i++) {
+		status = acpi_evaluate_object(ec_handle, "_Q64", NULL, NULL);
+		if (ACPI_FAILURE(status))
+			return -EIO;
+	}
 
 	return 0;
 }
 
 static int samsungq10_bl_get_intensity(struct backlight_device *bd)
 {
-	return samsungq10_bl_brightness;
+	return bd->props.brightness;
 }
 
 static const struct backlight_ops samsungq10_bl_ops = {
@@ -55,28 +56,6 @@
 	.update_status	= samsungq10_bl_set_intensity,
 };
 
-#ifdef CONFIG_PM_SLEEP
-static int samsungq10_suspend(struct device *dev)
-{
-	return 0;
-}
-
-static int samsungq10_resume(struct device *dev)
-{
-
-	struct backlight_device *bd = dev_get_drvdata(dev);
-
-	samsungq10_bl_set_intensity(bd);
-	return 0;
-}
-#else
-#define samsungq10_suspend NULL
-#define samsungq10_resume  NULL
-#endif
-
-static SIMPLE_DEV_PM_OPS(samsungq10_pm_ops,
-			  samsungq10_suspend, samsungq10_resume);
-
 static int samsungq10_probe(struct platform_device *pdev)
 {
 
@@ -93,9 +72,6 @@
 
 	platform_set_drvdata(pdev, bd);
 
-	bd->props.brightness = SAMSUNGQ10_BL_DEFAULT_INTENSITY;
-	samsungq10_bl_set_intensity(bd);
-
 	return 0;
 }
 
@@ -104,9 +80,6 @@
 
 	struct backlight_device *bd = platform_get_drvdata(pdev);
 
-	bd->props.brightness = SAMSUNGQ10_BL_DEFAULT_INTENSITY;
-	samsungq10_bl_set_intensity(bd);
-
 	backlight_device_unregister(bd);
 
 	return 0;
@@ -116,7 +89,6 @@
 	.driver		= {
 		.name	= KBUILD_MODNAME,
 		.owner	= THIS_MODULE,
-		.pm	= &samsungq10_pm_ops,
 	},
 	.probe		= samsungq10_probe,
 	.remove		= samsungq10_remove,
@@ -172,6 +144,11 @@
 	if (!force && !dmi_check_system(samsungq10_dmi_table))
 		return -ENODEV;
 
+	ec_handle = ec_get_handle();
+
+	if (!ec_handle)
+		return -ENODEV;
+
 	samsungq10_device = platform_create_bundle(&samsungq10_driver,
 						   samsungq10_probe,
 						   NULL, 0, NULL, 0);
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index be67e5e..03ca6c1 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -369,7 +369,7 @@
 	struct led_classdev led_classdev;
 	struct work_struct work;
 	enum led_status_t new_state;
-	unsigned int led;
+	int led;
 };
 
 /* brightness level capabilities */
@@ -5296,6 +5296,16 @@
 
 	led_supported = led_init_detect_mode();
 
+	if (led_supported != TPACPI_LED_NONE) {
+		useful_leds = tpacpi_check_quirks(led_useful_qtable,
+				ARRAY_SIZE(led_useful_qtable));
+
+		if (!useful_leds) {
+			led_handle = NULL;
+			led_supported = TPACPI_LED_NONE;
+		}
+	}
+
 	vdbg_printk(TPACPI_DBG_INIT, "LED commands are %s, mode %d\n",
 		str_supported(led_supported), led_supported);
 
@@ -5309,10 +5319,9 @@
 		return -ENOMEM;
 	}
 
-	useful_leds = tpacpi_check_quirks(led_useful_qtable,
-					  ARRAY_SIZE(led_useful_qtable));
-
 	for (i = 0; i < TPACPI_LED_NUMLEDS; i++) {
+		tpacpi_leds[i].led = -1;
+
 		if (!tpacpi_is_led_restricted(i) &&
 		    test_bit(i, &useful_leds)) {
 			rc = tpacpi_init_led(i);
@@ -5370,9 +5379,13 @@
 		return -ENODEV;
 
 	while ((cmd = next_cmd(&buf))) {
-		if (sscanf(cmd, "%d", &led) != 1 || led < 0 || led > 15)
+		if (sscanf(cmd, "%d", &led) != 1)
 			return -EINVAL;
 
+		if (led < 0 || led > (TPACPI_LED_NUMLEDS - 1) ||
+				tpacpi_leds[led].led < 0)
+			return -ENODEV;
+
 		if (strstr(cmd, "off")) {
 			s = TPACPI_LED_OFF;
 		} else if (strstr(cmd, "on")) {
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index 6e02c95..601ea95 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -780,7 +780,7 @@
 /*
  * Parse the _WDG method for the GUID data blocks
  */
-static acpi_status parse_wdg(acpi_handle handle)
+static int parse_wdg(acpi_handle handle)
 {
 	struct acpi_buffer out = {ACPI_ALLOCATE_BUFFER, NULL};
 	union acpi_object *obj;
@@ -812,7 +812,7 @@
 
 		wblock = kzalloc(sizeof(struct wmi_block), GFP_KERNEL);
 		if (!wblock)
-			return AE_NO_MEMORY;
+			return -ENOMEM;
 
 		wblock->handle = handle;
 		wblock->gblock = gblock[i];
diff --git a/drivers/scsi/aic7xxx/aic7xxx_pci.c b/drivers/scsi/aic7xxx/aic7xxx_pci.c
index 6917b4f..22d5a94 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_pci.c
@@ -692,7 +692,7 @@
 	 * ID as valid.
 	 */
 	if (ahc_get_pci_function(pci) > 0
-	 && ahc_9005_subdevinfo_valid(vendor, device, subvendor, subdevice)
+	 && ahc_9005_subdevinfo_valid(device, vendor, subdevice, subvendor)
 	 && SUBID_9005_MFUNCENB(subdevice) == 0)
 		return (NULL);
 
diff --git a/drivers/scsi/esas2r/esas2r_flash.c b/drivers/scsi/esas2r/esas2r_flash.c
index 8582929..2ec3c23 100644
--- a/drivers/scsi/esas2r/esas2r_flash.c
+++ b/drivers/scsi/esas2r/esas2r_flash.c
@@ -860,8 +860,13 @@
 		return false;
 	}
 
+	if (fsc->command >= cmdcnt) {
+		fs->status = ATTO_STS_INV_FUNC;
+		return false;
+	}
+
 	func = cmd_to_fls_func[fsc->command];
-	if (fsc->command >= cmdcnt || func == 0xFF) {
+	if (func == 0xFF) {
 		fs->status = ATTO_STS_INV_FUNC;
 		return false;
 	}
@@ -1355,7 +1360,7 @@
 	u32 time = jiffies_to_msecs(jiffies);
 
 	esas2r_lock_clear_flags(&a->flags, AF_NVR_VALID);
-	memcpy(n, &default_sas_nvram, sizeof(struct esas2r_sas_nvram));
+	*n = default_sas_nvram;
 	n->sas_addr[3] |= 0x0F;
 	n->sas_addr[4] = HIBYTE(LOWORD(time));
 	n->sas_addr[5] = LOBYTE(LOWORD(time));
@@ -1373,7 +1378,7 @@
 	 * address out first.
 	 */
 	memcpy(&sas_addr[0], a->nvram->sas_addr, 8);
-	memcpy(nvram, &default_sas_nvram, sizeof(struct esas2r_sas_nvram));
+	*nvram = default_sas_nvram;
 	memcpy(&nvram->sas_addr[0], &sas_addr[0], 8);
 }
 
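
The hunk in esas2r_flash.c above reorders a classic out-of-bounds pattern: the old code indexed cmd_to_fls_func[fsc->command] first and only then compared the command against cmdcnt, so a malformed request read past the table before being rejected. A standalone sketch of the safe shape, with a hypothetical table and status codes:

	#include <stddef.h>
	#include <stdint.h>

	enum { STS_OK, STS_INV_FUNC };	/* hypothetical status codes */

	static const uint8_t cmd_to_func[] = { 0x01, 0xFF, 0x02 };
	static const size_t cmdcnt = sizeof(cmd_to_func) / sizeof(cmd_to_func[0]);

	static int lookup_func(size_t command, uint8_t *func)
	{
		if (command >= cmdcnt)		/* validate before indexing */
			return STS_INV_FUNC;

		*func = cmd_to_func[command];
		if (*func == 0xFF)		/* hole in the table */
			return STS_INV_FUNC;

		return STS_OK;
	}
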
diff --git a/drivers/scsi/esas2r/esas2r_init.c b/drivers/scsi/esas2r/esas2r_init.c
index 3a798e7..da1869d 100644
--- a/drivers/scsi/esas2r/esas2r_init.c
+++ b/drivers/scsi/esas2r/esas2r_init.c
@@ -665,7 +665,7 @@
 
 int esas2r_cleanup(struct Scsi_Host *host)
 {
-	struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata;
+	struct esas2r_adapter *a;
 	int index;
 
 	if (host == NULL) {
@@ -678,6 +678,7 @@
 	}
 
 	esas2r_debug("esas2r_cleanup called for host %p", host);
+	a = (struct esas2r_adapter *)host->hostdata;
 	index = a->index;
 	esas2r_kill_adapter(index);
 	return index;
@@ -808,7 +809,7 @@
 	int pcie_cap_reg;
 
 	pcie_cap_reg = pci_find_capability(a->pcid, PCI_CAP_ID_EXP);
-	if (0xffff && pcie_cap_reg) {
+	if (0xffff & pcie_cap_reg) {
 		u16 devcontrol;
 
 		pci_read_config_word(a->pcid, pcie_cap_reg + PCI_EXP_DEVCTL,
@@ -1550,8 +1551,7 @@
 	 * to not overwrite a previous crash that was saved.
 	 */
 	if ((a->flags2 & AF2_COREDUMP_AVAIL)
-	    && !(a->flags2 & AF2_COREDUMP_SAVED)
-	    && a->fw_coredump_buff) {
+	    && !(a->flags2 & AF2_COREDUMP_SAVED)) {
 		esas2r_read_mem_block(a,
 				      a->fw_coredump_buff,
 				      MW_DATA_ADDR_SRAM + 0x80000,
diff --git a/drivers/scsi/esas2r/esas2r_ioctl.c b/drivers/scsi/esas2r/esas2r_ioctl.c
index f3d0cb8..e5b0902 100644
--- a/drivers/scsi/esas2r/esas2r_ioctl.c
+++ b/drivers/scsi/esas2r/esas2r_ioctl.c
@@ -415,7 +415,7 @@
 		lun = tm->lun;
 	}
 
-	if (path > 0 || tid > ESAS2R_MAX_ID) {
+	if (path > 0) {
 		rq->func_rsp.ioctl_rsp.csmi.csmi_status = cpu_to_le32(
 			CSMI_STS_INV_PARAM);
 		return false;
diff --git a/drivers/scsi/esas2r/esas2r_vda.c b/drivers/scsi/esas2r/esas2r_vda.c
index f8ec6d6..fd13928 100644
--- a/drivers/scsi/esas2r/esas2r_vda.c
+++ b/drivers/scsi/esas2r/esas2r_vda.c
@@ -302,6 +302,7 @@
 		if (vi->cmd.cfg.cfg_func == VDA_CFG_GET_INIT) {
 			struct atto_ioctl_vda_cfg_cmd *cfg = &vi->cmd.cfg;
 			struct atto_vda_cfg_rsp *rsp = &rq->func_rsp.cfg_rsp;
+			char buf[sizeof(cfg->data.init.fw_release) + 1];
 
 			cfg->data_length =
 				cpu_to_le32(sizeof(struct atto_vda_cfg_init));
@@ -309,11 +310,13 @@
 				le32_to_cpu(rsp->vda_version);
 			cfg->data.init.fw_build = rsp->fw_build;
 
-			sprintf((char *)&cfg->data.init.fw_release,
-				"%1d.%02d",
+			snprintf(buf, sizeof(buf), "%1d.%02d",
 				(int)LOBYTE(le16_to_cpu(rsp->fw_release)),
 				(int)HIBYTE(le16_to_cpu(rsp->fw_release)));
 
+			memcpy(&cfg->data.init.fw_release, buf,
+			       sizeof(cfg->data.init.fw_release));
+
 			if (LOWORD(LOBYTE(cfg->data.init.fw_build)) == 'A')
 				cfg->data.init.fw_version =
 					cfg->data.init.fw_build;
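
The sprintf() being replaced wrote its terminating NUL directly into fw_release, overrunning the field by one byte whenever the formatted version filled it exactly; formatting into a scratch buffer one byte larger and memcpy()ing only the field width sidesteps that. A standalone sketch with a hypothetical 8-byte field:

	#include <stdio.h>
	#include <string.h>

	struct fw_info {
		char fw_release[8];	/* hypothetical fixed-width field,
					 * not necessarily NUL-terminated */
	};

	static void set_release(struct fw_info *info, int major, int minor)
	{
		char buf[sizeof(info->fw_release) + 1];	/* room for the NUL */

		snprintf(buf, sizeof(buf), "%1d.%02d", major, minor);
		/* copy at most the field width; the NUL may be dropped */
		memcpy(info->fw_release, buf, sizeof(info->fw_release));
	}
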
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index c18c681..e4dd3d7 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -43,6 +43,8 @@
 #define DFX                     DRV_NAME "%d: "
 
 #define DESC_CLEAN_LOW_WATERMARK 8
+#define FNIC_UCSM_DFLT_THROTTLE_CNT_BLD	16 /* UCSM default throttle count */
+#define FNIC_MIN_IO_REQ			256 /* Min IO throttle count */
 #define FNIC_MAX_IO_REQ		2048 /* scsi_cmnd tag map entries */
 #define	FNIC_IO_LOCKS		64 /* IO locks: power of 2 */
 #define FNIC_DFLT_QUEUE_DEPTH	32
@@ -154,6 +156,9 @@
 	FNIC_CHECK_LOGGING(FNIC_ISR_LOGGING,			\
 			 shost_printk(kern_level, host, fmt, ##args);)
 
+#define FNIC_MAIN_NOTE(kern_level, host, fmt, args...)          \
+	shost_printk(kern_level, host, fmt, ##args)
+
 extern const char *fnic_state_str[];
 
 enum fnic_intx_intr_index {
@@ -215,10 +220,12 @@
 
 	struct vnic_stats *stats;
 	unsigned long stats_time;	/* time of stats update */
+	unsigned long stats_reset_time; /* time of stats reset */
 	struct vnic_nic_cfg *nic_cfg;
 	char name[IFNAMSIZ];
 	struct timer_list notify_timer; /* used for MSI interrupts */
 
+	unsigned int fnic_max_tag_id;
 	unsigned int err_intr_offset;
 	unsigned int link_intr_offset;
 
@@ -359,4 +366,5 @@
 	return ((fnic->state_flags & st_flags) == st_flags);
 }
 void __fnic_set_state_flags(struct fnic *, unsigned long, unsigned long);
+void fnic_dump_fchost_stats(struct Scsi_Host *, struct fc_host_statistics *);
 #endif /* _FNIC_H_ */
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 42e15ee..bbf81ea 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -74,6 +74,10 @@
 MODULE_PARM_DESC(fnic_trace_max_pages, "Total allocated memory pages "
 					"for fnic trace buffer");
 
+static unsigned int fnic_max_qdepth = FNIC_DFLT_QUEUE_DEPTH;
+module_param(fnic_max_qdepth, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(fnic_max_qdepth, "Queue depth to report for each LUN");
+
 static struct libfc_function_template fnic_transport_template = {
 	.frame_send = fnic_send,
 	.lport_set_port_id = fnic_set_port_id,
@@ -91,7 +95,7 @@
 	if (!rport || fc_remote_port_chkready(rport))
 		return -ENXIO;
 
-	scsi_activate_tcq(sdev, FNIC_DFLT_QUEUE_DEPTH);
+	scsi_activate_tcq(sdev, fnic_max_qdepth);
 	return 0;
 }
 
@@ -126,6 +130,7 @@
 static void fnic_get_host_speed(struct Scsi_Host *shost);
 static struct scsi_transport_template *fnic_fc_transport;
 static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *);
+static void fnic_reset_host_stats(struct Scsi_Host *);
 
 static struct fc_function_template fnic_fc_functions = {
 
@@ -153,6 +158,7 @@
 	.set_rport_dev_loss_tmo = fnic_set_rport_dev_loss_tmo,
 	.issue_fc_host_lip = fnic_reset,
 	.get_fc_host_stats = fnic_get_stats,
+	.reset_fc_host_stats = fnic_reset_host_stats,
 	.dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
 	.terminate_rport_io = fnic_terminate_rport_io,
 	.bsg_request = fc_lport_bsg_request,
@@ -206,13 +212,116 @@
 	stats->error_frames = vs->tx.tx_errors + vs->rx.rx_errors;
 	stats->dumped_frames = vs->tx.tx_drops + vs->rx.rx_drop;
 	stats->invalid_crc_count = vs->rx.rx_crc_errors;
-	stats->seconds_since_last_reset = (jiffies - lp->boot_time) / HZ;
+	stats->seconds_since_last_reset =
+			(jiffies - fnic->stats_reset_time) / HZ;
 	stats->fcp_input_megabytes = div_u64(fnic->fcp_input_bytes, 1000000);
 	stats->fcp_output_megabytes = div_u64(fnic->fcp_output_bytes, 1000000);
 
 	return stats;
 }
 
+/*
+ * fnic_dump_fchost_stats
+ * note : dumps fc_host statistics into the system log
+ */
+void fnic_dump_fchost_stats(struct Scsi_Host *host,
+				struct fc_host_statistics *stats)
+{
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: seconds since last reset = %llu\n",
+			stats->seconds_since_last_reset);
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: tx frames		= %llu\n",
+			stats->tx_frames);
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: tx words		= %llu\n",
+			stats->tx_words);
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: rx frames		= %llu\n",
+			stats->rx_frames);
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: rx words		= %llu\n",
+			stats->rx_words);
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: lip count		= %llu\n",
+			stats->lip_count);
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: nos count		= %llu\n",
+			stats->nos_count);
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: error frames		= %llu\n",
+			stats->error_frames);
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: dumped frames	= %llu\n",
+			stats->dumped_frames);
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: link failure count	= %llu\n",
+			stats->link_failure_count);
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: loss of sync count	= %llu\n",
+			stats->loss_of_sync_count);
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: loss of signal count	= %llu\n",
+			stats->loss_of_signal_count);
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: prim seq protocol err count = %llu\n",
+			stats->prim_seq_protocol_err_count);
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: invalid tx word count= %llu\n",
+			stats->invalid_tx_word_count);
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: invalid crc count	= %llu\n",
+			stats->invalid_crc_count);
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: fcp input requests	= %llu\n",
+			stats->fcp_input_requests);
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: fcp output requests	= %llu\n",
+			stats->fcp_output_requests);
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: fcp control requests	= %llu\n",
+			stats->fcp_control_requests);
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: fcp input megabytes	= %llu\n",
+			stats->fcp_input_megabytes);
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: fcp output megabytes	= %llu\n",
+			stats->fcp_output_megabytes);
+	return;
+}
+
+/*
+ * fnic_reset_host_stats : clears host stats
+ * note : called when reset_statistics is set via the sysfs dir
+ */
+static void fnic_reset_host_stats(struct Scsi_Host *host)
+{
+	int ret;
+	struct fc_lport *lp = shost_priv(host);
+	struct fnic *fnic = lport_priv(lp);
+	struct fc_host_statistics *stats;
+	unsigned long flags;
+
+	/* dump current stats, before clearing them */
+	stats = fnic_get_stats(host);
+	fnic_dump_fchost_stats(host, stats);
+
+	spin_lock_irqsave(&fnic->fnic_lock, flags);
+	ret = vnic_dev_stats_clear(fnic->vdev);
+	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+	if (ret) {
+		FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host,
+				"fnic: Reset vnic stats failed"
+				" 0x%x", ret);
+		return;
+	}
+	fnic->stats_reset_time = jiffies;
+	memset(stats, 0, sizeof(*stats));
+
+	return;
+}
+
 void fnic_log_q_error(struct fnic *fnic)
 {
 	unsigned int i;
@@ -447,13 +556,6 @@
 
 	host->transportt = fnic_fc_transport;
 
-	err = scsi_init_shared_tag_map(host, FNIC_MAX_IO_REQ);
-	if (err) {
-		shost_printk(KERN_ERR, fnic->lport->host,
-			     "Unable to alloc shared tag map\n");
-		goto err_out_free_hba;
-	}
-
 	/* Setup PCI resources */
 	pci_set_drvdata(pdev, fnic);
 
@@ -476,10 +578,10 @@
 	pci_set_master(pdev);
 
 	/* Query PCI controller on system for DMA addressing
-	 * limitation for the device.  Try 40-bit first, and
+	 * limitation for the device.  Try 64-bit first, and
 	 * fail to 32-bit.
 	 */
-	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
+	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
 	if (err) {
 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 		if (err) {
@@ -496,10 +598,10 @@
 			goto err_out_release_regions;
 		}
 	} else {
-		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
+		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
 		if (err) {
 			shost_printk(KERN_ERR, fnic->lport->host,
-				     "Unable to obtain 40-bit DMA "
+				     "Unable to obtain 64-bit DMA "
 				     "for consistent allocations, aborting.\n");
 			goto err_out_release_regions;
 		}
@@ -566,6 +668,22 @@
 			     "aborting.\n");
 		goto err_out_dev_close;
 	}
+
+	/* Configure Maximum Outstanding IO reqs*/
+	if (fnic->config.io_throttle_count != FNIC_UCSM_DFLT_THROTTLE_CNT_BLD) {
+		host->can_queue = min_t(u32, FNIC_MAX_IO_REQ,
+					max_t(u32, FNIC_MIN_IO_REQ,
+					fnic->config.io_throttle_count));
+	}
+	fnic->fnic_max_tag_id = host->can_queue;
+
+	err = scsi_init_shared_tag_map(host, fnic->fnic_max_tag_id);
+	if (err) {
+		shost_printk(KERN_ERR, fnic->lport->host,
+			  "Unable to alloc shared tag map\n");
+		goto err_out_dev_close;
+	}
+
 	host->max_lun = fnic->config.luns_per_tgt;
 	host->max_id = FNIC_MAX_FCP_TARGET;
 	host->max_cmd_len = FCOE_MAX_CMD_LEN;
@@ -719,6 +837,7 @@
 	}
 
 	fc_lport_init_stats(lp);
+	fnic->stats_reset_time = jiffies;
 
 	fc_lport_config(lp);
 
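
The can_queue logic above leaves the driver default in place when UCSM reports the default throttle count, and otherwise clamps the reported value into [FNIC_MIN_IO_REQ, FNIC_MAX_IO_REQ] before sizing the shared tag map from it. A quick standalone check of the min_t/max_t arithmetic, reusing the constants from fnic.h:

	#include <stdint.h>
	#include <stdio.h>

	#define FNIC_UCSM_DFLT_THROTTLE_CNT_BLD	16
	#define FNIC_MIN_IO_REQ			256
	#define FNIC_MAX_IO_REQ			2048

	/* Userspace stand-in for the kernel's min_t/max_t clamp above. */
	static uint32_t clamp_can_queue(uint32_t dflt, uint32_t throttle)
	{
		if (throttle == FNIC_UCSM_DFLT_THROTTLE_CNT_BLD)
			return dflt;		/* keep the driver default */
		if (throttle < FNIC_MIN_IO_REQ)
			return FNIC_MIN_IO_REQ;
		if (throttle > FNIC_MAX_IO_REQ)
			return FNIC_MAX_IO_REQ;
		return throttle;
	}

	int main(void)
	{
		printf("%u\n", clamp_can_queue(2048, 16));	/* 2048: default */
		printf("%u\n", clamp_can_queue(2048, 64));	/* 256: floored */
		printf("%u\n", clamp_can_queue(2048, 4096));	/* 2048: capped */
		printf("%u\n", clamp_can_queue(2048, 512));	/* 512: as given */
		return 0;
	}
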
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index a97e6e5..d014aae 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -111,6 +111,12 @@
 	return &fnic->io_req_lock[hash];
 }
 
+static inline spinlock_t *fnic_io_lock_tag(struct fnic *fnic,
+					    int tag)
+{
+	return &fnic->io_req_lock[tag & (FNIC_IO_LOCKS - 1)];
+}
+
 /*
  * Unmap the data buffer and sense buffer for an io_req,
  * also unmap and free the device-private scatter/gather list.
@@ -730,7 +736,7 @@
 	fcpio_tag_id_dec(&tag, &id);
 	icmnd_cmpl = &desc->u.icmnd_cmpl;
 
-	if (id >= FNIC_MAX_IO_REQ) {
+	if (id >= fnic->fnic_max_tag_id) {
 		shost_printk(KERN_ERR, fnic->lport->host,
 			"Tag out of range tag %x hdr status = %s\n",
 			     id, fnic_fcpio_status_to_str(hdr_status));
@@ -818,38 +824,6 @@
 		if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER)
 			xfer_len -= icmnd_cmpl->residual;
 
-		/*
-		 * If queue_full, then try to reduce queue depth for all
-		 * LUNS on the target. Todo: this should be accompanied
-		 * by a periodic queue_depth rampup based on successful
-		 * IO completion.
-		 */
-		if (icmnd_cmpl->scsi_status == QUEUE_FULL) {
-			struct scsi_device *t_sdev;
-			int qd = 0;
-
-			shost_for_each_device(t_sdev, sc->device->host) {
-				if (t_sdev->id != sc->device->id)
-					continue;
-
-				if (t_sdev->queue_depth > 1) {
-					qd = scsi_track_queue_full
-						(t_sdev,
-						 t_sdev->queue_depth - 1);
-					if (qd == -1)
-						qd = t_sdev->host->cmd_per_lun;
-					shost_printk(KERN_INFO,
-						     fnic->lport->host,
-						     "scsi[%d:%d:%d:%d"
-						     "] queue full detected,"
-						     "new depth = %d\n",
-						     t_sdev->host->host_no,
-						     t_sdev->channel,
-						     t_sdev->id, t_sdev->lun,
-						     t_sdev->queue_depth);
-				}
-			}
-		}
 		break;
 
 	case FCPIO_TIMEOUT:          /* request was timed out */
@@ -939,7 +913,7 @@
 	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
 	fcpio_tag_id_dec(&tag, &id);
 
-	if ((id & FNIC_TAG_MASK) >= FNIC_MAX_IO_REQ) {
+	if ((id & FNIC_TAG_MASK) >= fnic->fnic_max_tag_id) {
 		shost_printk(KERN_ERR, fnic->lport->host,
 		"Tag out of range tag %x hdr status = %s\n",
 		id, fnic_fcpio_status_to_str(hdr_status));
@@ -988,9 +962,7 @@
 			spin_unlock_irqrestore(io_lock, flags);
 			return;
 		}
-		CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
 		CMD_ABTS_STATUS(sc) = hdr_status;
-
 		CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
 		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
 			      "abts cmpl recd. id %d status %s\n",
@@ -1148,23 +1120,25 @@
 
 static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
 {
-	unsigned int i;
+	int i;
 	struct fnic_io_req *io_req;
 	unsigned long flags = 0;
 	struct scsi_cmnd *sc;
 	spinlock_t *io_lock;
 	unsigned long start_time = 0;
 
-	for (i = 0; i < FNIC_MAX_IO_REQ; i++) {
+	for (i = 0; i < fnic->fnic_max_tag_id; i++) {
 		if (i == exclude_id)
 			continue;
 
-		sc = scsi_host_find_tag(fnic->lport->host, i);
-		if (!sc)
-			continue;
-
-		io_lock = fnic_io_lock_hash(fnic, sc);
+		io_lock = fnic_io_lock_tag(fnic, i);
 		spin_lock_irqsave(io_lock, flags);
+		sc = scsi_host_find_tag(fnic->lport->host, i);
+		if (!sc) {
+			spin_unlock_irqrestore(io_lock, flags);
+			continue;
+		}
+
 		io_req = (struct fnic_io_req *)CMD_SP(sc);
 		if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
 			!(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
@@ -1236,7 +1210,7 @@
 	fcpio_tag_id_dec(&desc->hdr.tag, &id);
 	id &= FNIC_TAG_MASK;
 
-	if (id >= FNIC_MAX_IO_REQ)
+	if (id >= fnic->fnic_max_tag_id)
 		return;
 
 	sc = scsi_host_find_tag(fnic->lport->host, id);
@@ -1340,14 +1314,15 @@
 	if (fnic->in_remove)
 		return;
 
-	for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
+	for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
 		abt_tag = tag;
-		sc = scsi_host_find_tag(fnic->lport->host, tag);
-		if (!sc)
-			continue;
-
-		io_lock = fnic_io_lock_hash(fnic, sc);
+		io_lock = fnic_io_lock_tag(fnic, tag);
 		spin_lock_irqsave(io_lock, flags);
+		sc = scsi_host_find_tag(fnic->lport->host, tag);
+		if (!sc) {
+			spin_unlock_irqrestore(io_lock, flags);
+			continue;
+		}
 
 		io_req = (struct fnic_io_req *)CMD_SP(sc);
 
@@ -1441,12 +1416,29 @@
 	unsigned long flags;
 	struct scsi_cmnd *sc;
 	struct scsi_lun fc_lun;
-	struct fc_rport_libfc_priv *rdata = rport->dd_data;
-	struct fc_lport *lport = rdata->local_port;
-	struct fnic *fnic = lport_priv(lport);
+	struct fc_rport_libfc_priv *rdata;
+	struct fc_lport *lport;
+	struct fnic *fnic;
 	struct fc_rport *cmd_rport;
 	enum fnic_ioreq_state old_ioreq_state;
 
+	if (!rport) {
+		printk(KERN_ERR "fnic_terminate_rport_io: rport is NULL\n");
+		return;
+	}
+	rdata = rport->dd_data;
+
+	if (!rdata) {
+		printk(KERN_ERR "fnic_terminate_rport_io: rdata is NULL\n");
+		return;
+	}
+	lport = rdata->local_port;
+
+	if (!lport) {
+		printk(KERN_ERR "fnic_terminate_rport_io: lport is NULL\n");
+		return;
+	}
+	fnic = lport_priv(lport);
 	FNIC_SCSI_DBG(KERN_DEBUG,
 		      fnic->lport->host, "fnic_terminate_rport_io called"
 		      " wwpn 0x%llx, wwnn0x%llx, rport 0x%p, portid 0x%06x\n",
@@ -1456,18 +1448,21 @@
 	if (fnic->in_remove)
 		return;
 
-	for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
+	for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
 		abt_tag = tag;
+		io_lock = fnic_io_lock_tag(fnic, tag);
+		spin_lock_irqsave(io_lock, flags);
 		sc = scsi_host_find_tag(fnic->lport->host, tag);
-		if (!sc)
+		if (!sc) {
+			spin_unlock_irqrestore(io_lock, flags);
 			continue;
+		}
 
 		cmd_rport = starget_to_rport(scsi_target(sc->device));
-		if (rport != cmd_rport)
+		if (rport != cmd_rport) {
+			spin_unlock_irqrestore(io_lock, flags);
 			continue;
-
-		io_lock = fnic_io_lock_hash(fnic, sc);
-		spin_lock_irqsave(io_lock, flags);
+		}
 
 		io_req = (struct fnic_io_req *)CMD_SP(sc);
 
@@ -1680,13 +1675,15 @@
 	io_req->abts_done = NULL;
 
 	/* fw did not complete abort, timed out */
-	if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+	if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
 		spin_unlock_irqrestore(io_lock, flags);
 		CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_TIMED_OUT;
 		ret = FAILED;
 		goto fnic_abort_cmd_end;
 	}
 
+	CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
+
 	/*
 	 * firmware completed the abort, check the status,
 	 * free the io_req irrespective of failure or success
@@ -1784,17 +1781,18 @@
 	DECLARE_COMPLETION_ONSTACK(tm_done);
 	enum fnic_ioreq_state old_ioreq_state;
 
-	for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
+	for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
+		io_lock = fnic_io_lock_tag(fnic, tag);
+		spin_lock_irqsave(io_lock, flags);
 		sc = scsi_host_find_tag(fnic->lport->host, tag);
 		/*
 		 * ignore this lun reset cmd or cmds that do not belong to
 		 * this lun
 		 */
-		if (!sc || sc == lr_sc || sc->device != lun_dev)
+		if (!sc || sc == lr_sc || sc->device != lun_dev) {
+			spin_unlock_irqrestore(io_lock, flags);
 			continue;
-
-		io_lock = fnic_io_lock_hash(fnic, sc);
-		spin_lock_irqsave(io_lock, flags);
+		}
 
 		io_req = (struct fnic_io_req *)CMD_SP(sc);
 
@@ -1823,6 +1821,11 @@
 			spin_unlock_irqrestore(io_lock, flags);
 			continue;
 		}
+
+		if (io_req->abts_done)
+			shost_printk(KERN_ERR, fnic->lport->host,
+			  "%s: io_req->abts_done is set state is %s\n",
+			  __func__, fnic_ioreq_state_to_str(CMD_STATE(sc)));
 		old_ioreq_state = CMD_STATE(sc);
 		/*
 		 * Any pending IO issued prior to reset is expected to be
@@ -1833,11 +1836,6 @@
 		 */
 		CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
 
-		if (io_req->abts_done)
-			shost_printk(KERN_ERR, fnic->lport->host,
-			  "%s: io_req->abts_done is set state is %s\n",
-			  __func__, fnic_ioreq_state_to_str(CMD_STATE(sc)));
-
 		BUG_ON(io_req->abts_done);
 
 		abt_tag = tag;
@@ -1890,12 +1888,13 @@
 		io_req->abts_done = NULL;
 
 		/* if abort is still pending with fw, fail */
-		if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+		if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
 			spin_unlock_irqrestore(io_lock, flags);
 			CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
 			ret = 1;
 			goto clean_pending_aborts_end;
 		}
+		CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
 		CMD_SP(sc) = NULL;
 		spin_unlock_irqrestore(io_lock, flags);
 
@@ -2093,8 +2092,8 @@
 		spin_unlock_irqrestore(io_lock, flags);
 		int_to_scsilun(sc->device->lun, &fc_lun);
 		/*
-		 * Issue abort and terminate on the device reset request.
-		 * If q'ing of the abort fails, retry issue it after a delay.
+		 * Issue abort and terminate on device reset request.
+		 * If q'ing of terminate fails, retry it after a delay.
 		 */
 		while (1) {
 			spin_lock_irqsave(io_lock, flags);
@@ -2405,7 +2404,7 @@
 		lun_dev = lr_sc->device;
 
 	/* walk again to check, if IOs are still pending in fw */
-	for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
+	for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
 		sc = scsi_host_find_tag(fnic->lport->host, tag);
 		/*
 		 * ignore this lun reset cmd or cmds that do not belong to
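
A pattern repeated throughout the fnic_scsi.c hunks: the per-tag spinlock is now taken before scsi_host_find_tag(), closing the window where a command found by the lookup could complete and be freed before the lock was acquired. The new fnic_io_lock_tag() makes this possible because the lock is hashed from the tag alone. The loop shape, consolidated:

	for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
		io_lock = fnic_io_lock_tag(fnic, tag);	/* lock first ... */
		spin_lock_irqsave(io_lock, flags);

		sc = scsi_host_find_tag(fnic->lport->host, tag);
		if (!sc) {				/* ... then look up */
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		/* sc cannot complete and free its io_req under us here */

		spin_unlock_irqrestore(io_lock, flags);
	}
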
diff --git a/drivers/scsi/fnic/vnic_scsi.h b/drivers/scsi/fnic/vnic_scsi.h
index fbb5536..e343e1d 100644
--- a/drivers/scsi/fnic/vnic_scsi.h
+++ b/drivers/scsi/fnic/vnic_scsi.h
@@ -54,8 +54,8 @@
 #define VNIC_FNIC_PLOGI_TIMEOUT_MIN         1000
 #define VNIC_FNIC_PLOGI_TIMEOUT_MAX         255000
 
-#define VNIC_FNIC_IO_THROTTLE_COUNT_MIN     256
-#define VNIC_FNIC_IO_THROTTLE_COUNT_MAX     4096
+#define VNIC_FNIC_IO_THROTTLE_COUNT_MIN     1
+#define VNIC_FNIC_IO_THROTTLE_COUNT_MAX     2048
 
 #define VNIC_FNIC_LINK_DOWN_TIMEOUT_MIN     0
 #define VNIC_FNIC_LINK_DOWN_TIMEOUT_MAX     240000
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index fac8cf5..891c86b 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -54,7 +54,7 @@
 #include "hpsa.h"
 
 /* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
-#define HPSA_DRIVER_VERSION "2.0.2-1"
+#define HPSA_DRIVER_VERSION "3.4.0-1"
 #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
 #define HPSA "hpsa"
 
@@ -89,13 +89,14 @@
 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3245},
 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3247},
 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3249},
-	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324a},
-	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324b},
+	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324A},
+	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324B},
 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3233},
 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3350},
 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3351},
 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3352},
 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3353},
+	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x334D},
 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3354},
 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3355},
 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3356},
@@ -107,7 +108,19 @@
 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1925},
 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1926},
 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1928},
-	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x334d},
+	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1929},
+	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BD},
+	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BE},
+	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BF},
+	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C0},
+	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C1},
+	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C2},
+	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C3},
+	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C4},
+	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C5},
+	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C7},
+	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C8},
+	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C9},
 	{PCI_VENDOR_ID_HP,     PCI_ANY_ID,	PCI_ANY_ID, PCI_ANY_ID,
 		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
 	{0,}
@@ -125,24 +138,35 @@
 	{0x3245103C, "Smart Array P410i", &SA5_access},
 	{0x3247103C, "Smart Array P411", &SA5_access},
 	{0x3249103C, "Smart Array P812", &SA5_access},
-	{0x324a103C, "Smart Array P712m", &SA5_access},
-	{0x324b103C, "Smart Array P711m", &SA5_access},
+	{0x324A103C, "Smart Array P712m", &SA5_access},
+	{0x324B103C, "Smart Array P711m", &SA5_access},
 	{0x3350103C, "Smart Array P222", &SA5_access},
 	{0x3351103C, "Smart Array P420", &SA5_access},
 	{0x3352103C, "Smart Array P421", &SA5_access},
 	{0x3353103C, "Smart Array P822", &SA5_access},
+	{0x334D103C, "Smart Array P822se", &SA5_access},
 	{0x3354103C, "Smart Array P420i", &SA5_access},
 	{0x3355103C, "Smart Array P220i", &SA5_access},
 	{0x3356103C, "Smart Array P721m", &SA5_access},
-	{0x1920103C, "Smart Array", &SA5_access},
-	{0x1921103C, "Smart Array", &SA5_access},
-	{0x1922103C, "Smart Array", &SA5_access},
-	{0x1923103C, "Smart Array", &SA5_access},
-	{0x1924103C, "Smart Array", &SA5_access},
-	{0x1925103C, "Smart Array", &SA5_access},
-	{0x1926103C, "Smart Array", &SA5_access},
-	{0x1928103C, "Smart Array", &SA5_access},
-	{0x334d103C, "Smart Array P822se", &SA5_access},
+	{0x1921103C, "Smart Array P830i", &SA5_access},
+	{0x1922103C, "Smart Array P430", &SA5_access},
+	{0x1923103C, "Smart Array P431", &SA5_access},
+	{0x1924103C, "Smart Array P830", &SA5_access},
+	{0x1926103C, "Smart Array P731m", &SA5_access},
+	{0x1928103C, "Smart Array P230i", &SA5_access},
+	{0x1929103C, "Smart Array P530", &SA5_access},
+	{0x21BD103C, "Smart Array", &SA5_access},
+	{0x21BE103C, "Smart Array", &SA5_access},
+	{0x21BF103C, "Smart Array", &SA5_access},
+	{0x21C0103C, "Smart Array", &SA5_access},
+	{0x21C1103C, "Smart Array", &SA5_access},
+	{0x21C2103C, "Smart Array", &SA5_access},
+	{0x21C3103C, "Smart Array", &SA5_access},
+	{0x21C4103C, "Smart Array", &SA5_access},
+	{0x21C5103C, "Smart Array", &SA5_access},
+	{0x21C7103C, "Smart Array", &SA5_access},
+	{0x21C8103C, "Smart Array", &SA5_access},
+	{0x21C9103C, "Smart Array", &SA5_access},
 	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
 };
 
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 4e31caa..23f5ba5 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -2208,7 +2208,10 @@
 
 	if (rsp_rc != 0) {
 		sdev_printk(KERN_ERR, sdev, "Failed to send cancel event. rc=%d\n", rsp_rc);
-		return -EIO;
+		/* If a failure is received, the host adapter is most likely
+		 * going through reset; return success so the caller will wait
+		 * for the command being cancelled to be returned. */
+		return 0;
 	}
 
 	sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n");
@@ -2221,7 +2224,15 @@
 
 	if (status != IBMVFC_MAD_SUCCESS) {
 		sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
-		return -EIO;
+		switch (status) {
+		case IBMVFC_MAD_DRIVER_FAILED:
+		case IBMVFC_MAD_CRQ_ERROR:
+			/* Host adapter is most likely going through reset; return
+			 * success so the caller waits for the cancelled command. */
+			return 0;
+		default:
+			return -EIO;
+		}
 	}
 
 	sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n");
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index d0fa4b6..fa76440 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -241,7 +241,7 @@
 	struct device_node *rootdn;
 
 	const char *ppartition_name;
-	const unsigned int *p_number_ptr;
+	const __be32 *p_number_ptr;
 
 	/* Retrieve information about this partition */
 	rootdn = of_find_node_by_path("/");
@@ -255,7 +255,7 @@
 				sizeof(partition_name));
 	p_number_ptr = of_get_property(rootdn, "ibm,partition-no", NULL);
 	if (p_number_ptr)
-		partition_number = *p_number_ptr;
+		partition_number = of_read_number(p_number_ptr, 1);
 	of_node_put(rootdn);
 }
 
@@ -270,10 +270,11 @@
 	strncpy(hostdata->madapter_info.partition_name, partition_name,
 			sizeof(hostdata->madapter_info.partition_name));
 
-	hostdata->madapter_info.partition_number = partition_number;
+	hostdata->madapter_info.partition_number =
+					cpu_to_be32(partition_number);
 
-	hostdata->madapter_info.mad_version = 1;
-	hostdata->madapter_info.os_type = 2;
+	hostdata->madapter_info.mad_version = cpu_to_be32(1);
+	hostdata->madapter_info.os_type = cpu_to_be32(2);
 }
 
 /**
@@ -464,9 +465,9 @@
 		memset(&evt->crq, 0x00, sizeof(evt->crq));
 		atomic_set(&evt->free, 1);
 		evt->crq.valid = 0x80;
-		evt->crq.IU_length = sizeof(*evt->xfer_iu);
-		evt->crq.IU_data_ptr = pool->iu_token + 
-			sizeof(*evt->xfer_iu) * i;
+		evt->crq.IU_length = cpu_to_be16(sizeof(*evt->xfer_iu));
+		evt->crq.IU_data_ptr = cpu_to_be64(pool->iu_token +
+			sizeof(*evt->xfer_iu) * i);
 		evt->xfer_iu = pool->iu_storage + i;
 		evt->hostdata = hostdata;
 		evt->ext_list = NULL;
@@ -588,7 +589,7 @@
 	evt_struct->cmnd_done = NULL;
 	evt_struct->sync_srp = NULL;
 	evt_struct->crq.format = format;
-	evt_struct->crq.timeout = timeout;
+	evt_struct->crq.timeout = cpu_to_be16(timeout);
 	evt_struct->done = done;
 }
 
@@ -659,8 +660,8 @@
 
 	scsi_for_each_sg(cmd, sg, nseg, i) {
 		struct srp_direct_buf *descr = md + i;
-		descr->va = sg_dma_address(sg);
-		descr->len = sg_dma_len(sg);
+		descr->va = cpu_to_be64(sg_dma_address(sg));
+		descr->len = cpu_to_be32(sg_dma_len(sg));
 		descr->key = 0;
 		total_length += sg_dma_len(sg);
  	}
@@ -703,13 +704,14 @@
 	}
 
 	indirect->table_desc.va = 0;
-	indirect->table_desc.len = sg_mapped * sizeof(struct srp_direct_buf);
+	indirect->table_desc.len = cpu_to_be32(sg_mapped *
+					       sizeof(struct srp_direct_buf));
 	indirect->table_desc.key = 0;
 
 	if (sg_mapped <= MAX_INDIRECT_BUFS) {
 		total_length = map_sg_list(cmd, sg_mapped,
 					   &indirect->desc_list[0]);
-		indirect->len = total_length;
+		indirect->len = cpu_to_be32(total_length);
 		return 1;
 	}
 
@@ -731,9 +733,10 @@
 
 	total_length = map_sg_list(cmd, sg_mapped, evt_struct->ext_list);
 
-	indirect->len = total_length;
-	indirect->table_desc.va = evt_struct->ext_list_token;
-	indirect->table_desc.len = sg_mapped * sizeof(indirect->desc_list[0]);
+	indirect->len = cpu_to_be32(total_length);
+	indirect->table_desc.va = cpu_to_be64(evt_struct->ext_list_token);
+	indirect->table_desc.len = cpu_to_be32(sg_mapped *
+					       sizeof(indirect->desc_list[0]));
 	memcpy(indirect->desc_list, evt_struct->ext_list,
 	       MAX_INDIRECT_BUFS * sizeof(struct srp_direct_buf));
  	return 1;
@@ -849,7 +852,7 @@
 				   struct ibmvscsi_host_data *hostdata,
 				   unsigned long timeout)
 {
-	u64 *crq_as_u64 = (u64 *) &evt_struct->crq;
+	__be64 *crq_as_u64 = (__be64 *)&evt_struct->crq;
 	int request_status = 0;
 	int rc;
 	int srp_req = 0;
@@ -920,8 +923,9 @@
 		add_timer(&evt_struct->timer);
 	}
 
-	if ((rc =
-	     ibmvscsi_send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) {
+	rc = ibmvscsi_send_crq(hostdata, be64_to_cpu(crq_as_u64[0]),
+			       be64_to_cpu(crq_as_u64[1]));
+	if (rc != 0) {
 		list_del(&evt_struct->list);
 		del_timer(&evt_struct->timer);
 
@@ -987,15 +991,16 @@
 		if (((cmnd->result >> 1) & 0x1f) == CHECK_CONDITION)
 			memcpy(cmnd->sense_buffer,
 			       rsp->data,
-			       rsp->sense_data_len);
+			       be32_to_cpu(rsp->sense_data_len));
 		unmap_cmd_data(&evt_struct->iu.srp.cmd, 
 			       evt_struct, 
 			       evt_struct->hostdata->dev);
 
 		if (rsp->flags & SRP_RSP_FLAG_DOOVER)
-			scsi_set_resid(cmnd, rsp->data_out_res_cnt);
+			scsi_set_resid(cmnd,
+				       be32_to_cpu(rsp->data_out_res_cnt));
 		else if (rsp->flags & SRP_RSP_FLAG_DIOVER)
-			scsi_set_resid(cmnd, rsp->data_in_res_cnt);
+			scsi_set_resid(cmnd, be32_to_cpu(rsp->data_in_res_cnt));
 	}
 
 	if (evt_struct->cmnd_done)
@@ -1037,7 +1042,7 @@
 	memset(srp_cmd, 0x00, SRP_MAX_IU_LEN);
 	srp_cmd->opcode = SRP_CMD;
 	memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(srp_cmd->cdb));
-	srp_cmd->lun = ((u64) lun) << 48;
+	srp_cmd->lun = cpu_to_be64(((u64)lun) << 48);
 
 	if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) {
 		if (!firmware_has_feature(FW_FEATURE_CMO))
@@ -1062,9 +1067,10 @@
 	if ((in_fmt == SRP_DATA_DESC_INDIRECT ||
 	     out_fmt == SRP_DATA_DESC_INDIRECT) &&
 	    indirect->table_desc.va == 0) {
-		indirect->table_desc.va = evt_struct->crq.IU_data_ptr +
+		indirect->table_desc.va =
+			cpu_to_be64(be64_to_cpu(evt_struct->crq.IU_data_ptr) +
 			offsetof(struct srp_cmd, add_data) +
-			offsetof(struct srp_indirect_buf, desc_list);
+			offsetof(struct srp_indirect_buf, desc_list));
 	}
 
 	return ibmvscsi_send_srp_event(evt_struct, hostdata, 0);
@@ -1158,7 +1164,7 @@
 	 * request_limit could have been set to -1 by this client.
 	 */
 	atomic_set(&hostdata->request_limit,
-		   evt_struct->xfer_iu->srp.login_rsp.req_lim_delta);
+		   be32_to_cpu(evt_struct->xfer_iu->srp.login_rsp.req_lim_delta));
 
 	/* If we had any pending I/Os, kick them */
 	scsi_unblock_requests(hostdata->host);
@@ -1184,8 +1190,9 @@
 	login = &evt_struct->iu.srp.login_req;
 	memset(login, 0, sizeof(*login));
 	login->opcode = SRP_LOGIN_REQ;
-	login->req_it_iu_len = sizeof(union srp_iu);
-	login->req_buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT;
+	login->req_it_iu_len = cpu_to_be32(sizeof(union srp_iu));
+	login->req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
+					 SRP_BUF_FORMAT_INDIRECT);
 
 	spin_lock_irqsave(hostdata->host->host_lock, flags);
 	/* Start out with a request limit of 0, since this is negotiated in
@@ -1214,12 +1221,13 @@
 		dev_err(hostdata->dev, "error 0x%X getting capabilities info\n",
 			evt_struct->xfer_iu->mad.capabilities.common.status);
 	} else {
-		if (hostdata->caps.migration.common.server_support != SERVER_SUPPORTS_CAP)
+		if (hostdata->caps.migration.common.server_support !=
+		    cpu_to_be16(SERVER_SUPPORTS_CAP))
 			dev_info(hostdata->dev, "Partition migration not supported\n");
 
 		if (client_reserve) {
 			if (hostdata->caps.reserve.common.server_support ==
-			    SERVER_SUPPORTS_CAP)
+			    cpu_to_be16(SERVER_SUPPORTS_CAP))
 				dev_info(hostdata->dev, "Client reserve enabled\n");
 			else
 				dev_info(hostdata->dev, "Client reserve not supported\n");
@@ -1251,9 +1259,9 @@
 	req = &evt_struct->iu.mad.capabilities;
 	memset(req, 0, sizeof(*req));
 
-	hostdata->caps.flags = CAP_LIST_SUPPORTED;
+	hostdata->caps.flags = cpu_to_be32(CAP_LIST_SUPPORTED);
 	if (hostdata->client_migrated)
-		hostdata->caps.flags |= CLIENT_MIGRATED;
+		hostdata->caps.flags |= cpu_to_be32(CLIENT_MIGRATED);
 
 	strncpy(hostdata->caps.name, dev_name(&hostdata->host->shost_gendev),
 		sizeof(hostdata->caps.name));
@@ -1264,22 +1272,31 @@
 	strncpy(hostdata->caps.loc, location, sizeof(hostdata->caps.loc));
 	hostdata->caps.loc[sizeof(hostdata->caps.loc) - 1] = '\0';
 
-	req->common.type = VIOSRP_CAPABILITIES_TYPE;
-	req->buffer = hostdata->caps_addr;
+	req->common.type = cpu_to_be32(VIOSRP_CAPABILITIES_TYPE);
+	req->buffer = cpu_to_be64(hostdata->caps_addr);
 
-	hostdata->caps.migration.common.cap_type = MIGRATION_CAPABILITIES;
-	hostdata->caps.migration.common.length = sizeof(hostdata->caps.migration);
-	hostdata->caps.migration.common.server_support = SERVER_SUPPORTS_CAP;
-	hostdata->caps.migration.ecl = 1;
+	hostdata->caps.migration.common.cap_type =
+				cpu_to_be32(MIGRATION_CAPABILITIES);
+	hostdata->caps.migration.common.length =
+				cpu_to_be16(sizeof(hostdata->caps.migration));
+	hostdata->caps.migration.common.server_support =
+				cpu_to_be16(SERVER_SUPPORTS_CAP);
+	hostdata->caps.migration.ecl = cpu_to_be32(1);
 
 	if (client_reserve) {
-		hostdata->caps.reserve.common.cap_type = RESERVATION_CAPABILITIES;
-		hostdata->caps.reserve.common.length = sizeof(hostdata->caps.reserve);
-		hostdata->caps.reserve.common.server_support = SERVER_SUPPORTS_CAP;
-		hostdata->caps.reserve.type = CLIENT_RESERVE_SCSI_2;
-		req->common.length = sizeof(hostdata->caps);
+		hostdata->caps.reserve.common.cap_type =
+					cpu_to_be32(RESERVATION_CAPABILITIES);
+		hostdata->caps.reserve.common.length =
+				cpu_to_be16(sizeof(hostdata->caps.reserve));
+		hostdata->caps.reserve.common.server_support =
+				cpu_to_be16(SERVER_SUPPORTS_CAP);
+		hostdata->caps.reserve.type =
+				cpu_to_be32(CLIENT_RESERVE_SCSI_2);
+		req->common.length =
+				cpu_to_be16(sizeof(hostdata->caps));
 	} else
-		req->common.length = sizeof(hostdata->caps) - sizeof(hostdata->caps.reserve);
+		req->common.length = cpu_to_be16(sizeof(hostdata->caps) -
+						sizeof(hostdata->caps.reserve));
 
 	spin_lock_irqsave(hostdata->host->host_lock, flags);
 	if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2))
@@ -1297,7 +1314,7 @@
 static void fast_fail_rsp(struct srp_event_struct *evt_struct)
 {
 	struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
-	u8 status = evt_struct->xfer_iu->mad.fast_fail.common.status;
+	u16 status = be16_to_cpu(evt_struct->xfer_iu->mad.fast_fail.common.status);
 
 	if (status == VIOSRP_MAD_NOT_SUPPORTED)
 		dev_err(hostdata->dev, "fast_fail not supported in server\n");
@@ -1334,8 +1351,8 @@
 
 	fast_fail_mad = &evt_struct->iu.mad.fast_fail;
 	memset(fast_fail_mad, 0, sizeof(*fast_fail_mad));
-	fast_fail_mad->common.type = VIOSRP_ENABLE_FAST_FAIL;
-	fast_fail_mad->common.length = sizeof(*fast_fail_mad);
+	fast_fail_mad->common.type = cpu_to_be32(VIOSRP_ENABLE_FAST_FAIL);
+	fast_fail_mad->common.length = cpu_to_be16(sizeof(*fast_fail_mad));
 
 	spin_lock_irqsave(hostdata->host->host_lock, flags);
 	rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2);
@@ -1362,15 +1379,15 @@
 			 "host partition %s (%d), OS %d, max io %u\n",
 			 hostdata->madapter_info.srp_version,
 			 hostdata->madapter_info.partition_name,
-			 hostdata->madapter_info.partition_number,
-			 hostdata->madapter_info.os_type,
-			 hostdata->madapter_info.port_max_txu[0]);
+			 be32_to_cpu(hostdata->madapter_info.partition_number),
+			 be32_to_cpu(hostdata->madapter_info.os_type),
+			 be32_to_cpu(hostdata->madapter_info.port_max_txu[0]));
 		
 		if (hostdata->madapter_info.port_max_txu[0]) 
 			hostdata->host->max_sectors = 
-				hostdata->madapter_info.port_max_txu[0] >> 9;
+				be32_to_cpu(hostdata->madapter_info.port_max_txu[0]) >> 9;
 		
-		if (hostdata->madapter_info.os_type == 3 &&
+		if (be32_to_cpu(hostdata->madapter_info.os_type) == 3 &&
 		    strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
 			dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n",
 				hostdata->madapter_info.srp_version);
@@ -1379,7 +1396,7 @@
 			hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
 		}
 
-		if (hostdata->madapter_info.os_type == 3) {
+		if (be32_to_cpu(hostdata->madapter_info.os_type) == 3) {
 			enable_fast_fail(hostdata);
 			return;
 		}
@@ -1414,9 +1431,9 @@
 	req = &evt_struct->iu.mad.adapter_info;
 	memset(req, 0x00, sizeof(*req));
 	
-	req->common.type = VIOSRP_ADAPTER_INFO_TYPE;
-	req->common.length = sizeof(hostdata->madapter_info);
-	req->buffer = hostdata->adapter_info_addr;
+	req->common.type = cpu_to_be32(VIOSRP_ADAPTER_INFO_TYPE);
+	req->common.length = cpu_to_be16(sizeof(hostdata->madapter_info));
+	req->buffer = cpu_to_be64(hostdata->adapter_info_addr);
 
 	spin_lock_irqsave(hostdata->host->host_lock, flags);
 	if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2))
@@ -1501,7 +1518,7 @@
 		/* Set up an abort SRP command */
 		memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
 		tsk_mgmt->opcode = SRP_TSK_MGMT;
-		tsk_mgmt->lun = ((u64) lun) << 48;
+		tsk_mgmt->lun = cpu_to_be64(((u64) lun) << 48);
 		tsk_mgmt->tsk_mgmt_func = SRP_TSK_ABORT_TASK;
 		tsk_mgmt->task_tag = (u64) found_evt;
 
@@ -1624,7 +1641,7 @@
 		/* Set up a lun reset SRP command */
 		memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
 		tsk_mgmt->opcode = SRP_TSK_MGMT;
-		tsk_mgmt->lun = ((u64) lun) << 48;
+		tsk_mgmt->lun = cpu_to_be64(((u64) lun) << 48);
 		tsk_mgmt->tsk_mgmt_func = SRP_TSK_LUN_RESET;
 
 		evt->sync_srp = &srp_rsp;
@@ -1735,8 +1752,9 @@
 {
 	long rc;
 	unsigned long flags;
+	/* The hypervisor copies our tag value here, so no byteswapping is needed */
 	struct srp_event_struct *evt_struct =
-	    (struct srp_event_struct *)crq->IU_data_ptr;
+			(__force struct srp_event_struct *)crq->IU_data_ptr;
 	switch (crq->valid) {
 	case 0xC0:		/* initialization */
 		switch (crq->format) {
@@ -1792,18 +1810,18 @@
 	 */
 	if (!valid_event_struct(&hostdata->pool, evt_struct)) {
 		dev_err(hostdata->dev, "returned correlation_token 0x%p is invalid!\n",
-		       (void *)crq->IU_data_ptr);
+		       evt_struct);
 		return;
 	}
 
 	if (atomic_read(&evt_struct->free)) {
 		dev_err(hostdata->dev, "received duplicate correlation_token 0x%p!\n",
-			(void *)crq->IU_data_ptr);
+			evt_struct);
 		return;
 	}
 
 	if (crq->format == VIOSRP_SRP_FORMAT)
-		atomic_add(evt_struct->xfer_iu->srp.rsp.req_lim_delta,
+		atomic_add(be32_to_cpu(evt_struct->xfer_iu->srp.rsp.req_lim_delta),
 			   &hostdata->request_limit);
 
 	del_timer(&evt_struct->timer);
@@ -1856,13 +1874,11 @@
 
 	/* Set up a lun reset SRP command */
 	memset(host_config, 0x00, sizeof(*host_config));
-	host_config->common.type = VIOSRP_HOST_CONFIG_TYPE;
-	host_config->common.length = length;
-	host_config->buffer = addr = dma_map_single(hostdata->dev, buffer,
-						    length,
-						    DMA_BIDIRECTIONAL);
+	host_config->common.type = cpu_to_be32(VIOSRP_HOST_CONFIG_TYPE);
+	host_config->common.length = cpu_to_be16(length);
+	addr = dma_map_single(hostdata->dev, buffer, length, DMA_BIDIRECTIONAL);
 
-	if (dma_mapping_error(hostdata->dev, host_config->buffer)) {
+	if (dma_mapping_error(hostdata->dev, addr)) {
 		if (!firmware_has_feature(FW_FEATURE_CMO))
 			dev_err(hostdata->dev,
 			        "dma_mapping error getting host config\n");
@@ -1870,6 +1886,8 @@
 		return -1;
 	}
 
+	host_config->buffer = cpu_to_be64(addr);
+
 	init_completion(&evt_struct->comp);
 	spin_lock_irqsave(hostdata->host->host_lock, flags);
 	rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2);
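
The ibmvscsi hunks above all apply one pattern: data that the hypervisor or
the SRP target interprets is kept big-endian in memory and converted with
cpu_to_be*()/be*_to_cpu() exactly at the point of access, so the same driver
works on both big- and little-endian hosts (the helpers compile to no-ops on
big-endian, so existing ppc64 users pay nothing). A minimal sketch of the
pattern, using a hypothetical descriptor rather than the driver's real
structures:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	/* Hypothetical wire-format descriptor: fields are big-endian. */
	struct demo_desc {
		__be32 len;	/* length in bytes, big-endian on the wire */
		__be64 addr;	/* DMA address, big-endian on the wire */
	};

	static void demo_fill(struct demo_desc *d, u32 len, u64 addr)
	{
		/* Convert from CPU order exactly once, at the boundary. */
		d->len  = cpu_to_be32(len);
		d->addr = cpu_to_be64(addr);
	}

	static u32 demo_len(const struct demo_desc *d)
	{
		/* Convert back to CPU order before doing arithmetic on it. */
		return be32_to_cpu(d->len);
	}
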
diff --git a/drivers/scsi/ibmvscsi/viosrp.h b/drivers/scsi/ibmvscsi/viosrp.h
index 2cd735d..1162430 100644
--- a/drivers/scsi/ibmvscsi/viosrp.h
+++ b/drivers/scsi/ibmvscsi/viosrp.h
@@ -75,9 +75,9 @@
 	u8 format;		/* SCSI vs out-of-band */
 	u8 reserved;
 	u8 status;		/* non-scsi failure? (e.g. DMA failure) */
-	u16 timeout;		/* in seconds */
-	u16 IU_length;		/* in bytes */
-	u64 IU_data_ptr;	/* the TCE for transferring data */
+	__be16 timeout;		/* in seconds */
+	__be16 IU_length;		/* in bytes */
+	__be64 IU_data_ptr;	/* the TCE for transferring data */
 };
 
 /* MADs are Management requests above and beyond the IUs defined in the SRP
@@ -124,10 +124,10 @@
  * Common MAD header
  */
 struct mad_common {
-	u32 type;
-	u16 status;
-	u16 length;
-	u64 tag;
+	__be32 type;
+	__be16 status;
+	__be16 length;
+	__be64 tag;
 };
 
 /*
@@ -139,23 +139,23 @@
  */
 struct viosrp_empty_iu {
 	struct mad_common common;
-	u64 buffer;
-	u32 port;
+	__be64 buffer;
+	__be32 port;
 };
 
 struct viosrp_error_log {
 	struct mad_common common;
-	u64 buffer;
+	__be64 buffer;
 };
 
 struct viosrp_adapter_info {
 	struct mad_common common;
-	u64 buffer;
+	__be64 buffer;
 };
 
 struct viosrp_host_config {
 	struct mad_common common;
-	u64 buffer;
+	__be64 buffer;
 };
 
 struct viosrp_fast_fail {
@@ -164,27 +164,27 @@
 
 struct viosrp_capabilities {
 	struct mad_common common;
-	u64 buffer;
+	__be64 buffer;
 };
 
 struct mad_capability_common {
-	u32 cap_type;
-	u16 length;
-	u16 server_support;
+	__be32 cap_type;
+	__be16 length;
+	__be16 server_support;
 };
 
 struct mad_reserve_cap {
 	struct mad_capability_common common;
-	u32 type;
+	__be32 type;
 };
 
 struct mad_migration_cap {
 	struct mad_capability_common common;
-	u32 ecl;
+	__be32 ecl;
 };
 
 struct capabilities{
-	u32 flags;
+	__be32 flags;
 	char name[SRP_MAX_LOC_LEN];
 	char loc[SRP_MAX_LOC_LEN];
 	struct mad_migration_cap migration;
@@ -208,10 +208,10 @@
 struct mad_adapter_info_data {
 	char srp_version[8];
 	char partition_name[96];
-	u32 partition_number;
-	u32 mad_version;
-	u32 os_type;
-	u32 port_max_txu[8];	/* per-port maximum transfer */
+	__be32 partition_number;
+	__be32 mad_version;
+	__be32 os_type;
+	__be32 port_max_txu[8];	/* per-port maximum transfer */
 };
 
 #endif
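
Retyping the viosrp.h fields as __be16/__be32/__be64 changes no struct
layout; it makes the byte order part of the type, so sparse ("make C=1") can
flag any access that skips the conversion helpers. A hypothetical
illustration of what the annotation buys:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	struct demo_mad {
		__be16 length;
	};

	static u16 demo_bad(struct demo_mad *m)
	{
		return m->length;	/* sparse warns: different base types */
	}

	static u16 demo_good(struct demo_mad *m)
	{
		return be16_to_cpu(m->length);	/* clean: converted at access */
	}
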
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index df43bfe..4e1b75c 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -708,6 +708,7 @@
 	uint32_t cfg_multi_ring_type;
 	uint32_t cfg_poll;
 	uint32_t cfg_poll_tmo;
+	uint32_t cfg_task_mgmt_tmo;
 	uint32_t cfg_use_msi;
 	uint32_t cfg_fcp_imax;
 	uint32_t cfg_fcp_cpu_map;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 16498e0..00656fc 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1865,8 +1865,10 @@
 { \
 	if (val >= minval && val <= maxval) {\
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
-			"3053 lpfc_" #attr " changed from %d to %d\n", \
-			vport->cfg_##attr, val); \
+			"3053 lpfc_" #attr \
+			" changed from %d (x%x) to %d (x%x)\n", \
+			vport->cfg_##attr, vport->cfg_##attr, \
+			val, val); \
 		vport->cfg_##attr = val;\
 		return 0;\
 	}\
@@ -4011,8 +4013,11 @@
 # For [0], FCP commands are issued to Work Queues in a round robin fashion.
 # For [1], FCP commands are issued to a Work Queue associated with the
 #          current CPU.
+# The driver sets this to 1 if it is able to set up CPU affinity for FCP
+# I/Os through the Work Queue associated with the current CPU. Otherwise,
+# round robin scheduling of FCP I/Os through the WQs will be used.
 */
-LPFC_ATTR_RW(fcp_io_sched, 0, 0, 1, "Determine scheduling algrithmn for "
+LPFC_ATTR_RW(fcp_io_sched, 0, 0, 1, "Determine scheduling algorithm for "
 		"issuing commands [0] - Round Robin, [1] - Current CPU");
 
 /*
@@ -4110,6 +4115,12 @@
 	     "Milliseconds driver will wait between polling FCP ring");
 
 /*
+# lpfc_task_mgmt_tmo: Maximum time to wait for task management commands
+# to complete in seconds. Value range is [5,180], default value is 60.
+*/
+LPFC_ATTR_RW(task_mgmt_tmo, 60, 5, 180,
+	     "Maximum time to wait for task management commands to complete");
+/*
 # lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that
 #		support this feature
 #       0  = MSI disabled
@@ -4295,6 +4306,7 @@
 	&dev_attr_issue_reset,
 	&dev_attr_lpfc_poll,
 	&dev_attr_lpfc_poll_tmo,
+	&dev_attr_lpfc_task_mgmt_tmo,
 	&dev_attr_lpfc_use_msi,
 	&dev_attr_lpfc_fcp_imax,
 	&dev_attr_lpfc_fcp_cpu_map,
@@ -5274,6 +5286,7 @@
 	lpfc_topology_init(phba, lpfc_topology);
 	lpfc_link_speed_init(phba, lpfc_link_speed);
 	lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
+	lpfc_task_mgmt_tmo_init(phba, lpfc_task_mgmt_tmo);
 	lpfc_enable_npiv_init(phba, lpfc_enable_npiv);
 	lpfc_fcf_failover_policy_init(phba, lpfc_fcf_failover_policy);
 	lpfc_enable_rrq_init(phba, lpfc_enable_rrq);
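
The new lpfc_task_mgmt_tmo attribute follows the LPFC_ATTR_RW convention
above: the macro generates an init/set pair that logs the change and rejects
values outside the documented [5,180] range. A range-checked setter in that
style, sketched with hypothetical names rather than the macro's generated
code:

	#include <linux/types.h>
	#include <linux/errno.h>

	struct demo_cfg {
		u32 task_mgmt_tmo;	/* seconds, documented range [5,180] */
	};

	/* Sketch of the setter an LPFC_ATTR_RW-style macro would generate. */
	static int demo_task_mgmt_tmo_set(struct demo_cfg *cfg, int val)
	{
		if (val < 5 || val > 180)
			return -EINVAL;	/* out-of-range writes are rejected */
		cfg->task_mgmt_tmo = val;
		return 0;
	}
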
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 79c13c3..b92aec9 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -317,6 +317,11 @@
 	}
 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 
+	/* Close the timeout handler abort window */
+	spin_lock_irqsave(&phba->hbalock, flags);
+	cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+
 	iocb = &dd_data->context_un.iocb;
 	ndlp = iocb->ndlp;
 	rmp = iocb->rmp;
@@ -387,6 +392,7 @@
 	int request_nseg;
 	int reply_nseg;
 	struct bsg_job_data *dd_data;
+	unsigned long flags;
 	uint32_t creg_val;
 	int rc = 0;
 	int iocb_stat;
@@ -501,14 +507,24 @@
 	}
 
 	iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
-	if (iocb_stat == IOCB_SUCCESS)
+
+	if (iocb_stat == IOCB_SUCCESS) {
+		spin_lock_irqsave(&phba->hbalock, flags);
+		/* make sure the I/O has not already been completed */
+		if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) {
+			/* open up abort window to timeout handler */
+			cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
+		}
+		spin_unlock_irqrestore(&phba->hbalock, flags);
 		return 0; /* done for now */
-	else if (iocb_stat == IOCB_BUSY)
+	} else if (iocb_stat == IOCB_BUSY) {
 		rc = -EAGAIN;
-	else
+	} else {
 		rc = -EIO;
+	}
 
 	/* iocb failed so cleanup */
+	job->dd_data = NULL;
 
 free_rmp:
 	lpfc_free_bsg_buffers(phba, rmp);
@@ -577,6 +593,11 @@
 	}
 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 
+	/* Close the timeout handler abort window */
+	spin_lock_irqsave(&phba->hbalock, flags);
+	cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+
 	rsp = &rspiocbq->iocb;
 	pcmd = (struct lpfc_dmabuf *)cmdiocbq->context2;
 	prsp = (struct lpfc_dmabuf *)pcmd->list.next;
@@ -639,6 +660,7 @@
 	struct lpfc_iocbq *cmdiocbq;
 	uint16_t rpi = 0;
 	struct bsg_job_data *dd_data;
+	unsigned long flags;
 	uint32_t creg_val;
 	int rc = 0;
 
@@ -721,15 +743,25 @@
 
 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
 
-	if (rc == IOCB_SUCCESS)
+	if (rc == IOCB_SUCCESS) {
+		spin_lock_irqsave(&phba->hbalock, flags);
+		/* make sure the I/O has not been completed/released */
+		if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) {
+			/* open up abort window to timeout handler */
+			cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
+		}
+		spin_unlock_irqrestore(&phba->hbalock, flags);
 		return 0; /* done for now */
-	else if (rc == IOCB_BUSY)
+	} else if (rc == IOCB_BUSY) {
 		rc = -EAGAIN;
-	else
+	} else {
 		rc = -EIO;
+	}
+
+	/* iocb failed so cleanup */
+	job->dd_data = NULL;
 
 linkdown_err:
-
 	cmdiocbq->context1 = ndlp;
 	lpfc_els_free_iocb(phba, cmdiocbq);
 
@@ -1249,7 +1281,7 @@
 	struct lpfc_hba *phba = vport->phba;
 	struct get_ct_event *event_req;
 	struct get_ct_event_reply *event_reply;
-	struct lpfc_bsg_event *evt;
+	struct lpfc_bsg_event *evt, *evt_next;
 	struct event_data *evt_dat = NULL;
 	unsigned long flags;
 	uint32_t rc = 0;
@@ -1269,7 +1301,7 @@
 	event_reply = (struct get_ct_event_reply *)
 		job->reply->reply_data.vendor_reply.vendor_rsp;
 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
-	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
+	list_for_each_entry_safe(evt, evt_next, &phba->ct_ev_waiters, node) {
 		if (evt->reg_id == event_req->ev_reg_id) {
 			if (list_empty(&evt->events_to_get))
 				break;
@@ -1370,6 +1402,11 @@
 	}
 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 
+	/* Close the timeout handler abort window */
+	spin_lock_irqsave(&phba->hbalock, flags);
+	cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+
 	ndlp = dd_data->context_un.iocb.ndlp;
 	cmp = cmdiocbq->context2;
 	bmp = cmdiocbq->context3;
@@ -1433,6 +1470,7 @@
 	int rc = 0;
 	struct lpfc_nodelist *ndlp = NULL;
 	struct bsg_job_data *dd_data;
+	unsigned long flags;
 	uint32_t creg_val;
 
 	/* allocate our bsg tracking structure */
@@ -1542,8 +1580,19 @@
 
 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
 
-	if (rc == IOCB_SUCCESS)
+	if (rc == IOCB_SUCCESS) {
+		spin_lock_irqsave(&phba->hbalock, flags);
+		/* make sure the I/O has not been completed/released */
+		if (ctiocb->iocb_flag & LPFC_IO_LIBDFC) {
+			/* open up abort window to timeout handler */
+			ctiocb->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
+		}
+		spin_unlock_irqrestore(&phba->hbalock, flags);
 		return 0; /* done for now */
+	}
+
+	/* iocb failed so cleanup */
+	job->dd_data = NULL;
 
 issue_ct_rsp_exit:
 	lpfc_sli_release_iocbq(phba, ctiocb);
@@ -5284,9 +5333,15 @@
 		 * remove it from the txq queue and call cancel iocbs.
 		 * Otherwise, call abort iotag
 		 */
-
 		cmdiocb = dd_data->context_un.iocb.cmdiocbq;
-		spin_lock_irq(&phba->hbalock);
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+		spin_lock_irqsave(&phba->hbalock, flags);
+		/* make sure the I/O abort window is still open */
+		if (!(cmdiocb->iocb_flag & LPFC_IO_CMD_OUTSTANDING)) {
+			spin_unlock_irqrestore(&phba->hbalock, flags);
+			return -EAGAIN;
+		}
 		list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
 					 list) {
 			if (check_iocb == cmdiocb) {
@@ -5296,8 +5351,7 @@
 		}
 		if (list_empty(&completions))
 			lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
-		spin_unlock_irq(&phba->hbalock);
-		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		spin_unlock_irqrestore(&phba->hbalock, flags);
 		if (!list_empty(&completions)) {
 			lpfc_sli_cancel_iocbs(phba, &completions,
 					      IOSTAT_LOCAL_REJECT,
@@ -5321,9 +5375,10 @@
 		 * remove it from the txq queue and call cancel iocbs.
 		 * Otherwise, call abort iotag.
 		 */
-
 		cmdiocb = dd_data->context_un.menlo.cmdiocbq;
-		spin_lock_irq(&phba->hbalock);
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+		spin_lock_irqsave(&phba->hbalock, flags);
 		list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
 					 list) {
 			if (check_iocb == cmdiocb) {
@@ -5333,8 +5388,7 @@
 		}
 		if (list_empty(&completions))
 			lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
-		spin_unlock_irq(&phba->hbalock);
-		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		spin_unlock_irqrestore(&phba->hbalock, flags);
 		if (!list_empty(&completions)) {
 			lpfc_sli_cancel_iocbs(phba, &completions,
 					      IOSTAT_LOCAL_REJECT,
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 60d6ca2..7801601 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -4437,6 +4437,7 @@
 	if (!ndlp)
 		return;
 	lpfc_issue_els_logo(vport, ndlp, 0);
+	mempool_free(pmb, phba->mbox_mem_pool);
 }
 
 /*
@@ -4456,7 +4457,15 @@
 	int rc;
 	uint16_t rpi;
 
-	if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
+	if (ndlp->nlp_flag & NLP_RPI_REGISTERED ||
+	    ndlp->nlp_flag & NLP_REG_LOGIN_SEND) {
+		if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
+			lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+					 "3366 RPI x%x needs to be "
+					 "unregistered nlp_flag x%x "
+					 "did x%x\n",
+					 ndlp->nlp_rpi, ndlp->nlp_flag,
+					 ndlp->nlp_DID);
 		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 		if (mbox) {
 			/* SLI4 ports require the physical rpi value. */
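
The one-line lpfc_hbadisc.c fix plugs a mailbox leak: once a mailbox
completion handler runs, it owns the job structure and must return it to the
pool on every exit path. The ownership rule, sketched with hypothetical
names:

	#include <linux/mempool.h>

	/* The completion handler owns 'job' once invoked. */
	static void demo_mbox_cmpl(mempool_t *pool, void *job, bool ok)
	{
		if (ok) {
			/* ... act on the completed mailbox ... */
		}
		mempool_free(job, pool);	/* freed on success and failure alike */
	}
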
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 501147c..647f5bf 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -3031,10 +3031,10 @@
 			phba->sli4_hba.scsi_xri_max);
 
 	spin_lock_irq(&phba->scsi_buf_list_get_lock);
-	spin_lock_irq(&phba->scsi_buf_list_put_lock);
+	spin_lock(&phba->scsi_buf_list_put_lock);
 	list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list);
 	list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list);
-	spin_unlock_irq(&phba->scsi_buf_list_put_lock);
+	spin_unlock(&phba->scsi_buf_list_put_lock);
 	spin_unlock_irq(&phba->scsi_buf_list_get_lock);
 
 	if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) {
@@ -3070,10 +3070,10 @@
 		psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
 	}
 	spin_lock_irq(&phba->scsi_buf_list_get_lock);
-	spin_lock_irq(&phba->scsi_buf_list_put_lock);
+	spin_lock(&phba->scsi_buf_list_put_lock);
 	list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get);
 	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
-	spin_unlock_irq(&phba->scsi_buf_list_put_lock);
+	spin_unlock(&phba->scsi_buf_list_put_lock);
 	spin_unlock_irq(&phba->scsi_buf_list_get_lock);
 
 	return 0;
@@ -4859,6 +4859,9 @@
 	struct lpfc_mqe *mqe;
 	int longs;
 
+	/* Get all the module params for configuring this host */
+	lpfc_get_cfgparam(phba);
+
 	/* Before proceed, wait for POST done and device ready */
 	rc = lpfc_sli4_post_status_check(phba);
 	if (rc)
@@ -4902,15 +4905,6 @@
 		sizeof(struct lpfc_mbox_ext_buf_ctx));
 	INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
 
-	/*
-	 * We need to do a READ_CONFIG mailbox command here before
-	 * calling lpfc_get_cfgparam. For VFs this will report the
-	 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
-	 * All of the resources allocated
-	 * for this Port are tied to these values.
-	 */
-	/* Get all the module params for configuring this host */
-	lpfc_get_cfgparam(phba);
 	phba->max_vpi = LPFC_MAX_VPI;
 
 	/* This will be set to correct value after the read_config mbox */
@@ -7141,19 +7135,6 @@
 		phba->sli4_hba.fcp_wq = NULL;
 	}
 
-	if (phba->pci_bar0_memmap_p) {
-		iounmap(phba->pci_bar0_memmap_p);
-		phba->pci_bar0_memmap_p = NULL;
-	}
-	if (phba->pci_bar2_memmap_p) {
-		iounmap(phba->pci_bar2_memmap_p);
-		phba->pci_bar2_memmap_p = NULL;
-	}
-	if (phba->pci_bar4_memmap_p) {
-		iounmap(phba->pci_bar4_memmap_p);
-		phba->pci_bar4_memmap_p = NULL;
-	}
-
 	/* Release FCP CQ mapping array */
 	if (phba->sli4_hba.fcp_cq_map != NULL) {
 		kfree(phba->sli4_hba.fcp_cq_map);
@@ -7942,9 +7923,9 @@
 	 * particular PCI BARs regions is dependent on the type of
 	 * SLI4 device.
 	 */
-	if (pci_resource_start(pdev, 0)) {
-		phba->pci_bar0_map = pci_resource_start(pdev, 0);
-		bar0map_len = pci_resource_len(pdev, 0);
+	if (pci_resource_start(pdev, PCI_64BIT_BAR0)) {
+		phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
+		bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
 
 		/*
 		 * Map SLI4 PCI Config Space Register base to a kernel virtual
@@ -7958,6 +7939,7 @@
 				   "registers.\n");
 			goto out;
 		}
+		phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
 		/* Set up BAR0 PCI config space register memory map */
 		lpfc_sli4_bar0_register_memmap(phba, if_type);
 	} else {
@@ -7980,13 +7962,13 @@
 	}
 
 	if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
-	    (pci_resource_start(pdev, 2))) {
+	    (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
 		/*
 		 * Map SLI4 if type 0 HBA Control Register base to a kernel
 		 * virtual address and setup the registers.
 		 */
-		phba->pci_bar1_map = pci_resource_start(pdev, 2);
-		bar1map_len = pci_resource_len(pdev, 2);
+		phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
+		bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
 		phba->sli4_hba.ctrl_regs_memmap_p =
 				ioremap(phba->pci_bar1_map, bar1map_len);
 		if (!phba->sli4_hba.ctrl_regs_memmap_p) {
@@ -7994,17 +7976,18 @@
 			   "ioremap failed for SLI4 HBA control registers.\n");
 			goto out_iounmap_conf;
 		}
+		phba->pci_bar2_memmap_p = phba->sli4_hba.ctrl_regs_memmap_p;
 		lpfc_sli4_bar1_register_memmap(phba);
 	}
 
 	if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
-	    (pci_resource_start(pdev, 4))) {
+	    (pci_resource_start(pdev, PCI_64BIT_BAR4))) {
 		/*
 		 * Map SLI4 if type 0 HBA Doorbell Register base to a kernel
 		 * virtual address and setup the registers.
 		 */
-		phba->pci_bar2_map = pci_resource_start(pdev, 4);
-		bar2map_len = pci_resource_len(pdev, 4);
+		phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
+		bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
 		phba->sli4_hba.drbl_regs_memmap_p =
 				ioremap(phba->pci_bar2_map, bar2map_len);
 		if (!phba->sli4_hba.drbl_regs_memmap_p) {
@@ -8012,6 +7995,7 @@
 			   "ioremap failed for SLI4 HBA doorbell registers.\n");
 			goto out_iounmap_ctrl;
 		}
+		phba->pci_bar4_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
 		error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
 		if (error)
 			goto out_iounmap_all;
@@ -8405,7 +8389,8 @@
 lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
 {
 	int i, idx, saved_chann, used_chann, cpu, phys_id;
-	int max_phys_id, num_io_channel, first_cpu;
+	int max_phys_id, min_phys_id;
+	int num_io_channel, first_cpu, chan;
 	struct lpfc_vector_map_info *cpup;
 #ifdef CONFIG_X86
 	struct cpuinfo_x86 *cpuinfo;
@@ -8423,6 +8408,7 @@
 		phba->sli4_hba.num_present_cpu));
 
 	max_phys_id = 0;
+	min_phys_id = 0xff;
 	phys_id = 0;
 	num_io_channel = 0;
 	first_cpu = LPFC_VECTOR_MAP_EMPTY;
@@ -8446,9 +8432,12 @@
 
 		if (cpup->phys_id > max_phys_id)
 			max_phys_id = cpup->phys_id;
+		if (cpup->phys_id < min_phys_id)
+			min_phys_id = cpup->phys_id;
 		cpup++;
 	}
 
+	phys_id = min_phys_id;
 	/* Now associate the HBA vectors with specific CPUs */
 	for (idx = 0; idx < vectors; idx++) {
 		cpup = phba->sli4_hba.cpu_map;
@@ -8459,13 +8448,25 @@
 			for (i = 1; i < max_phys_id; i++) {
 				phys_id++;
 				if (phys_id > max_phys_id)
-					phys_id = 0;
+					phys_id = min_phys_id;
 				cpu = lpfc_find_next_cpu(phba, phys_id);
 				if (cpu == LPFC_VECTOR_MAP_EMPTY)
 					continue;
 				goto found;
 			}
 
+			/* Use round robin for scheduling */
+			phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_ROUND_ROBIN;
+			chan = 0;
+			cpup = phba->sli4_hba.cpu_map;
+			for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+				cpup->channel_id = chan;
+				cpup++;
+				chan++;
+				if (chan >= phba->cfg_fcp_io_channel)
+					chan = 0;
+			}
+
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 					"3329 Cannot set affinity:"
 					"Error mapping vector %d (%d)\n",
@@ -8503,7 +8504,7 @@
 		/* Spread vector mapping across multiple physical CPU nodes */
 		phys_id++;
 		if (phys_id > max_phys_id)
-			phys_id = 0;
+			phys_id = min_phys_id;
 	}
 
 	/*
@@ -8513,7 +8514,7 @@
 	 * Base the remaining IO channel assigned, to IO channels already
 	 * assigned to other CPUs on the same phys_id.
 	 */
-	for (i = 0; i <= max_phys_id; i++) {
+	for (i = min_phys_id; i <= max_phys_id; i++) {
 		/*
 		 * If there are no io channels already mapped to
 		 * this phys_id, just round robin thru the io_channels.
@@ -8595,10 +8596,11 @@
 	if (num_io_channel != phba->sli4_hba.num_present_cpu)
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"3333 Set affinity mismatch:"
-				"%d chann != %d cpus: %d vactors\n",
+				"%d chann != %d cpus: %d vectors\n",
 				num_io_channel, phba->sli4_hba.num_present_cpu,
 				vectors);
 
+	/* Enable using cpu affinity for scheduling */
 	phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_BY_CPU;
 	return 1;
 }
@@ -8689,9 +8691,12 @@
 
 cfg_fail_out:
 	/* free the irq already requested */
-	for (--index; index >= 0; index--)
+	for (--index; index >= 0; index--) {
+		irq_set_affinity_hint(phba->sli4_hba.msix_entries[index].
+					  vector, NULL);
 		free_irq(phba->sli4_hba.msix_entries[index].vector,
 			 &phba->sli4_hba.fcp_eq_hdl[index]);
+	}
 
 msi_fail_out:
 	/* Unconfigure MSI-X capability structure */
@@ -8712,9 +8717,12 @@
 	int index;
 
 	/* Free up MSI-X multi-message vectors */
-	for (index = 0; index < phba->cfg_fcp_io_channel; index++)
+	for (index = 0; index < phba->cfg_fcp_io_channel; index++) {
+		irq_set_affinity_hint(phba->sli4_hba.msix_entries[index].
+					  vector, NULL);
 		free_irq(phba->sli4_hba.msix_entries[index].vector,
 			 &phba->sli4_hba.fcp_eq_hdl[index]);
+	}
 
 	/* Disable MSI-X */
 	pci_disable_msix(phba->pcidev);
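
Several lpfc_init.c hunks above replace an inner spin_lock_irq() with a plain
spin_lock(): once the outer spin_lock_irq() has disabled interrupts, a
matching inner spin_unlock_irq() would re-enable them too early, while the
outer lock is still held. The safe nesting pattern, sketched with
hypothetical locks:

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(get_lock);
	static DEFINE_SPINLOCK(put_lock);

	static void demo_splice_lists(void)
	{
		spin_lock_irq(&get_lock);	/* outer lock disables IRQs */
		spin_lock(&put_lock);		/* inner lock leaves IRQ state alone */
		/* ... splice the put list onto the get list ... */
		spin_unlock(&put_lock);		/* IRQs stay disabled, as required */
		spin_unlock_irq(&get_lock);	/* IRQs re-enabled only here */
	}
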
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 1242b6c..c913e8c 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -926,10 +926,10 @@
 
 	/* gather all SCSI buffers that need reposting onto a local list */
 	spin_lock_irq(&phba->scsi_buf_list_get_lock);
-	spin_lock_irq(&phba->scsi_buf_list_put_lock);
+	spin_lock(&phba->scsi_buf_list_put_lock);
 	list_splice_init(&phba->lpfc_scsi_buf_list_get, &post_sblist);
 	list_splice(&phba->lpfc_scsi_buf_list_put, &post_sblist);
-	spin_unlock_irq(&phba->scsi_buf_list_put_lock);
+	spin_unlock(&phba->scsi_buf_list_put_lock);
 	spin_unlock_irq(&phba->scsi_buf_list_get_lock);
 
 	/* post the list of scsi buffer sgls to port if available */
@@ -1000,9 +1000,12 @@
 		}
 		memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
 
-		/* Page alignment is CRITICAL, double check to be sure */
-		if (((unsigned long)(psb->data) &
-		    (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0) {
+		/*
+		 * 4K Page alignment is CRITICAL to BlockGuard, double check
+		 * to be sure.
+		 */
+		if (phba->cfg_enable_bg  && (((unsigned long)(psb->data) &
+		    (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
 			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
 				      psb->data, psb->dma_handle);
 			kfree(psb);
@@ -1134,22 +1137,21 @@
 {
 	struct  lpfc_scsi_buf * lpfc_cmd = NULL;
 	struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get;
-	unsigned long gflag = 0;
-	unsigned long pflag = 0;
+	unsigned long iflag = 0;
 
-	spin_lock_irqsave(&phba->scsi_buf_list_get_lock, gflag);
+	spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
 	list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_scsi_buf,
 			 list);
 	if (!lpfc_cmd) {
-		spin_lock_irqsave(&phba->scsi_buf_list_put_lock, pflag);
+		spin_lock(&phba->scsi_buf_list_put_lock);
 		list_splice(&phba->lpfc_scsi_buf_list_put,
 			    &phba->lpfc_scsi_buf_list_get);
 		INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
 		list_remove_head(scsi_buf_list_get, lpfc_cmd,
 				 struct lpfc_scsi_buf, list);
-		spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, pflag);
+		spin_unlock(&phba->scsi_buf_list_put_lock);
 	}
-	spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, gflag);
+	spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
 	return  lpfc_cmd;
 }
 /**
@@ -1167,11 +1169,10 @@
 lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 {
 	struct lpfc_scsi_buf *lpfc_cmd, *lpfc_cmd_next;
-	unsigned long gflag = 0;
-	unsigned long pflag = 0;
+	unsigned long iflag = 0;
 	int found = 0;
 
-	spin_lock_irqsave(&phba->scsi_buf_list_get_lock, gflag);
+	spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
 	list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
 				 &phba->lpfc_scsi_buf_list_get, list) {
 		if (lpfc_test_rrq_active(phba, ndlp,
@@ -1182,11 +1183,11 @@
 		break;
 	}
 	if (!found) {
-		spin_lock_irqsave(&phba->scsi_buf_list_put_lock, pflag);
+		spin_lock(&phba->scsi_buf_list_put_lock);
 		list_splice(&phba->lpfc_scsi_buf_list_put,
 			    &phba->lpfc_scsi_buf_list_get);
 		INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
-		spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, pflag);
+		spin_unlock(&phba->scsi_buf_list_put_lock);
 		list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
 					 &phba->lpfc_scsi_buf_list_get, list) {
 			if (lpfc_test_rrq_active(
@@ -1197,7 +1198,7 @@
 			break;
 		}
 	}
-	spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, gflag);
+	spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
 	if (!found)
 		return NULL;
 	return  lpfc_cmd;
@@ -3966,11 +3967,11 @@
 
 	/*
 	 * Check via SLI validation that the entire transfer was actually done
-	 * (fcpi_parm should be zero).
+	 * (fcpi_parm should be zero). Apply this check only to reads.
 	 */
-	} else if (fcpi_parm) {
+	} else if (fcpi_parm && (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
-				 "9029 FCP Data Transfer Check Error: "
+				 "9029 FCP Read Check Error Data: "
 				 "x%x x%x x%x x%x x%x\n",
 				 be32_to_cpu(fcpcmd->fcpDl),
 				 be32_to_cpu(fcprsp->rspResId),
@@ -4342,6 +4343,7 @@
 	char tag[2];
 	uint8_t *ptr;
 	bool sli4;
+	uint32_t fcpdl;
 
 	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
 		return;
@@ -4389,8 +4391,12 @@
 			iocb_cmd->ulpPU = PARM_READ_CHECK;
 			if (vport->cfg_first_burst_size &&
 			    (pnode->nlp_flag & NLP_FIRSTBURST)) {
-				piocbq->iocb.un.fcpi.fcpi_XRdy =
-					vport->cfg_first_burst_size;
+				fcpdl = scsi_bufflen(scsi_cmnd);
+				if (fcpdl < vport->cfg_first_burst_size)
+					piocbq->iocb.un.fcpi.fcpi_XRdy = fcpdl;
+				else
+					piocbq->iocb.un.fcpi.fcpi_XRdy =
+						vport->cfg_first_burst_size;
 			}
 			fcp_cmnd->fcpCntl3 = WRITE_DATA;
 			phba->fc4OutputRequests++;
@@ -4878,6 +4884,9 @@
 		goto out_unlock;
 	}
 
+	/* Indicate the IO is being aborted by the driver. */
+	iocb->iocb_flag |= LPFC_DRIVER_ABORTED;
+
 	/*
 	 * The scsi command cannot be in the txq and it is in flight because the
 	 * pCmd is still pointing at the SCSI command we have to abort. There
@@ -5006,7 +5015,7 @@
 	lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode);
 	if (lpfc_cmd == NULL)
 		return FAILED;
-	lpfc_cmd->timeout = 60;
+	lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo;
 	lpfc_cmd->rdata = rdata;
 
 	status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
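
The first-burst hunk above caps fcpi_XRdy at the command's actual data
length, so a short write never advertises more first-burst data than it will
send. The same clamp reads as a single min_t(); shown here as a sketch rather
than the literal patch:

	#include <linux/kernel.h>	/* min_t() */
	#include <linux/types.h>

	/* Never promise more first-burst data than the command carries. */
	static u32 demo_first_burst(u32 fcpdl, u32 cfg_first_burst_size)
	{
		return min_t(u32, fcpdl, cfg_first_burst_size);
	}
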
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 0392e11..612f489 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -9831,6 +9831,13 @@
 					       abort_cmd) != 0)
 			continue;
 
+		/*
+		 * If the iocbq is already being aborted, don't take a second
+		 * action, but do count it.
+		 */
+		if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
+			continue;
+
 		/* issue ABTS for this IOCB based on iotag */
 		abtsiocb = lpfc_sli_get_iocbq(phba);
 		if (abtsiocb == NULL) {
@@ -9838,6 +9845,9 @@
 			continue;
 		}
 
+		/* indicate the IO is being aborted by the driver. */
+		iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
+
 		cmd = &iocbq->iocb;
 		abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
 		abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
@@ -9847,7 +9857,7 @@
 			abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
 		abtsiocb->iocb.ulpLe = 1;
 		abtsiocb->iocb.ulpClass = cmd->ulpClass;
-		abtsiocb->vport = phba->pport;
+		abtsiocb->vport = vport;
 
 		/* ABTS WQE must go to the same WQ as the WQE to be aborted */
 		abtsiocb->fcp_wqidx = iocbq->fcp_wqidx;
@@ -12233,7 +12243,6 @@
 lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
 {
 	struct pci_dev *pdev;
-	unsigned long bar_map, bar_map_len;
 
 	if (!phba->pcidev)
 		return NULL;
@@ -12242,25 +12251,10 @@
 
 	switch (pci_barset) {
 	case WQ_PCI_BAR_0_AND_1:
-		if (!phba->pci_bar0_memmap_p) {
-			bar_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
-			bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
-			phba->pci_bar0_memmap_p = ioremap(bar_map, bar_map_len);
-		}
 		return phba->pci_bar0_memmap_p;
 	case WQ_PCI_BAR_2_AND_3:
-		if (!phba->pci_bar2_memmap_p) {
-			bar_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
-			bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
-			phba->pci_bar2_memmap_p = ioremap(bar_map, bar_map_len);
-		}
 		return phba->pci_bar2_memmap_p;
 	case WQ_PCI_BAR_4_AND_5:
-		if (!phba->pci_bar4_memmap_p) {
-			bar_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
-			bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
-			phba->pci_bar4_memmap_p = ioremap(bar_map, bar_map_len);
-		}
 		return phba->pci_bar4_memmap_p;
 	default:
 		break;
@@ -15808,7 +15802,7 @@
 void
 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
 {
-	struct lpfc_fcf_pri *fcf_pri;
+	struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
 	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
 				"2762 FCF (x%x) reached driver's book "
@@ -15818,7 +15812,8 @@
 	}
 	/* Clear the eligible FCF record index bmask */
 	spin_lock_irq(&phba->hbalock);
-	list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
+	list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
+				 list) {
 		if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
 			list_del_init(&fcf_pri->list);
 			break;
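
Both converted lpfc_sli.c loops delete the current node (list_del_init())
while iterating, which is exactly the case list_for_each_entry_safe() exists
for: it caches the next entry before the loop body can unlink the current
one. A minimal sketch with a hypothetical node type:

	#include <linux/list.h>

	struct demo_node {
		struct list_head list;
		int key;
	};

	static void demo_remove(struct list_head *head, int key)
	{
		struct demo_node *n, *next;

		/* 'next' is fetched before 'n' can be unlinked, so deleting
		 * the current entry mid-walk is safe. */
		list_for_each_entry_safe(n, next, head, list) {
			if (n->key == key) {
				list_del_init(&n->list);
				break;
			}
		}
	}
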
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 9761799..6b0f247 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -58,7 +58,7 @@
 
 	IOCB_t iocb;		/* IOCB cmd */
 	uint8_t retry;		/* retry counter for IOCB cmd - if needed */
-	uint16_t iocb_flag;
+	uint32_t iocb_flag;
 #define LPFC_IO_LIBDFC		1	/* libdfc iocb */
 #define LPFC_IO_WAKE		2	/* Synchronous I/O completed */
 #define LPFC_IO_WAKE_TMO	LPFC_IO_WAKE /* Synchronous I/O timed out */
@@ -73,11 +73,11 @@
 #define LPFC_IO_DIF_PASS	0x400	/* T10 DIF IO pass-thru prot */
 #define LPFC_IO_DIF_STRIP	0x800	/* T10 DIF IO strip prot */
 #define LPFC_IO_DIF_INSERT	0x1000	/* T10 DIF IO insert prot */
+#define LPFC_IO_CMD_OUTSTANDING	0x2000 /* timeout handler abort window */
 
 #define LPFC_FIP_ELS_ID_MASK	0xc000	/* ELS_ID range 0-3, non-shifted mask */
 #define LPFC_FIP_ELS_ID_SHIFT	14
 
-	uint8_t rsvd2;
 	uint32_t drvrTimeout;	/* driver timeout in seconds */
 	uint32_t fcp_wqidx;	/* index to FCP work queue */
 	struct lpfc_vport *vport;/* virtual port pointer */
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 5bcc382..85120b7 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -523,7 +523,7 @@
 	struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */
 	struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */
 
-	uint8_t fw_func_mode;	/* FW function protocol mode */
+	uint32_t fw_func_mode;	/* FW function protocol mode */
 	uint32_t ulp0_mode;	/* ULP0 protocol mode */
 	uint32_t ulp1_mode;	/* ULP1 protocol mode */
 
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 21859d2..f58f183 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "8.3.41"
+#define LPFC_DRIVER_VERSION "8.3.42"
 #define LPFC_DRIVER_NAME		"lpfc"
 
 /* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 04a42a5..0c73ba4 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -33,9 +33,9 @@
 /*
  * MegaRAID SAS Driver meta data
  */
-#define MEGASAS_VERSION				"06.600.18.00-rc1"
-#define MEGASAS_RELDATE				"May. 15, 2013"
-#define MEGASAS_EXT_VERSION			"Wed. May. 15 17:00:00 PDT 2013"
+#define MEGASAS_VERSION				"06.700.06.00-rc1"
+#define MEGASAS_RELDATE				"Aug. 31, 2013"
+#define MEGASAS_EXT_VERSION			"Sat. Aug. 31 17:00:00 PDT 2013"
 
 /*
  * Device IDs
@@ -170,6 +170,7 @@
 
 #define MR_DCMD_CTRL_GET_INFO			0x01010000
 #define MR_DCMD_LD_GET_LIST			0x03010000
+#define MR_DCMD_LD_LIST_QUERY			0x03010100
 
 #define MR_DCMD_CTRL_CACHE_FLUSH		0x01101000
 #define MR_FLUSH_CTRL_CACHE			0x01
@@ -345,6 +346,15 @@
 	MR_PD_QUERY_TYPE_EXPOSED_TO_HOST    = 5,
 };
 
+enum MR_LD_QUERY_TYPE {
+	MR_LD_QUERY_TYPE_ALL	         = 0,
+	MR_LD_QUERY_TYPE_EXPOSED_TO_HOST = 1,
+	MR_LD_QUERY_TYPE_USED_TGT_IDS    = 2,
+	MR_LD_QUERY_TYPE_CLUSTER_ACCESS  = 3,
+	MR_LD_QUERY_TYPE_CLUSTER_LOCALE  = 4,
+};
+
+
 #define MR_EVT_CFG_CLEARED                              0x0004
 #define MR_EVT_LD_STATE_CHANGE                          0x0051
 #define MR_EVT_PD_INSERTED                              0x005b
@@ -435,6 +445,14 @@
 	} ldList[MAX_LOGICAL_DRIVES];
 } __packed;
 
+struct MR_LD_TARGETID_LIST {
+	u32	size;
+	u32	count;
+	u8	pad[3];
+	u8	targetId[MAX_LOGICAL_DRIVES];
+};
+
+
 /*
  * SAS controller properties
  */
@@ -474,21 +492,39 @@
 	* a bit in the following structure.
 	*/
 	struct {
-		u32     copyBackDisabled            : 1;
-		u32     SMARTerEnabled              : 1;
-		u32     prCorrectUnconfiguredAreas  : 1;
-		u32     useFdeOnly                  : 1;
-		u32     disableNCQ                  : 1;
-		u32     SSDSMARTerEnabled           : 1;
-		u32     SSDPatrolReadEnabled        : 1;
-		u32     enableSpinDownUnconfigured  : 1;
-		u32     autoEnhancedImport          : 1;
-		u32     enableSecretKeyControl      : 1;
-		u32     disableOnlineCtrlReset      : 1;
-		u32     allowBootWithPinnedCache    : 1;
-		u32     disableSpinDownHS           : 1;
-		u32     enableJBOD                  : 1;
-		u32     reserved                    :18;
+#if   defined(__BIG_ENDIAN_BITFIELD)
+		u32     reserved:18;
+		u32     enableJBOD:1;
+		u32     disableSpinDownHS:1;
+		u32     allowBootWithPinnedCache:1;
+		u32     disableOnlineCtrlReset:1;
+		u32     enableSecretKeyControl:1;
+		u32     autoEnhancedImport:1;
+		u32     enableSpinDownUnconfigured:1;
+		u32     SSDPatrolReadEnabled:1;
+		u32     SSDSMARTerEnabled:1;
+		u32     disableNCQ:1;
+		u32     useFdeOnly:1;
+		u32     prCorrectUnconfiguredAreas:1;
+		u32     SMARTerEnabled:1;
+		u32     copyBackDisabled:1;
+#else
+		u32     copyBackDisabled:1;
+		u32     SMARTerEnabled:1;
+		u32     prCorrectUnconfiguredAreas:1;
+		u32     useFdeOnly:1;
+		u32     disableNCQ:1;
+		u32     SSDSMARTerEnabled:1;
+		u32     SSDPatrolReadEnabled:1;
+		u32     enableSpinDownUnconfigured:1;
+		u32     autoEnhancedImport:1;
+		u32     enableSecretKeyControl:1;
+		u32     disableOnlineCtrlReset:1;
+		u32     allowBootWithPinnedCache:1;
+		u32     disableSpinDownHS:1;
+		u32     enableJBOD:1;
+		u32     reserved:18;
+#endif
 	} OnOffProperties;
 	u8 autoSnapVDSpace;
 	u8 viewSpace;
@@ -802,6 +838,30 @@
 	u16 cacheMemorySize;                    /*7A2h */
 
 	struct {                                /*7A4h */
+#if   defined(__BIG_ENDIAN_BITFIELD)
+		u32     reserved:11;
+		u32     supportUnevenSpans:1;
+		u32     dedicatedHotSparesLimited:1;
+		u32     headlessMode:1;
+		u32     supportEmulatedDrives:1;
+		u32     supportResetNow:1;
+		u32     realTimeScheduler:1;
+		u32     supportSSDPatrolRead:1;
+		u32     supportPerfTuning:1;
+		u32     disableOnlinePFKChange:1;
+		u32     supportJBOD:1;
+		u32     supportBootTimePFKChange:1;
+		u32     supportSetLinkSpeed:1;
+		u32     supportEmergencySpares:1;
+		u32     supportSuspendResumeBGops:1;
+		u32     blockSSDWriteCacheChange:1;
+		u32     supportShieldState:1;
+		u32     supportLdBBMInfo:1;
+		u32     supportLdPIType3:1;
+		u32     supportLdPIType2:1;
+		u32     supportLdPIType1:1;
+		u32     supportPIcontroller:1;
+#else
 		u32     supportPIcontroller:1;
 		u32     supportLdPIType1:1;
 		u32     supportLdPIType2:1;
@@ -827,6 +887,7 @@
 
 		u32     supportUnevenSpans:1;
 		u32     reserved:11;
+#endif
 	} adapterOperations2;
 
 	u8  driverVersion[32];                  /*7A8h */
@@ -863,7 +924,7 @@
  * ===============================
  */
 #define MEGASAS_MAX_PD_CHANNELS			2
-#define MEGASAS_MAX_LD_CHANNELS			2
+#define MEGASAS_MAX_LD_CHANNELS			1
 #define MEGASAS_MAX_CHANNELS			(MEGASAS_MAX_PD_CHANNELS + \
 						MEGASAS_MAX_LD_CHANNELS)
 #define MEGASAS_MAX_DEV_PER_CHANNEL		128
@@ -1051,9 +1112,15 @@
 
 typedef union _MFI_CAPABILITIES {
 	struct {
+#if   defined(__BIG_ENDIAN_BITFIELD)
+		u32     reserved:30;
+		u32     support_additional_msix:1;
+		u32     support_fp_remote_lun:1;
+#else
 		u32     support_fp_remote_lun:1;
 		u32     support_additional_msix:1;
 		u32     reserved:30;
+#endif
 	} mfi_capabilities;
 	u32     reg;
 } MFI_CAPABILITIES;
@@ -1656,4 +1723,16 @@
 	int max_index;
 };
 
+u8
+MR_BuildRaidContext(struct megasas_instance *instance,
+		    struct IO_REQUEST_INFO *io_info,
+		    struct RAID_CONTEXT *pRAID_Context,
+		    struct MR_FW_RAID_MAP_ALL *map, u8 **raidLUN);
+u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map);
+struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_FW_RAID_MAP_ALL *map);
+u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_FW_RAID_MAP_ALL *map);
+u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_FW_RAID_MAP_ALL *map);
+u16 MR_PdDevHandleGet(u32 pd, struct MR_FW_RAID_MAP_ALL *map);
+u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map);
+
 #endif				/*LSI_MEGARAID_SAS_H */
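
C leaves the order in which bitfields are packed to the ABI, so a
firmware-defined 32-bit flags word needs both layouts spelled out;
__BIG_ENDIAN_BITFIELD / __LITTLE_ENDIAN_BITFIELD from <asm/byteorder.h>
select the right one, as the megaraid_sas.h hunks above do. The idiom,
trimmed to a hypothetical three-field word:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	/* Same in-memory layout on both bitfield orderings. */
	struct demo_flags {
	#if defined(__BIG_ENDIAN_BITFIELD)
		u32 reserved:30;
		u32 feature_b:1;
		u32 feature_a:1;	/* bit 0 of the little-endian word */
	#else
		u32 feature_a:1;	/* bit 0 of the little-endian word */
		u32 feature_b:1;
		u32 reserved:30;
	#endif
	};

The mirrored declarations keep each named bit at the same position in the
32-bit word the firmware sees, whichever way the compiler packs bitfields.
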
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 1f0ca68..3020921 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -18,7 +18,7 @@
  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  *
  *  FILE: megaraid_sas_base.c
- *  Version : 06.600.18.00-rc1
+ *  Version : 06.700.06.00-rc1
  *
  *  Authors: LSI Corporation
  *           Sreenivas Bagalkote
@@ -92,6 +92,8 @@
 
 int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
 static int megasas_get_pd_list(struct megasas_instance *instance);
+static int megasas_ld_list_query(struct megasas_instance *instance,
+				 u8 query_type);
 static int megasas_issue_init_mfi(struct megasas_instance *instance);
 static int megasas_register_aen(struct megasas_instance *instance,
 				u32 seq_num, u32 class_locale_word);
@@ -374,13 +376,11 @@
 megasas_check_reset_xscale(struct megasas_instance *instance,
 		struct megasas_register_set __iomem *regs)
 {
-	u32 consumer;
-	consumer = *instance->consumer;
 
 	if ((instance->adprecovery != MEGASAS_HBA_OPERATIONAL) &&
-		(*instance->consumer == MEGASAS_ADPRESET_INPROG_SIGN)) {
+	    (le32_to_cpu(*instance->consumer) ==
+		MEGASAS_ADPRESET_INPROG_SIGN))
 		return 1;
-	}
 	return 0;
 }
 
@@ -629,9 +629,10 @@
 {
 	unsigned long flags;
 	spin_lock_irqsave(&instance->hba_lock, flags);
-	writel(0, &(regs)->inbound_high_queue_port);
-	writel((frame_phys_addr | (frame_count<<1))|1,
-		&(regs)->inbound_low_queue_port);
+	writel(upper_32_bits(frame_phys_addr),
+	       &(regs)->inbound_high_queue_port);
+	writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
+	       &(regs)->inbound_low_queue_port);
 	spin_unlock_irqrestore(&instance->hba_lock, flags);
 }
 
@@ -879,8 +880,8 @@
 
 	struct megasas_header *frame_hdr = &cmd->frame->hdr;
 
-	frame_hdr->cmd_status = 0xFF;
-	frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
+	frame_hdr->cmd_status = MFI_CMD_STATUS_POLL_MODE;
+	frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
 
 	/*
 	 * Issue the frame using inbound queue port
@@ -944,10 +945,12 @@
 	 */
 	abort_fr->cmd = MFI_CMD_ABORT;
 	abort_fr->cmd_status = 0xFF;
-	abort_fr->flags = 0;
-	abort_fr->abort_context = cmd_to_abort->index;
-	abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
-	abort_fr->abort_mfi_phys_addr_hi = 0;
+	abort_fr->flags = cpu_to_le16(0);
+	abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index);
+	abort_fr->abort_mfi_phys_addr_lo =
+		cpu_to_le32(lower_32_bits(cmd_to_abort->frame_phys_addr));
+	abort_fr->abort_mfi_phys_addr_hi =
+		cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr));
 
 	cmd->sync_cmd = 1;
 	cmd->cmd_status = 0xFF;
@@ -986,8 +989,8 @@
 
 	if (sge_count) {
 		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
-			mfi_sgl->sge32[i].length = sg_dma_len(os_sgl);
-			mfi_sgl->sge32[i].phys_addr = sg_dma_address(os_sgl);
+			mfi_sgl->sge32[i].length = cpu_to_le32(sg_dma_len(os_sgl));
+			mfi_sgl->sge32[i].phys_addr = cpu_to_le32(sg_dma_address(os_sgl));
 		}
 	}
 	return sge_count;
@@ -1015,8 +1018,8 @@
 
 	if (sge_count) {
 		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
-			mfi_sgl->sge64[i].length = sg_dma_len(os_sgl);
-			mfi_sgl->sge64[i].phys_addr = sg_dma_address(os_sgl);
+			mfi_sgl->sge64[i].length = cpu_to_le32(sg_dma_len(os_sgl));
+			mfi_sgl->sge64[i].phys_addr = cpu_to_le64(sg_dma_address(os_sgl));
 		}
 	}
 	return sge_count;
@@ -1043,10 +1046,11 @@
 
 	if (sge_count) {
 		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
-			mfi_sgl->sge_skinny[i].length = sg_dma_len(os_sgl);
+			mfi_sgl->sge_skinny[i].length =
+				cpu_to_le32(sg_dma_len(os_sgl));
 			mfi_sgl->sge_skinny[i].phys_addr =
-						sg_dma_address(os_sgl);
-			mfi_sgl->sge_skinny[i].flag = 0;
+				cpu_to_le64(sg_dma_address(os_sgl));
+			mfi_sgl->sge_skinny[i].flag = cpu_to_le32(0);
 		}
 	}
 	return sge_count;
@@ -1155,8 +1159,8 @@
 	pthru->cdb_len = scp->cmd_len;
 	pthru->timeout = 0;
 	pthru->pad_0 = 0;
-	pthru->flags = flags;
-	pthru->data_xfer_len = scsi_bufflen(scp);
+	pthru->flags = cpu_to_le16(flags);
+	pthru->data_xfer_len = cpu_to_le32(scsi_bufflen(scp));
 
 	memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
 
@@ -1168,18 +1172,18 @@
 		if ((scp->request->timeout / HZ) > 0xFFFF)
 			pthru->timeout = 0xFFFF;
 		else
-			pthru->timeout = scp->request->timeout / HZ;
+			pthru->timeout = cpu_to_le16(scp->request->timeout / HZ);
 	}
 
 	/*
 	 * Construct SGL
 	 */
 	if (instance->flag_ieee == 1) {
-		pthru->flags |= MFI_FRAME_SGL64;
+		pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
 		pthru->sge_count = megasas_make_sgl_skinny(instance, scp,
 						      &pthru->sgl);
 	} else if (IS_DMA64) {
-		pthru->flags |= MFI_FRAME_SGL64;
+		pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
 		pthru->sge_count = megasas_make_sgl64(instance, scp,
 						      &pthru->sgl);
 	} else
@@ -1196,8 +1200,10 @@
 	 * Sense info specific
 	 */
 	pthru->sense_len = SCSI_SENSE_BUFFERSIZE;
-	pthru->sense_buf_phys_addr_hi = 0;
-	pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr;
+	pthru->sense_buf_phys_addr_hi =
+		cpu_to_le32(upper_32_bits(cmd->sense_phys_addr));
+	pthru->sense_buf_phys_addr_lo =
+		cpu_to_le32(lower_32_bits(cmd->sense_phys_addr));
 
 	/*
 	 * Compute the total number of frames this command consumes. FW uses
@@ -1248,7 +1254,7 @@
 	ldio->timeout = 0;
 	ldio->reserved_0 = 0;
 	ldio->pad_0 = 0;
-	ldio->flags = flags;
+	ldio->flags = cpu_to_le16(flags);
 	ldio->start_lba_hi = 0;
 	ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0;
 
@@ -1256,52 +1262,59 @@
 	 * 6-byte READ(0x08) or WRITE(0x0A) cdb
 	 */
 	if (scp->cmd_len == 6) {
-		ldio->lba_count = (u32) scp->cmnd[4];
-		ldio->start_lba_lo = ((u32) scp->cmnd[1] << 16) |
-		    ((u32) scp->cmnd[2] << 8) | (u32) scp->cmnd[3];
+		ldio->lba_count = cpu_to_le32((u32) scp->cmnd[4]);
+		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[1] << 16) |
+						 ((u32) scp->cmnd[2] << 8) |
+						 (u32) scp->cmnd[3]);
 
-		ldio->start_lba_lo &= 0x1FFFFF;
+		ldio->start_lba_lo &= cpu_to_le32(0x1FFFFF);
 	}
 
 	/*
 	 * 10-byte READ(0x28) or WRITE(0x2A) cdb
 	 */
 	else if (scp->cmd_len == 10) {
-		ldio->lba_count = (u32) scp->cmnd[8] |
-		    ((u32) scp->cmnd[7] << 8);
-		ldio->start_lba_lo = ((u32) scp->cmnd[2] << 24) |
-		    ((u32) scp->cmnd[3] << 16) |
-		    ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
+		ldio->lba_count = cpu_to_le32((u32) scp->cmnd[8] |
+					      ((u32) scp->cmnd[7] << 8));
+		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
+						 ((u32) scp->cmnd[3] << 16) |
+						 ((u32) scp->cmnd[4] << 8) |
+						 (u32) scp->cmnd[5]);
 	}
 
 	/*
 	 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
 	 */
 	else if (scp->cmd_len == 12) {
-		ldio->lba_count = ((u32) scp->cmnd[6] << 24) |
-		    ((u32) scp->cmnd[7] << 16) |
-		    ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];
+		ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
+					      ((u32) scp->cmnd[7] << 16) |
+					      ((u32) scp->cmnd[8] << 8) |
+					      (u32) scp->cmnd[9]);
 
-		ldio->start_lba_lo = ((u32) scp->cmnd[2] << 24) |
-		    ((u32) scp->cmnd[3] << 16) |
-		    ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
+		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
+						 ((u32) scp->cmnd[3] << 16) |
+						 ((u32) scp->cmnd[4] << 8) |
+						 (u32) scp->cmnd[5]);
 	}
 
 	/*
 	 * 16-byte READ(0x88) or WRITE(0x8A) cdb
 	 */
 	else if (scp->cmd_len == 16) {
-		ldio->lba_count = ((u32) scp->cmnd[10] << 24) |
-		    ((u32) scp->cmnd[11] << 16) |
-		    ((u32) scp->cmnd[12] << 8) | (u32) scp->cmnd[13];
+		ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[10] << 24) |
+					      ((u32) scp->cmnd[11] << 16) |
+					      ((u32) scp->cmnd[12] << 8) |
+					      (u32) scp->cmnd[13]);
 
-		ldio->start_lba_lo = ((u32) scp->cmnd[6] << 24) |
-		    ((u32) scp->cmnd[7] << 16) |
-		    ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];
+		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
+						 ((u32) scp->cmnd[7] << 16) |
+						 ((u32) scp->cmnd[8] << 8) |
+						 (u32) scp->cmnd[9]);
 
-		ldio->start_lba_hi = ((u32) scp->cmnd[2] << 24) |
-		    ((u32) scp->cmnd[3] << 16) |
-		    ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
+		ldio->start_lba_hi = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
+						 ((u32) scp->cmnd[3] << 16) |
+						 ((u32) scp->cmnd[4] << 8) |
+						 (u32) scp->cmnd[5]);
 
 	}
 
@@ -1309,11 +1322,11 @@
 	 * Construct SGL
 	 */
 	if (instance->flag_ieee) {
-		ldio->flags |= MFI_FRAME_SGL64;
+		ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
 		ldio->sge_count = megasas_make_sgl_skinny(instance, scp,
 					      &ldio->sgl);
 	} else if (IS_DMA64) {
-		ldio->flags |= MFI_FRAME_SGL64;
+		ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
 		ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl);
 	} else
 		ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);
@@ -1329,7 +1342,7 @@
 	 */
 	ldio->sense_len = SCSI_SENSE_BUFFERSIZE;
 	ldio->sense_buf_phys_addr_hi = 0;
-	ldio->sense_buf_phys_addr_lo = cmd->sense_phys_addr;
+	ldio->sense_buf_phys_addr_lo = cpu_to_le32(cmd->sense_phys_addr);
 
 	/*
 	 * Compute the total number of frames this command consumes. FW uses
@@ -1400,20 +1413,32 @@
 			ldio = (struct megasas_io_frame *)cmd->frame;
 			mfi_sgl = &ldio->sgl;
 			sgcount = ldio->sge_count;
-			printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",instance->host->host_no, cmd->frame_count,ldio->cmd,ldio->target_id, ldio->start_lba_lo,ldio->start_lba_hi,ldio->sense_buf_phys_addr_lo,sgcount);
+			printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x,"
+			" lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
+			instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id,
+			le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi),
+			le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount);
 		}
 		else {
 			pthru = (struct megasas_pthru_frame *) cmd->frame;
 			mfi_sgl = &pthru->sgl;
 			sgcount = pthru->sge_count;
-			printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",instance->host->host_no,cmd->frame_count,pthru->cmd,pthru->target_id,pthru->lun,pthru->cdb_len , pthru->data_xfer_len,pthru->sense_buf_phys_addr_lo,sgcount);
+			printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, "
+			"lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
+			instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id,
+			pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len),
+			le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount);
 		}
 	if(megasas_dbg_lvl & MEGASAS_DBG_LVL){
 		for (n = 0; n < sgcount; n++){
 			if (IS_DMA64)
-				printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%08lx ",mfi_sgl->sge64[n].length , (unsigned long)mfi_sgl->sge64[n].phys_addr) ;
+				printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%llx ",
+					le32_to_cpu(mfi_sgl->sge64[n].length),
+					le64_to_cpu(mfi_sgl->sge64[n].phys_addr));
 			else
-				printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%x ",mfi_sgl->sge32[n].length , mfi_sgl->sge32[n].phys_addr) ;
+				printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%x ",
+					le32_to_cpu(mfi_sgl->sge32[n].length),
+					le32_to_cpu(mfi_sgl->sge32[n].phys_addr));
 			}
 		}
 		printk(KERN_ERR "\n");
@@ -1674,11 +1699,11 @@
 
 	spin_lock_irqsave(&instance->completion_lock, flags);
 
-	producer = *instance->producer;
-	consumer = *instance->consumer;
+	producer = le32_to_cpu(*instance->producer);
+	consumer = le32_to_cpu(*instance->consumer);
 
 	while (consumer != producer) {
-		context = instance->reply_queue[consumer];
+		context = le32_to_cpu(instance->reply_queue[consumer]);
 		if (context >= instance->max_fw_cmds) {
 			printk(KERN_ERR "Unexpected context value %x\n",
 				context);
@@ -1695,7 +1720,7 @@
 		}
 	}
 
-	*instance->consumer = producer;
+	*instance->consumer = cpu_to_le32(producer);
 
 	spin_unlock_irqrestore(&instance->completion_lock, flags);
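The drain loop above treats the producer index, consumer index and each reply-queue entry as little-endian values shared with the firmware. A host-only model of the same flow (ring size and context values invented; the real loop wraps the consumer at the firmware command count, not a fixed size):

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 4	/* invented; the driver sizes this from max_fw_cmds */

int main(void)
{
	uint32_t reply_queue[RING_SIZE] = { 7, 3, 9, 0 };	/* invented contexts */
	uint32_t producer = 3, consumer = 0;	/* le32_to_cpu()'d in the driver */

	while (consumer != producer) {
		uint32_t context = reply_queue[consumer];

		printf("completing command with context %u\n", context);
		if (++consumer == RING_SIZE)
			consumer = 0;	/* wrap, as the driver does */
	}
	/* write the index back (cpu_to_le32()'d in the driver) so the
	 * firmware sees how far the host has drained the ring */
	printf("consumer index now %u\n", consumer);
	return 0;
}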
 
@@ -1716,7 +1741,7 @@
 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
 	(instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
 	(instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
-		*instance->consumer     = MEGASAS_ADPRESET_INPROG_SIGN;
+		*instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
 	}
 	instance->instancet->disable_intr(instance);
 	instance->adprecovery   = MEGASAS_ADPRESET_SM_INFAULT;
@@ -2186,6 +2211,7 @@
 	struct megasas_header *hdr = &cmd->frame->hdr;
 	unsigned long flags;
 	struct fusion_context *fusion = instance->ctrl_context;
+	u32 opcode;
 
 	/* flag for the retry reset */
 	cmd->retry_for_fw_reset = 0;
@@ -2287,9 +2313,10 @@
 	case MFI_CMD_SMP:
 	case MFI_CMD_STP:
 	case MFI_CMD_DCMD:
+		opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
 		/* Check for LD map update */
-		if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) &&
-		    (cmd->frame->dcmd.mbox.b[1] == 1)) {
+		if ((opcode == MR_DCMD_LD_MAP_GET_INFO)
+			&& (cmd->frame->dcmd.mbox.b[1] == 1)) {
 			fusion->fast_path_io = 0;
 			spin_lock_irqsave(instance->host->host_lock, flags);
 			if (cmd->frame->hdr.cmd_status != 0) {
@@ -2323,8 +2350,8 @@
 					       flags);
 			break;
 		}
-		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
-			cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
+		if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
+		    opcode == MR_DCMD_CTRL_EVENT_GET) {
 			spin_lock_irqsave(&poll_aen_lock, flags);
 			megasas_poll_wait_aen = 0;
 			spin_unlock_irqrestore(&poll_aen_lock, flags);
@@ -2333,7 +2360,7 @@
 		/*
 		 * See if got an event notification
 		 */
-		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT)
+		if (opcode == MR_DCMD_CTRL_EVENT_WAIT)
 			megasas_service_aen(instance, cmd);
 		else
 			megasas_complete_int_cmd(instance, cmd);
@@ -2606,7 +2633,7 @@
 					PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
 
 				*instance->consumer =
-					MEGASAS_ADPRESET_INPROG_SIGN;
+					cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
 			}
 
 
@@ -2983,7 +3010,7 @@
 		}
 
 		memset(cmd->frame, 0, total_sz);
-		cmd->frame->io.context = cmd->index;
+		cmd->frame->io.context = cpu_to_le32(cmd->index);
 		cmd->frame->io.pad_0 = 0;
 		if ((instance->pdev->device != PCI_DEVICE_ID_LSI_FUSION) &&
 		    (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) &&
@@ -3143,13 +3170,13 @@
 	dcmd->cmd = MFI_CMD_DCMD;
 	dcmd->cmd_status = 0xFF;
 	dcmd->sge_count = 1;
-	dcmd->flags = MFI_FRAME_DIR_READ;
+	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
 	dcmd->timeout = 0;
 	dcmd->pad_0 = 0;
-	dcmd->data_xfer_len = MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST);
-	dcmd->opcode = MR_DCMD_PD_LIST_QUERY;
-	dcmd->sgl.sge32[0].phys_addr = ci_h;
-	dcmd->sgl.sge32[0].length = MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST);
+	dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
+	dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY);
+	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
+	dcmd->sgl.sge32[0].length = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
 
 	if (!megasas_issue_polled(instance, cmd)) {
 		ret = 0;
@@ -3164,16 +3191,16 @@
 	pd_addr = ci->addr;
 
 	if ( ret == 0 &&
-		(ci->count <
+	     (le32_to_cpu(ci->count) <
 		  (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL))) {
 
 		memset(instance->pd_list, 0,
 			MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
 
-		for (pd_index = 0; pd_index < ci->count; pd_index++) {
+		for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) {
 
 			instance->pd_list[pd_addr->deviceId].tid	=
-							pd_addr->deviceId;
+				le16_to_cpu(pd_addr->deviceId);
 			instance->pd_list[pd_addr->deviceId].driveType	=
 							pd_addr->scsiDevType;
 			instance->pd_list[pd_addr->deviceId].driveState	=
@@ -3207,6 +3234,7 @@
 	struct megasas_dcmd_frame *dcmd;
 	struct MR_LD_LIST *ci;
 	dma_addr_t ci_h = 0;
+	u32 ld_count;
 
 	cmd = megasas_get_cmd(instance);
 
@@ -3233,12 +3261,12 @@
 	dcmd->cmd = MFI_CMD_DCMD;
 	dcmd->cmd_status = 0xFF;
 	dcmd->sge_count = 1;
-	dcmd->flags = MFI_FRAME_DIR_READ;
+	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
 	dcmd->timeout = 0;
-	dcmd->data_xfer_len = sizeof(struct MR_LD_LIST);
-	dcmd->opcode = MR_DCMD_LD_GET_LIST;
-	dcmd->sgl.sge32[0].phys_addr = ci_h;
-	dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST);
+	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST));
+	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST);
+	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
+	dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_LIST));
 	dcmd->pad_0  = 0;
 
 	if (!megasas_issue_polled(instance, cmd)) {
@@ -3247,12 +3275,14 @@
 		ret = -1;
 	}
 
+	ld_count = le32_to_cpu(ci->ldCount);
+
 	/* the following function will get the instance PD LIST */
 
-	if ((ret == 0) && (ci->ldCount <= MAX_LOGICAL_DRIVES)) {
+	if ((ret == 0) && (ld_count <= MAX_LOGICAL_DRIVES)) {
 		memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
 
-		for (ld_index = 0; ld_index < ci->ldCount; ld_index++) {
+		for (ld_index = 0; ld_index < ld_count; ld_index++) {
 			if (ci->ldList[ld_index].state != 0) {
 				ids = ci->ldList[ld_index].ref.targetId;
 				instance->ld_ids[ids] =
@@ -3271,6 +3301,87 @@
 }
 
 /**
+ * megasas_ld_list_query -	Returns FW's ld_list structure
+ * @instance:				Adapter soft state
+ * @query_type:				ld_list query type
+ *
+ * Issues an internal command (DCMD) to get the FW's controller LD
+ * list structure.  This information is mainly used to find out which
+ * LDs are exposed to the host.
+ */
+static int
+megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
+{
+	int ret = 0, ld_index = 0, ids = 0;
+	struct megasas_cmd *cmd;
+	struct megasas_dcmd_frame *dcmd;
+	struct MR_LD_TARGETID_LIST *ci;
+	dma_addr_t ci_h = 0;
+	u32 tgtid_count;
+
+	cmd = megasas_get_cmd(instance);
+
+	if (!cmd) {
+		printk(KERN_WARNING
+		       "megasas: (megasas_ld_list_query): Failed to get cmd\n");
+		return -ENOMEM;
+	}
+
+	dcmd = &cmd->frame->dcmd;
+
+	ci = pci_alloc_consistent(instance->pdev,
+				  sizeof(struct MR_LD_TARGETID_LIST), &ci_h);
+
+	if (!ci) {
+		printk(KERN_WARNING
+		       "megasas: Failed to alloc mem for ld_list_query\n");
+		megasas_return_cmd(instance, cmd);
+		return -ENOMEM;
+	}
+
+	memset(ci, 0, sizeof(*ci));
+	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+	dcmd->mbox.b[0] = query_type;
+
+	dcmd->cmd = MFI_CMD_DCMD;
+	dcmd->cmd_status = 0xFF;
+	dcmd->sge_count = 1;
+	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+	dcmd->timeout = 0;
+	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
+	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY);
+	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
+	dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
+	dcmd->pad_0  = 0;
+
+	if (!megasas_issue_polled(instance, cmd) && !dcmd->cmd_status) {
+		ret = 0;
+	} else {
+		/* On failure, call older LD list DCMD */
+		ret = 1;
+	}
+
+	tgtid_count = le32_to_cpu(ci->count);
+
+	if ((ret == 0) && (tgtid_count <= (MAX_LOGICAL_DRIVES))) {
+		memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
+		for (ld_index = 0; ld_index < tgtid_count; ld_index++) {
+			ids = ci->targetId[ld_index];
+			instance->ld_ids[ids] = ci->targetId[ld_index];
+		}
+	}
+
+	pci_free_consistent(instance->pdev, sizeof(struct MR_LD_TARGETID_LIST),
+			    ci, ci_h);
+
+	megasas_return_cmd(instance, cmd);
+
+	return ret;
+}
+
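Callers of this helper treat a non-zero return as "query DCMD unsupported" and fall back to the older LD list command, as the hunks further down show:

	if (megasas_ld_list_query(instance,
				  MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
		megasas_get_ld_list(instance);	/* older firmware path */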
+/**
  * megasas_get_controller_info -	Returns FW's controller structure
  * @instance:				Adapter soft state
  * @ctrl_info:				Controller information structure
@@ -3313,13 +3424,13 @@
 	dcmd->cmd = MFI_CMD_DCMD;
 	dcmd->cmd_status = 0xFF;
 	dcmd->sge_count = 1;
-	dcmd->flags = MFI_FRAME_DIR_READ;
+	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
 	dcmd->timeout = 0;
 	dcmd->pad_0 = 0;
-	dcmd->data_xfer_len = sizeof(struct megasas_ctrl_info);
-	dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
-	dcmd->sgl.sge32[0].phys_addr = ci_h;
-	dcmd->sgl.sge32[0].length = sizeof(struct megasas_ctrl_info);
+	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info));
+	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO);
+	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
+	dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_ctrl_info));
 
 	if (!megasas_issue_polled(instance, cmd)) {
 		ret = 0;
@@ -3375,17 +3486,20 @@
 	memset(initq_info, 0, sizeof(struct megasas_init_queue_info));
 	init_frame->context = context;
 
-	initq_info->reply_queue_entries = instance->max_fw_cmds + 1;
-	initq_info->reply_queue_start_phys_addr_lo = instance->reply_queue_h;
+	initq_info->reply_queue_entries = cpu_to_le32(instance->max_fw_cmds + 1);
+	initq_info->reply_queue_start_phys_addr_lo = cpu_to_le32(instance->reply_queue_h);
 
-	initq_info->producer_index_phys_addr_lo = instance->producer_h;
-	initq_info->consumer_index_phys_addr_lo = instance->consumer_h;
+	initq_info->producer_index_phys_addr_lo = cpu_to_le32(instance->producer_h);
+	initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h);
 
 	init_frame->cmd = MFI_CMD_INIT;
 	init_frame->cmd_status = 0xFF;
-	init_frame->queue_info_new_phys_addr_lo = initq_info_h;
+	init_frame->queue_info_new_phys_addr_lo =
+		cpu_to_le32(lower_32_bits(initq_info_h));
+	init_frame->queue_info_new_phys_addr_hi =
+		cpu_to_le32(upper_32_bits(initq_info_h));
 
-	init_frame->data_xfer_len = sizeof(struct megasas_init_queue_info);
+	init_frame->data_xfer_len = cpu_to_le32(sizeof(struct megasas_init_queue_info));
 
 	/*
 	 * disable the intr before firing the init frame to FW
@@ -3648,7 +3762,9 @@
 	megasas_get_pd_list(instance);
 
 	memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
-	megasas_get_ld_list(instance);
+	if (megasas_ld_list_query(instance,
+				  MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
+		megasas_get_ld_list(instance);
 
 	ctrl_info = kmalloc(sizeof(struct megasas_ctrl_info), GFP_KERNEL);
 
@@ -3665,8 +3781,8 @@
 	if (ctrl_info && !megasas_get_ctrl_info(instance, ctrl_info)) {
 
 		max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
-		    ctrl_info->max_strips_per_io;
-		max_sectors_2 = ctrl_info->max_request_size;
+			le16_to_cpu(ctrl_info->max_strips_per_io);
+		max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size);
 
 		tmp_sectors = min_t(u32, max_sectors_1 , max_sectors_2);
 
@@ -3675,14 +3791,18 @@
 			instance->is_imr = 0;
 			dev_info(&instance->pdev->dev, "Controller type: MR,"
 				"Memory size is: %dMB\n",
-				ctrl_info->memory_size);
+				le16_to_cpu(ctrl_info->memory_size));
 		} else {
 			instance->is_imr = 1;
 			dev_info(&instance->pdev->dev,
 				"Controller type: iMR\n");
 		}
+		/* Convert OnOffProperties to CPU byte order */
+		le32_to_cpus((u32 *)&ctrl_info->properties.OnOffProperties);
 		instance->disableOnlineCtrlReset =
 		ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
+		/* Convert adapterOperations2 to CPU byte order */
+		le32_to_cpus((u32 *)&ctrl_info->adapterOperations2);
 		instance->UnevenSpanSupport =
 			ctrl_info->adapterOperations2.supportUnevenSpans;
 		if (instance->UnevenSpanSupport) {
@@ -3696,7 +3816,6 @@
 
 		}
 	}
-
 	instance->max_sectors_per_req = instance->max_num_sge *
 						PAGE_SIZE / 512;
 	if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
@@ -3802,20 +3921,24 @@
 	dcmd->cmd = MFI_CMD_DCMD;
 	dcmd->cmd_status = 0x0;
 	dcmd->sge_count = 1;
-	dcmd->flags = MFI_FRAME_DIR_READ;
+	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
 	dcmd->timeout = 0;
 	dcmd->pad_0 = 0;
-	dcmd->data_xfer_len = sizeof(struct megasas_evt_log_info);
-	dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO;
-	dcmd->sgl.sge32[0].phys_addr = el_info_h;
-	dcmd->sgl.sge32[0].length = sizeof(struct megasas_evt_log_info);
+	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info));
+	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO);
+	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(el_info_h);
+	dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_log_info));
 
 	megasas_issue_blocked_cmd(instance, cmd);
 
 	/*
 	 * Copy the data back into callers buffer
 	 */
-	memcpy(eli, el_info, sizeof(struct megasas_evt_log_info));
+	eli->newest_seq_num = le32_to_cpu(el_info->newest_seq_num);
+	eli->oldest_seq_num = le32_to_cpu(el_info->oldest_seq_num);
+	eli->clear_seq_num = le32_to_cpu(el_info->clear_seq_num);
+	eli->shutdown_seq_num = le32_to_cpu(el_info->shutdown_seq_num);
+	eli->boot_seq_num = le32_to_cpu(el_info->boot_seq_num);
 
 	pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
 			    el_info, el_info_h);
@@ -3862,6 +3985,7 @@
 	if (instance->aen_cmd) {
 
-		prev_aen.word = instance->aen_cmd->frame->dcmd.mbox.w[1];
+		prev_aen.word =
+			le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]);
 
 		/*
 		 * A class whose enum value is smaller is inclusive of all
@@ -3874,7 +3998,7 @@
 		 * values
 		 */
 		if ((prev_aen.members.class <= curr_aen.members.class) &&
 		    !((prev_aen.members.locale & curr_aen.members.locale) ^
 		      curr_aen.members.locale)) {
 			/*
 			 * Previously issued event registration includes
@@ -3882,7 +4006,7 @@
 			 */
 			return 0;
 		} else {
 			curr_aen.members.locale |= prev_aen.members.locale;
 
 			if (prev_aen.members.class < curr_aen.members.class)
 				curr_aen.members.class = prev_aen.members.class;
@@ -3917,16 +4041,16 @@
 	dcmd->cmd = MFI_CMD_DCMD;
 	dcmd->cmd_status = 0x0;
 	dcmd->sge_count = 1;
-	dcmd->flags = MFI_FRAME_DIR_READ;
+	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
 	dcmd->timeout = 0;
 	dcmd->pad_0 = 0;
+	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail));
+	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_WAIT);
+	dcmd->mbox.w[0] = cpu_to_le32(seq_num);
 	instance->last_seq_num = seq_num;
-	dcmd->data_xfer_len = sizeof(struct megasas_evt_detail);
-	dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
-	dcmd->mbox.w[0] = seq_num;
-	dcmd->mbox.w[1] = curr_aen.word;
-	dcmd->sgl.sge32[0].phys_addr = (u32) instance->evt_detail_h;
-	dcmd->sgl.sge32[0].length = sizeof(struct megasas_evt_detail);
+	dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word);
+	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->evt_detail_h);
+	dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_detail));
 
 	if (instance->aen_cmd != NULL) {
 		megasas_return_cmd(instance, cmd);
@@ -3972,8 +4096,9 @@
 	class_locale.members.locale = MR_EVT_LOCALE_ALL;
 	class_locale.members.class = MR_EVT_CLASS_DEBUG;
 
-	return megasas_register_aen(instance, eli.newest_seq_num + 1,
-				    class_locale.word);
+	return megasas_register_aen(instance,
+			le32_to_cpu(eli.newest_seq_num) + 1,
+			class_locale.word);
 }
 
 /**
@@ -4068,6 +4193,7 @@
 		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
 			goto fail_set_dma_mask;
 	}
+
 	return 0;
 
 fail_set_dma_mask:
@@ -4386,11 +4512,11 @@
 	dcmd->cmd = MFI_CMD_DCMD;
 	dcmd->cmd_status = 0x0;
 	dcmd->sge_count = 0;
-	dcmd->flags = MFI_FRAME_DIR_NONE;
+	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
 	dcmd->timeout = 0;
 	dcmd->pad_0 = 0;
 	dcmd->data_xfer_len = 0;
-	dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
+	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH);
 	dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
 
 	megasas_issue_blocked_cmd(instance, cmd);
@@ -4431,11 +4557,11 @@
 	dcmd->cmd = MFI_CMD_DCMD;
 	dcmd->cmd_status = 0x0;
 	dcmd->sge_count = 0;
-	dcmd->flags = MFI_FRAME_DIR_NONE;
+	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
 	dcmd->timeout = 0;
 	dcmd->pad_0 = 0;
 	dcmd->data_xfer_len = 0;
-	dcmd->opcode = opcode;
+	dcmd->opcode = cpu_to_le32(opcode);
 
 	megasas_issue_blocked_cmd(instance, cmd);
 
@@ -4850,10 +4976,11 @@
 	 * alone separately
 	 */
 	memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
-	cmd->frame->hdr.context = cmd->index;
+	cmd->frame->hdr.context = cpu_to_le32(cmd->index);
 	cmd->frame->hdr.pad_0 = 0;
-	cmd->frame->hdr.flags &= ~(MFI_FRAME_IEEE | MFI_FRAME_SGL64 |
-				   MFI_FRAME_SENSE64);
+	cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_IEEE |
+					       MFI_FRAME_SGL64 |
+					       MFI_FRAME_SENSE64));
 
 	/*
 	 * The management interface between applications and the fw uses
@@ -4887,8 +5014,8 @@
 		 * We don't change the dma_coherent_mask, so
 		 * pci_alloc_consistent only returns 32bit addresses
 		 */
-		kern_sge32[i].phys_addr = (u32) buf_handle;
-		kern_sge32[i].length = ioc->sgl[i].iov_len;
+		kern_sge32[i].phys_addr = cpu_to_le32(buf_handle);
+		kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
 
 		/*
 		 * We created a kernel buffer corresponding to the
@@ -4911,7 +5038,7 @@
 
 		sense_ptr =
 		(unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off);
-		*sense_ptr = sense_handle;
+		*sense_ptr = cpu_to_le32(sense_handle);
 	}
 
 	/*
@@ -4971,9 +5098,9 @@
 	for (i = 0; i < ioc->sge_count; i++) {
 		if (kbuff_arr[i])
 			dma_free_coherent(&instance->pdev->dev,
-					  kern_sge32[i].length,
+					  le32_to_cpu(kern_sge32[i].length),
 					  kbuff_arr[i],
-					  kern_sge32[i].phys_addr);
+					  le32_to_cpu(kern_sge32[i].phys_addr));
 	}
 
 	megasas_return_cmd(instance, cmd);
@@ -5327,7 +5454,7 @@
 	host = instance->host;
 	if (instance->evt_detail) {
 
-		switch (instance->evt_detail->code) {
+		switch (le32_to_cpu(instance->evt_detail->code)) {
 		case MR_EVT_PD_INSERTED:
 			if (megasas_get_pd_list(instance) == 0) {
 			for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
@@ -5389,7 +5516,9 @@
 		case MR_EVT_LD_OFFLINE:
 		case MR_EVT_CFG_CLEARED:
 		case MR_EVT_LD_DELETED:
-			megasas_get_ld_list(instance);
+			if (megasas_ld_list_query(instance,
+					MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
+				megasas_get_ld_list(instance);
 			for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
 				for (j = 0;
 				j < MEGASAS_MAX_DEV_PER_CHANNEL;
@@ -5399,7 +5528,7 @@
 				(i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
 
 				sdev1 = scsi_device_lookup(host,
-					i + MEGASAS_MAX_LD_CHANNELS,
+					MEGASAS_MAX_PD_CHANNELS + i,
 					j,
 					0);
 
@@ -5418,7 +5547,9 @@
 			doscan = 0;
 			break;
 		case MR_EVT_LD_CREATED:
-			megasas_get_ld_list(instance);
+			if (megasas_ld_list_query(instance,
+					MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
+				megasas_get_ld_list(instance);
 			for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
 				for (j = 0;
 					j < MEGASAS_MAX_DEV_PER_CHANNEL;
@@ -5427,14 +5558,14 @@
 					(i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
 
 					sdev1 = scsi_device_lookup(host,
-						i+MEGASAS_MAX_LD_CHANNELS,
+						MEGASAS_MAX_PD_CHANNELS + i,
 						j, 0);
 
 					if (instance->ld_ids[ld_index] !=
 								0xff) {
 						if (!sdev1) {
 							scsi_add_device(host,
-								i + 2,
+						MEGASAS_MAX_PD_CHANNELS + i,
 								j, 0);
 						}
 					}
@@ -5483,18 +5614,20 @@
 			}
 		}
 
-		megasas_get_ld_list(instance);
+		if (megasas_ld_list_query(instance,
+					  MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
+			megasas_get_ld_list(instance);
 		for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
 			for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
 				ld_index =
 				(i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
 
 				sdev1 = scsi_device_lookup(host,
-					i+MEGASAS_MAX_LD_CHANNELS, j, 0);
+					MEGASAS_MAX_PD_CHANNELS + i, j, 0);
 				if (instance->ld_ids[ld_index] != 0xff) {
 					if (!sdev1) {
 						scsi_add_device(host,
-								i+2,
+						MEGASAS_MAX_PD_CHANNELS + i,
 								j, 0);
 					} else {
 						scsi_device_put(sdev1);
@@ -5514,7 +5647,7 @@
 		return ;
 	}
 
-	seq_num = instance->evt_detail->seq_num + 1;
+	seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1;
 
 	/* Register AEN with FW for latest sequence number plus 1 */
 	class_locale.members.reserved = 0;
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index 4f401f7..e24b6eb 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -126,17 +126,17 @@
 	return map->raidMap.ldSpanMap[ld].dataArmMap[armIdx];
 }
 
-static u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_FW_RAID_MAP_ALL *map)
+u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_FW_RAID_MAP_ALL *map)
 {
-	return map->raidMap.arMapInfo[ar].pd[arm];
+	return le16_to_cpu(map->raidMap.arMapInfo[ar].pd[arm]);
 }
 
-static u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_FW_RAID_MAP_ALL *map)
+u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_FW_RAID_MAP_ALL *map)
 {
-	return map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef;
+	return le16_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef);
 }
 
-static u16 MR_PdDevHandleGet(u32 pd, struct MR_FW_RAID_MAP_ALL *map)
+u16 MR_PdDevHandleGet(u32 pd, struct MR_FW_RAID_MAP_ALL *map)
 {
 	return map->raidMap.devHndlInfo[pd].curDevHdl;
 }
@@ -148,7 +148,7 @@
 
 u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map)
 {
-	return map->raidMap.ldTgtIdToLd[ldTgtId];
+	return le16_to_cpu(map->raidMap.ldTgtIdToLd[ldTgtId]);
 }
 
 static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span,
@@ -167,18 +167,22 @@
 	struct LD_LOAD_BALANCE_INFO *lbInfo = fusion->load_balance_info;
 	PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
 	struct MR_FW_RAID_MAP *pFwRaidMap = &map->raidMap;
+	struct MR_LD_RAID         *raid;
+	int ldCount, num_lds;
+	u16 ld;
 
-	if (pFwRaidMap->totalSize !=
+	if (le32_to_cpu(pFwRaidMap->totalSize) !=
 	    (sizeof(struct MR_FW_RAID_MAP) -sizeof(struct MR_LD_SPAN_MAP) +
-	     (sizeof(struct MR_LD_SPAN_MAP) *pFwRaidMap->ldCount))) {
+	     (sizeof(struct MR_LD_SPAN_MAP) * le32_to_cpu(pFwRaidMap->ldCount)))) {
 		printk(KERN_ERR "megasas: map info structure size 0x%x is not matching with ld count\n",
 		       (unsigned int)((sizeof(struct MR_FW_RAID_MAP) -
 				       sizeof(struct MR_LD_SPAN_MAP)) +
 				      (sizeof(struct MR_LD_SPAN_MAP) *
-				       pFwRaidMap->ldCount)));
+					le32_to_cpu(pFwRaidMap->ldCount))));
 		printk(KERN_ERR "megasas: span map %x, pFwRaidMap->totalSize "
 		       ": %x\n", (unsigned int)sizeof(struct MR_LD_SPAN_MAP),
-		       pFwRaidMap->totalSize);
+			le32_to_cpu(pFwRaidMap->totalSize));
 		return 0;
 	}
 
@@ -187,6 +191,15 @@
 
 	mr_update_load_balance_params(map, lbInfo);
 
+	num_lds = le32_to_cpu(map->raidMap.ldCount);
+
+	/* Convert RAID capability values to CPU byte order */
+	for (ldCount = 0; ldCount < num_lds; ldCount++) {
+		ld = MR_TargetIdToLdGet(ldCount, map);
+		raid = MR_LdRaidGet(ld, map);
+		le32_to_cpus((u32 *)&raid->capability);
+	}
+
 	return 1;
 }
 
@@ -200,23 +213,20 @@
 
 	for (span = 0; span < raid->spanDepth; span++, pSpanBlock++) {
 
-		for (j = 0; j < pSpanBlock->block_span_info.noElements; j++) {
+		for (j = 0; j < le32_to_cpu(pSpanBlock->block_span_info.noElements); j++) {
 			quad = &pSpanBlock->block_span_info.quad[j];
 
-			if (quad->diff == 0)
+			if (le32_to_cpu(quad->diff) == 0)
 				return SPAN_INVALID;
-			if (quad->logStart <= row  &&  row <= quad->logEnd  &&
-			    (mega_mod64(row-quad->logStart, quad->diff)) == 0) {
+			if (le64_to_cpu(quad->logStart) <= row && row <=
+				le64_to_cpu(quad->logEnd) && (mega_mod64(row - le64_to_cpu(quad->logStart),
+				le32_to_cpu(quad->diff))) == 0) {
 				if (span_blk != NULL) {
 					u64  blk, debugBlk;
-					blk =
-						mega_div64_32(
-							(row-quad->logStart),
-							quad->diff);
+					blk = mega_div64_32(row - le64_to_cpu(quad->logStart),
+							    le32_to_cpu(quad->diff));
 					debugBlk = blk;
 
-					blk = (blk + quad->offsetInSpan) <<
-						raid->stripeShift;
+					blk = (blk + le64_to_cpu(quad->offsetInSpan)) << raid->stripeShift;
 					*span_blk = blk;
 				}
 				return span;
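A worked instance of the quad arithmetic above, with all values invented: row 132 inside a quad starting at logStart 100 with diff 4 and offsetInSpan 10, on an LD whose stripeShift is 3.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t logStart = 100, logEnd = 196, offsetInSpan = 10, row = 132;
	uint32_t diff = 4, stripeShift = 3;	/* all values invented */

	if (logStart <= row && row <= logEnd &&
	    (row - logStart) % diff == 0) {	/* mega_mod64() in the driver */
		uint64_t blk = (row - logStart) / diff;	/* mega_div64_32() */

		blk = (blk + offsetInSpan) << stripeShift;
		printf("row %llu -> span block %llu\n",	/* prints 132 -> 144 */
		       (unsigned long long)row, (unsigned long long)blk);
	}
	return 0;
}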
@@ -257,8 +267,8 @@
 		for (span = 0; span < raid->spanDepth; span++)
 			dev_dbg(&instance->pdev->dev, "Span=%x,"
 			" number of quads=%x\n", span,
-			map->raidMap.ldSpanMap[ld].spanBlock[span].
-			block_span_info.noElements);
+			le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
+			block_span_info.noElements));
 		for (element = 0; element < MAX_QUAD_DEPTH; element++) {
 			span_set = &(ldSpanInfo[ld].span_set[element]);
 			if (span_set->span_row_data_width == 0)
@@ -286,22 +296,22 @@
 				(long unsigned int)span_set->data_strip_end);
 
 			for (span = 0; span < raid->spanDepth; span++) {
-				if (map->raidMap.ldSpanMap[ld].spanBlock[span].
-					block_span_info.noElements >=
+				if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
+					block_span_info.noElements) >=
 					element + 1) {
 					quad = &map->raidMap.ldSpanMap[ld].
 						spanBlock[span].block_span_info.
 						quad[element];
 				dev_dbg(&instance->pdev->dev, "Span=%x,"
 					"Quad=%x, diff=%x\n", span,
-					element, quad->diff);
+					element, le32_to_cpu(quad->diff));
 				dev_dbg(&instance->pdev->dev,
 					"offset_in_span=0x%08lx\n",
-					(long unsigned int)quad->offsetInSpan);
+					(long unsigned int)le64_to_cpu(quad->offsetInSpan));
 				dev_dbg(&instance->pdev->dev,
 					"logical start=0x%08lx, end=0x%08lx\n",
-					(long unsigned int)quad->logStart,
-					(long unsigned int)quad->logEnd);
+					(long unsigned int)le64_to_cpu(quad->logStart),
+					(long unsigned int)le64_to_cpu(quad->logEnd));
 				}
 			}
 		}
@@ -348,23 +358,23 @@
 			continue;
 
 		for (span = 0; span < raid->spanDepth; span++)
-			if (map->raidMap.ldSpanMap[ld].spanBlock[span].
-				block_span_info.noElements >= info+1) {
+			if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
+				block_span_info.noElements) >= info+1) {
 				quad = &map->raidMap.ldSpanMap[ld].
 					spanBlock[span].
 					block_span_info.quad[info];
-				if (quad->diff == 0)
+				if (le32_to_cpu(quad->diff) == 0)
 					return SPAN_INVALID;
-				if (quad->logStart <= row  &&
-					row <= quad->logEnd  &&
-					(mega_mod64(row - quad->logStart,
-						quad->diff)) == 0) {
+				if (le64_to_cpu(quad->logStart) <= row  &&
+					row <= le64_to_cpu(quad->logEnd)  &&
+					(mega_mod64(row - le64_to_cpu(quad->logStart),
+						le32_to_cpu(quad->diff))) == 0) {
 					if (span_blk != NULL) {
 						u64  blk;
 						blk = mega_div64_32
-						    ((row - quad->logStart),
-						    quad->diff);
-						blk = (blk + quad->offsetInSpan)
+						    ((row - le64_to_cpu(quad->logStart)),
+						    le32_to_cpu(quad->diff));
+						blk = (blk + le64_to_cpu(quad->offsetInSpan))
 							 << raid->stripeShift;
 						*span_blk = blk;
 					}
@@ -415,8 +425,8 @@
 		span_set_Row = mega_div64_32(span_set_Strip,
 				span_set->span_row_data_width) * span_set->diff;
 		for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
-			if (map->raidMap.ldSpanMap[ld].spanBlock[span].
-				block_span_info.noElements >= info+1) {
+			if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
+				block_span_info.noElements) >= info+1) {
 				if (strip_offset >=
 					span_set->strip_offset[span])
 					span_offset++;
@@ -480,18 +490,18 @@
 			continue;
 
 		for (span = 0; span < raid->spanDepth; span++)
-			if (map->raidMap.ldSpanMap[ld].spanBlock[span].
-				block_span_info.noElements >= info+1) {
+			if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
+				block_span_info.noElements) >= info+1) {
 				quad = &map->raidMap.ldSpanMap[ld].
 					spanBlock[span].block_span_info.quad[info];
-				if (quad->logStart <= row  &&
-					row <= quad->logEnd  &&
-					mega_mod64((row - quad->logStart),
-					quad->diff) == 0) {
+				if (le64_to_cpu(quad->logStart) <= row  &&
+					row <= le64_to_cpu(quad->logEnd)  &&
+					mega_mod64((row - le64_to_cpu(quad->logStart)),
+					le32_to_cpu(quad->diff)) == 0) {
 					strip = mega_div64_32
 						(((row - span_set->data_row_start)
-							- quad->logStart),
-							quad->diff);
+							- le64_to_cpu(quad->logStart)),
+							le32_to_cpu(quad->diff));
 					strip *= span_set->span_row_data_width;
 					strip += span_set->data_strip_start;
 					strip += span_set->strip_offset[span];
@@ -543,8 +553,8 @@
 				span_set->span_row_data_width);
 
 		for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
-			if (map->raidMap.ldSpanMap[ld].spanBlock[span].
-				block_span_info.noElements >= info+1) {
+			if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
+				block_span_info.noElements) >= info+1) {
 				if (strip_offset >=
 					span_set->strip_offset[span])
 					span_offset =
@@ -669,7 +679,7 @@
 		}
 	}
 
-	*pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk;
+	*pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
 	pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
 					physArm;
 	return retval;
@@ -765,7 +775,7 @@
 		}
 	}
 
-	*pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk;
+	*pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
 	pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
 		physArm;
 	return retval;
@@ -784,7 +794,7 @@
 MR_BuildRaidContext(struct megasas_instance *instance,
 		    struct IO_REQUEST_INFO *io_info,
 		    struct RAID_CONTEXT *pRAID_Context,
-		    struct MR_FW_RAID_MAP_ALL *map)
+		    struct MR_FW_RAID_MAP_ALL *map, u8 **raidLUN)
 {
 	struct MR_LD_RAID  *raid;
 	u32         ld, stripSize, stripe_mask;
@@ -965,7 +975,7 @@
 			regSize += stripSize;
 	}
 
-	pRAID_Context->timeoutValue     = map->raidMap.fpPdIoTimeoutSec;
+	pRAID_Context->timeoutValue     = cpu_to_le16(map->raidMap.fpPdIoTimeoutSec);
 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
 		(instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
 		pRAID_Context->regLockFlags = (isRead) ?
@@ -974,9 +984,12 @@
 		pRAID_Context->regLockFlags = (isRead) ?
 			REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
 	pRAID_Context->VirtualDiskTgtId = raid->targetId;
-	pRAID_Context->regLockRowLBA    = regStart;
-	pRAID_Context->regLockLength    = regSize;
+	pRAID_Context->regLockRowLBA    = cpu_to_le64(regStart);
+	pRAID_Context->regLockLength    = cpu_to_le32(regSize);
 	pRAID_Context->configSeqNum	= raid->seqNum;
+	/* save pointer to raid->LUN array */
+	*raidLUN = raid->LUN;
+
 
 	/*Get Phy Params only if FP capable, or else leave it to MR firmware
 	  to do the calculation.*/
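The saved pointer lets the fast-path caller copy the LD's 8-byte LUN straight into the SCSI IO request; the consuming side appears in the fusion hunks below:

	/* caller side, from build_ldio_fusion further down */
	memcpy(io_request->LUN, raidLUN, 8);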
@@ -1047,8 +1060,8 @@
 		raid = MR_LdRaidGet(ld, map);
 		for (element = 0; element < MAX_QUAD_DEPTH; element++) {
 			for (span = 0; span < raid->spanDepth; span++) {
-				if (map->raidMap.ldSpanMap[ld].spanBlock[span].
-					block_span_info.noElements <
+				if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
+					block_span_info.noElements) <
 					element + 1)
 					continue;
 				span_set = &(ldSpanInfo[ld].span_set[element]);
@@ -1056,14 +1069,14 @@
 					spanBlock[span].block_span_info.
 					quad[element];
 
-				span_set->diff = quad->diff;
+				span_set->diff = le32_to_cpu(quad->diff);
 
 				for (count = 0, span_row_width = 0;
 					count < raid->spanDepth; count++) {
-					if (map->raidMap.ldSpanMap[ld].
+					if (le32_to_cpu(map->raidMap.ldSpanMap[ld].
 						spanBlock[count].
 						block_span_info.
-						noElements >= element + 1) {
+						noElements) >= element + 1) {
 						span_set->strip_offset[count] =
 							span_row_width;
 						span_row_width +=
@@ -1077,9 +1090,9 @@
 				}
 
 				span_set->span_row_data_width = span_row_width;
-				span_row = mega_div64_32(((quad->logEnd -
-					quad->logStart) + quad->diff),
-					quad->diff);
+				span_row = mega_div64_32(((le64_to_cpu(quad->logEnd) -
+					le64_to_cpu(quad->logStart)) + le32_to_cpu(quad->diff)),
+					le32_to_cpu(quad->diff));
 
 				if (element == 0) {
 					span_set->log_start_lba = 0;
@@ -1096,7 +1109,7 @@
 
 					span_set->data_row_start = 0;
 					span_set->data_row_end =
-						(span_row * quad->diff) - 1;
+						(span_row * le32_to_cpu(quad->diff)) - 1;
 				} else {
 					span_set_prev = &(ldSpanInfo[ld].
 							span_set[element - 1]);
@@ -1122,7 +1135,7 @@
 						span_set_prev->data_row_end + 1;
 					span_set->data_row_end =
 						span_set->data_row_start +
-						(span_row * quad->diff) - 1;
+						(span_row * le32_to_cpu(quad->diff)) - 1;
 				}
 				break;
 		}
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 417d5f1..f655592 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -72,17 +72,6 @@
 int
 megasas_issue_polled(struct megasas_instance *instance,
 		     struct megasas_cmd *cmd);
-
-u8
-MR_BuildRaidContext(struct megasas_instance *instance,
-		    struct IO_REQUEST_INFO *io_info,
-		    struct RAID_CONTEXT *pRAID_Context,
-		    struct MR_FW_RAID_MAP_ALL *map);
-u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map);
-struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_FW_RAID_MAP_ALL *map);
-
-u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map);
-
 void
 megasas_check_and_restore_queue_depth(struct megasas_instance *instance);
 
@@ -626,23 +615,20 @@
 
 	IOCInitMessage->Function = MPI2_FUNCTION_IOC_INIT;
 	IOCInitMessage->WhoInit	= MPI2_WHOINIT_HOST_DRIVER;
-	IOCInitMessage->MsgVersion = MPI2_VERSION;
-	IOCInitMessage->HeaderVersion = MPI2_HEADER_VERSION;
-	IOCInitMessage->SystemRequestFrameSize =
-		MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4;
+	IOCInitMessage->MsgVersion = cpu_to_le16(MPI2_VERSION);
+	IOCInitMessage->HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
+	IOCInitMessage->SystemRequestFrameSize = cpu_to_le16(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4);
 
-	IOCInitMessage->ReplyDescriptorPostQueueDepth = fusion->reply_q_depth;
-	IOCInitMessage->ReplyDescriptorPostQueueAddress	=
-		fusion->reply_frames_desc_phys;
-	IOCInitMessage->SystemRequestFrameBaseAddress =
-		fusion->io_request_frames_phys;
+	IOCInitMessage->ReplyDescriptorPostQueueDepth = cpu_to_le16(fusion->reply_q_depth);
+	IOCInitMessage->ReplyDescriptorPostQueueAddress	= cpu_to_le64(fusion->reply_frames_desc_phys);
+	IOCInitMessage->SystemRequestFrameBaseAddress = cpu_to_le64(fusion->io_request_frames_phys);
 	IOCInitMessage->HostMSIxVectors = instance->msix_vectors;
 	init_frame = (struct megasas_init_frame *)cmd->frame;
 	memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
 
 	frame_hdr = &cmd->frame->hdr;
 	frame_hdr->cmd_status = 0xFF;
-	frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
+	frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
 
 	init_frame->cmd	= MFI_CMD_INIT;
 	init_frame->cmd_status = 0xFF;
@@ -652,17 +638,24 @@
 		(instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
 		init_frame->driver_operations.
 			mfi_capabilities.support_additional_msix = 1;
+	/* driver supports HA / Remote LUN over Fast Path interface */
+	init_frame->driver_operations.mfi_capabilities.support_fp_remote_lun
+		= 1;
+	/* Convert capability to LE32 */
+	cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities);
 
-	init_frame->queue_info_new_phys_addr_lo = ioc_init_handle;
-	init_frame->data_xfer_len = sizeof(struct MPI2_IOC_INIT_REQUEST);
+	init_frame->queue_info_new_phys_addr_lo = cpu_to_le32((u32)ioc_init_handle);
+	init_frame->data_xfer_len = cpu_to_le32(sizeof(struct MPI2_IOC_INIT_REQUEST));
 
 	req_desc =
 	  (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)fusion->req_frames_desc;
 
-	req_desc->Words = cmd->frame_phys_addr;
+	req_desc->Words = 0;
 	req_desc->MFAIo.RequestFlags =
 		(MEGASAS_REQ_DESCRIPT_FLAGS_MFA <<
 		 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+	cpu_to_le32s((u32 *)&req_desc->MFAIo);
+	req_desc->Words |= cpu_to_le64(cmd->frame_phys_addr);
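The descriptor is assembled in two steps because the request flags live in the low bits of the 64-bit word while the MFI frame address occupies the rest, which works only if the frame's physical address has its low byte clear by alignment. A host-only model (all field values invented; the real layout is the MEGASAS_RAID_MFA_IO_REQUEST_DESCRIPTOR bitfields):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t words = 0;
	uint8_t request_flags = 0x1 << 1;	/* invented: MFA type << shift */
	uint64_t frame_phys_addr = 0x12345600;	/* invented, low byte clear */

	words = request_flags;		/* low bits carry the flags */
	words |= frame_phys_addr;	/* upper bits carry the frame address */
	printf("request descriptor: 0x%016llx\n", (unsigned long long)words);
	return 0;
}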
 
 	/*
 	 * disable the intr before firing the init frame
@@ -753,13 +746,13 @@
 	dcmd->cmd = MFI_CMD_DCMD;
 	dcmd->cmd_status = 0xFF;
 	dcmd->sge_count = 1;
-	dcmd->flags = MFI_FRAME_DIR_READ;
+	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
 	dcmd->timeout = 0;
 	dcmd->pad_0 = 0;
-	dcmd->data_xfer_len = size_map_info;
-	dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
-	dcmd->sgl.sge32[0].phys_addr = ci_h;
-	dcmd->sgl.sge32[0].length = size_map_info;
+	dcmd->data_xfer_len = cpu_to_le32(size_map_info);
+	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO);
+	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
+	dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info);
 
 	if (!megasas_issue_polled(instance, cmd))
 		ret = 0;
@@ -828,7 +821,7 @@
 
 	map = fusion->ld_map[instance->map_id & 1];
 
-	num_lds = map->raidMap.ldCount;
+	num_lds = le32_to_cpu(map->raidMap.ldCount);
 
 	dcmd = &cmd->frame->dcmd;
 
@@ -856,15 +849,15 @@
 	dcmd->cmd = MFI_CMD_DCMD;
 	dcmd->cmd_status = 0xFF;
 	dcmd->sge_count = 1;
-	dcmd->flags = MFI_FRAME_DIR_WRITE;
+	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_WRITE);
 	dcmd->timeout = 0;
 	dcmd->pad_0 = 0;
-	dcmd->data_xfer_len = size_map_info;
+	dcmd->data_xfer_len = cpu_to_le32(size_map_info);
 	dcmd->mbox.b[0] = num_lds;
 	dcmd->mbox.b[1] = MEGASAS_DCMD_MBOX_PEND_FLAG;
-	dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
-	dcmd->sgl.sge32[0].phys_addr = ci_h;
-	dcmd->sgl.sge32[0].length = size_map_info;
+	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO);
+	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
+	dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info);
 
 	instance->map_update_cmd = cmd;
 
@@ -1067,9 +1060,8 @@
 
 	spin_lock_irqsave(&instance->hba_lock, flags);
 
-	writel(req_desc_lo,
-	       &(regs)->inbound_low_queue_port);
-	writel(req_desc_hi, &(regs)->inbound_high_queue_port);
+	writel(le32_to_cpu(req_desc_lo), &(regs)->inbound_low_queue_port);
+	writel(le32_to_cpu(req_desc_hi), &(regs)->inbound_high_queue_port);
 	spin_unlock_irqrestore(&instance->hba_lock, flags);
 }
 
@@ -1157,8 +1149,8 @@
 		return sge_count;
 
 	scsi_for_each_sg(scp, os_sgl, sge_count, i) {
-		sgl_ptr->Length = sg_dma_len(os_sgl);
-		sgl_ptr->Address = sg_dma_address(os_sgl);
+		sgl_ptr->Length = cpu_to_le32(sg_dma_len(os_sgl));
+		sgl_ptr->Address = cpu_to_le64(sg_dma_address(os_sgl));
 		sgl_ptr->Flags = 0;
 		if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
 			(instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
@@ -1177,9 +1169,9 @@
 				PCI_DEVICE_ID_LSI_INVADER) ||
 				(instance->pdev->device ==
 				PCI_DEVICE_ID_LSI_FURY)) {
-				if ((cmd->io_request->IoFlags &
-				MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
-				MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
+				if ((le16_to_cpu(cmd->io_request->IoFlags) &
+					MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
+					MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
 					cmd->io_request->ChainOffset =
 						fusion->
 						chain_offset_io_request;
@@ -1201,9 +1193,8 @@
 				sg_chain->Flags =
 					(IEEE_SGE_FLAGS_CHAIN_ELEMENT |
 					 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
-			sg_chain->Length =  (sizeof(union MPI2_SGE_IO_UNION)
-					     *(sge_count - sg_processed));
-			sg_chain->Address = cmd->sg_frame_phys_addr;
+			sg_chain->Length =  cpu_to_le32((sizeof(union MPI2_SGE_IO_UNION) * (sge_count - sg_processed)));
+			sg_chain->Address = cpu_to_le64(cmd->sg_frame_phys_addr);
 
 			sgl_ptr =
 			  (struct MPI25_IEEE_SGE_CHAIN64 *)cmd->sg_frame;
@@ -1261,7 +1252,7 @@
 		io_request->CDB.EEDP32.PrimaryReferenceTag =
 			cpu_to_be32(ref_tag);
 		io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0xffff;
-		io_request->IoFlags = 32; /* Specify 32-byte cdb */
+		io_request->IoFlags = cpu_to_le16(32); /* Specify 32-byte cdb */
 
 		/* Transfer length */
 		cdb[28] = (u8)((num_blocks >> 24) & 0xff);
@@ -1271,19 +1262,19 @@
 
 		/* set SCSI IO EEDPFlags */
 		if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) {
-			io_request->EEDPFlags =
+			io_request->EEDPFlags = cpu_to_le16(
 				MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG  |
 				MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
 				MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
 				MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
-				MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
+				MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
 		} else {
-			io_request->EEDPFlags =
+			io_request->EEDPFlags = cpu_to_le16(
 				MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
-				MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
+				MPI2_SCSIIO_EEDPFLAGS_INSERT_OP);
 		}
-		io_request->Control |= (0x4 << 26);
-		io_request->EEDPBlockSize = scp->device->sector_size;
+		io_request->Control |= cpu_to_le32((0x4 << 26));
+		io_request->EEDPBlockSize = cpu_to_le32(scp->device->sector_size);
 	} else {
 		/* Some drives don't support 16/12 byte CDB's, convert to 10 */
 		if (((cdb_len == 12) || (cdb_len == 16)) &&
@@ -1311,7 +1302,7 @@
 			cdb[8] = (u8)(num_blocks & 0xff);
 			cdb[7] = (u8)((num_blocks >> 8) & 0xff);
 
-			io_request->IoFlags = 10; /* Specify 10-byte cdb */
+			io_request->IoFlags = cpu_to_le16(10); /* Specify 10-byte cdb */
 			cdb_len = 10;
 		} else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
 			/* Convert to 16 byte CDB for large LBA's */
@@ -1349,7 +1340,7 @@
 			cdb[11] = (u8)((num_blocks >> 16) & 0xff);
 			cdb[10] = (u8)((num_blocks >> 24) & 0xff);
 
-			io_request->IoFlags = 16; /* Specify 16-byte cdb */
+			io_request->IoFlags = cpu_to_le16(16); /* Specify 16-byte cdb */
 			cdb_len = 16;
 		}
 
@@ -1410,13 +1401,14 @@
 	struct IO_REQUEST_INFO io_info;
 	struct fusion_context *fusion;
 	struct MR_FW_RAID_MAP_ALL *local_map_ptr;
+	u8 *raidLUN;
 
 	device_id = MEGASAS_DEV_INDEX(instance, scp);
 
 	fusion = instance->ctrl_context;
 
 	io_request = cmd->io_request;
-	io_request->RaidContext.VirtualDiskTgtId = device_id;
+	io_request->RaidContext.VirtualDiskTgtId = cpu_to_le16(device_id);
 	io_request->RaidContext.status = 0;
 	io_request->RaidContext.exStatus = 0;
 
@@ -1480,7 +1472,7 @@
 	io_info.ldStartBlock = ((u64)start_lba_hi << 32) | start_lba_lo;
 	io_info.numBlocks = datalength;
 	io_info.ldTgtId = device_id;
-	io_request->DataLength = scsi_bufflen(scp);
+	io_request->DataLength = cpu_to_le32(scsi_bufflen(scp));
 
 	if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
 		io_info.isRead = 1;
@@ -1494,7 +1486,7 @@
 	} else {
 		if (MR_BuildRaidContext(instance, &io_info,
 					&io_request->RaidContext,
-					local_map_ptr))
+					local_map_ptr, &raidLUN))
 			fp_possible = io_info.fpOkForIo;
 	}
 
@@ -1520,8 +1512,7 @@
 					MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
 			io_request->RaidContext.Type = MPI2_TYPE_CUDA;
 			io_request->RaidContext.nseg = 0x1;
-			io_request->IoFlags |=
-			  MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
+			io_request->IoFlags |= cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
 			io_request->RaidContext.regLockFlags |=
 			  (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
 			   MR_RL_FLAGS_SEQ_NUM_ENABLE);
@@ -1537,9 +1528,11 @@
 			scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
 		cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle;
 		io_request->DevHandle = io_info.devHandle;
+		/* populate the LUN field */
+		memcpy(io_request->LUN, raidLUN, 8);
 	} else {
 		io_request->RaidContext.timeoutValue =
-			local_map_ptr->raidMap.fpPdIoTimeoutSec;
+			cpu_to_le16(local_map_ptr->raidMap.fpPdIoTimeoutSec);
 		cmd->request_desc->SCSIIO.RequestFlags =
 			(MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO
 			 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
@@ -1557,7 +1550,7 @@
 			io_request->RaidContext.nseg = 0x1;
 		}
 		io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
-		io_request->DevHandle = device_id;
+		io_request->DevHandle = cpu_to_le16(device_id);
 	} /* Not FP */
 }
 
@@ -1579,6 +1572,11 @@
 	u16 pd_index = 0;
 	struct MR_FW_RAID_MAP_ALL *local_map_ptr;
 	struct fusion_context *fusion = instance->ctrl_context;
+	u8                          span, physArm;
+	u16                         devHandle;
+	u32                         ld, arRef, pd;
+	struct MR_LD_RAID                  *raid;
+	struct RAID_CONTEXT                *pRAID_Context;
 
 	io_request = cmd->io_request;
 	device_id = MEGASAS_DEV_INDEX(instance, scmd);
@@ -1586,6 +1584,9 @@
 		+scmd->device->id;
 	local_map_ptr = fusion->ld_map[(instance->map_id & 1)];
 
+	io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
+
 	/* Check if this is a system PD I/O */
 	if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS &&
 	    instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) {
@@ -1623,15 +1624,62 @@
 					scmd->request->timeout / HZ;
 		}
 	} else {
+		if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS)
+			goto NonFastPath;
+
+		ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
+		if ((ld >= MAX_LOGICAL_DRIVES) || (!fusion->fast_path_io))
+			goto NonFastPath;
+
+		raid = MR_LdRaidGet(ld, local_map_ptr);
+
+		/* check if this LD is FP capable */
+		if (!(raid->capability.fpNonRWCapable))
+			/* not FP capable, send as non-FP */
+			goto NonFastPath;
+
+		/* get RAID_Context pointer */
+		pRAID_Context = &io_request->RaidContext;
+
+		/* set RAID context values */
+		pRAID_Context->regLockFlags     = REGION_TYPE_SHARED_READ;
+		pRAID_Context->timeoutValue     = raid->fpIoTimeoutForLd;
+		pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
+		pRAID_Context->regLockRowLBA    = 0;
+		pRAID_Context->regLockLength    = 0;
+		pRAID_Context->configSeqNum     = raid->seqNum;
+
+		/* get the DevHandle for the PD (since this is
+		   fpNonRWCapable, this is a single disk RAID0) */
+		span = physArm = 0;
+		arRef = MR_LdSpanArrayGet(ld, span, local_map_ptr);
+		pd = MR_ArPdGet(arRef, physArm, local_map_ptr);
+		devHandle = MR_PdDevHandleGet(pd, local_map_ptr);
+
+		/* build request descriptor */
+		cmd->request_desc->SCSIIO.RequestFlags =
+			(MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
+			 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+		cmd->request_desc->SCSIIO.DevHandle = devHandle;
+
+		/* populate the LUN field */
+		memcpy(io_request->LUN, raid->LUN, 8);
+
+		/* build the raidScsiIO structure */
+		io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
+		io_request->DevHandle = devHandle;
+
+		return;
+
+NonFastPath:
 		io_request->Function  = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
-		io_request->DevHandle = device_id;
+		io_request->DevHandle = cpu_to_le16(device_id);
 		cmd->request_desc->SCSIIO.RequestFlags =
 			(MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
 			 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
 	}
-	io_request->RaidContext.VirtualDiskTgtId = device_id;
+	io_request->RaidContext.VirtualDiskTgtId = cpu_to_le16(device_id);
 	io_request->LUN[1] = scmd->device->lun;
-	io_request->DataLength = scsi_bufflen(scmd);
 }
 
 /**
@@ -1670,7 +1718,7 @@
 	 * Just the CDB length,rest of the Flags are zero
 	 * This will be modified for FP in build_ldio_fusion
 	 */
-	io_request->IoFlags = scp->cmd_len;
+	io_request->IoFlags = cpu_to_le16(scp->cmd_len);
 
 	if (megasas_is_ldio(scp))
 		megasas_build_ldio_fusion(instance, scp, cmd);
@@ -1695,17 +1743,17 @@
 
 	io_request->RaidContext.numSGE = sge_count;
 
-	io_request->SGLFlags = MPI2_SGE_FLAGS_64_BIT_ADDRESSING;
+	io_request->SGLFlags = cpu_to_le16(MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
 
 	if (scp->sc_data_direction == PCI_DMA_TODEVICE)
-		io_request->Control |= MPI2_SCSIIO_CONTROL_WRITE;
+		io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_WRITE);
 	else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
-		io_request->Control |= MPI2_SCSIIO_CONTROL_READ;
+		io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_READ);
 
 	io_request->SGLOffset0 =
 		offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4;
 
-	io_request->SenseBufferLowAddress = cmd->sense_phys_addr;
+	io_request->SenseBufferLowAddress = cpu_to_le32(cmd->sense_phys_addr);
 	io_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
 
 	cmd->scmd = scp;
@@ -1770,7 +1818,7 @@
 	}
 
 	req_desc = cmd->request_desc;
-	req_desc->SCSIIO.SMID = index;
+	req_desc->SCSIIO.SMID = cpu_to_le16(index);
 
 	if (cmd->io_request->ChainOffset != 0 &&
 	    cmd->io_request->ChainOffset != 0xF)
@@ -1832,7 +1880,7 @@
 	num_completed = 0;
 
 	while ((d_val.u.low != UINT_MAX) && (d_val.u.high != UINT_MAX)) {
-		smid = reply_desc->SMID;
+		smid = le16_to_cpu(reply_desc->SMID);
 
 		cmd_fusion = fusion->cmd_list[smid - 1];
 
@@ -2050,12 +2098,12 @@
 				       SGL) / 4;
 	io_req->ChainOffset = fusion->chain_offset_mfi_pthru;
 
-	mpi25_ieee_chain->Address = mfi_cmd->frame_phys_addr;
+	mpi25_ieee_chain->Address = cpu_to_le64(mfi_cmd->frame_phys_addr);
 
 	mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
 		MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
 
-	mpi25_ieee_chain->Length = MEGASAS_MAX_SZ_CHAIN_FRAME;
+	mpi25_ieee_chain->Length = cpu_to_le32(MEGASAS_MAX_SZ_CHAIN_FRAME);
 
 	return 0;
 }
@@ -2088,7 +2136,7 @@
 	req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
 					 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
 
-	req_desc->SCSIIO.SMID = index;
+	req_desc->SCSIIO.SMID = cpu_to_le16(index);
 
 	return req_desc;
 }
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index 4eb8401..35a5139 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -93,8 +93,13 @@
  */
 
 struct RAID_CONTEXT {
+#if   defined(__BIG_ENDIAN_BITFIELD)
+	u8	nseg:4;
+	u8	Type:4;
+#else
 	u8	Type:4;
 	u8	nseg:4;
+#endif
 	u8	resvd0;
 	u16     timeoutValue;
 	u8      regLockFlags;
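C leaves bitfield allocation order to the ABI, and on Linux it tracks byte order; keeping the in-memory layout identical for the little-endian firmware therefore requires reversing the declaration order under __BIG_ENDIAN_BITFIELD, as above. A standalone illustration (struct and field names invented):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ctx_byte {
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	uint8_t nseg:4;	/* first-declared field lands in the high bits */
	uint8_t type:4;
#else
	uint8_t type:4;	/* first-declared field lands in the low bits */
	uint8_t nseg:4;
#endif
};

int main(void)
{
	struct ctx_byte c = { .type = 0x2, .nseg = 0x1 };
	uint8_t raw;

	memcpy(&raw, &c, 1);
	printf("wire byte: 0x%02x\n", raw);	/* 0x12 on either endianness */
	return 0;
}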
@@ -298,8 +303,13 @@
  * MPT RAID MFA IO Descriptor.
  */
 struct MEGASAS_RAID_MFA_IO_REQUEST_DESCRIPTOR {
+#if   defined(__BIG_ENDIAN_BITFIELD)
+	u32     MessageAddress1:24; /* bits 31:8*/
+	u32     RequestFlags:8;
+#else
 	u32     RequestFlags:8;
 	u32     MessageAddress1:24; /* bits 31:8*/
+#endif
 	u32     MessageAddress2;      /* bits 61:32 */
 };
 
@@ -518,6 +528,19 @@
 
 struct MR_LD_RAID {
 	struct {
+#if   defined(__BIG_ENDIAN_BITFIELD)
+		u32     reserved4:7;
+		u32	fpNonRWCapable:1;
+		u32     fpReadAcrossStripe:1;
+		u32     fpWriteAcrossStripe:1;
+		u32     fpReadCapable:1;
+		u32     fpWriteCapable:1;
+		u32     encryptionType:8;
+		u32     pdPiMode:4;
+		u32     ldPiMode:4;
+		u32     reserved5:3;
+		u32     fpCapable:1;
+#else
 		u32     fpCapable:1;
 		u32     reserved5:3;
 		u32     ldPiMode:4;
@@ -527,7 +550,9 @@
 		u32     fpReadCapable:1;
 		u32     fpWriteAcrossStripe:1;
 		u32     fpReadAcrossStripe:1;
-		u32     reserved4:8;
+		u32	fpNonRWCapable:1;
+		u32     reserved4:7;
+#endif
 	} capability;
 	u32     reserved6;
 	u64     size;
@@ -551,7 +576,9 @@
 		u32 reserved:31;
 	} flags;
 
-	u8      reserved3[0x5C];
+	u8	LUN[8]; /* 0x24 8 byte LUN field used for SCSI IO's */
+	u8	fpIoTimeoutForLd;/*0x2C timeout value used by driver in FP IO*/
+	u8      reserved3[0x80-0x2D]; /* 0x2D */
 };
 
 struct MR_LD_SPAN_MAP {
diff --git a/drivers/scsi/mpt3sas/Makefile b/drivers/scsi/mpt3sas/Makefile
index 4c1d2e7..efb0c4c 100644
--- a/drivers/scsi/mpt3sas/Makefile
+++ b/drivers/scsi/mpt3sas/Makefile
@@ -1,5 +1,5 @@
 # mpt3sas makefile
-obj-m += mpt3sas.o
+obj-$(CONFIG_SCSI_MPT3SAS) += mpt3sas.o
 mpt3sas-y +=  mpt3sas_base.o     \
 		mpt3sas_config.o \
 		mpt3sas_scsih.o      \
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index b58e8f8..e62d17d 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2420,14 +2420,9 @@
 			}
 		}
 
-		if (modepage == 0x3F) {
-			sd_printk(KERN_ERR, sdkp, "No Caching mode page "
-				  "present\n");
-			goto defaults;
-		} else if ((buffer[offset] & 0x3f) != modepage) {
-			sd_printk(KERN_ERR, sdkp, "Got wrong page\n");
-			goto defaults;
-		}
+		sd_printk(KERN_ERR, sdkp, "No Caching mode page found\n");
+		goto defaults;
+
 	Page_found:
 		if (modepage == 8) {
 			sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0);
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index bce09a6..7210500 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -177,6 +177,7 @@
 	MASK_TASK_RESPONSE              = 0xFF00,
 	MASK_RSP_UPIU_RESULT            = 0xFFFF,
 	MASK_QUERY_DATA_SEG_LEN         = 0xFFFF,
+	MASK_RSP_UPIU_DATA_SEG_LEN	= 0xFFFF,
 	MASK_RSP_EXCEPTION_EVENT        = 0x10000,
 };
 
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index b36ca9a..04884d6 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -36,9 +36,11 @@
 #include <linux/async.h>
 
 #include "ufshcd.h"
+#include "unipro.h"
 
 #define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
 				 UTP_TASK_REQ_COMPL |\
+				 UIC_POWER_MODE |\
 				 UFSHCD_ERROR_MASK)
 /* UIC command timeout, unit: ms */
 #define UIC_CMD_TIMEOUT	500
@@ -56,6 +58,9 @@
 /* Expose the flag value from utp_upiu_query.value */
 #define MASK_QUERY_UPIU_FLAG_LOC 0xFF
 
+/* Interrupt aggregation default timeout, unit: 40us */
+#define INT_AGGR_DEF_TO	0x02
+
 enum {
 	UFSHCD_MAX_CHANNEL	= 0,
 	UFSHCD_MAX_ID		= 1,
@@ -78,12 +83,6 @@
 	UFSHCD_INT_CLEAR,
 };
 
-/* Interrupt aggregation options */
-enum {
-	INT_AGGR_RESET,
-	INT_AGGR_CONFIG,
-};
-
 /*
  * ufshcd_wait_for_register - wait for register value to change
  * @hba - per-adapter interface
@@ -238,6 +237,18 @@
 }
 
 /**
+ * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
+ * @hba: Pointer to adapter instance
+ *
+ * This function reads UIC command argument3.
+ * Returns the attribute value carried in REG_UIC_COMMAND_ARG_3
+ */
+static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
+{
+	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
+}
+
+/**
  * ufshcd_get_req_rsp - returns the TR response transaction type
  * @ucd_rsp_ptr: pointer to response UPIU
  */
@@ -260,6 +271,20 @@
 	return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
 }
 
+/**
+ * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
+ *				from response UPIU
+ * @ucd_rsp_ptr: pointer to response UPIU
+ *
+ * Return the data segment length.
+ */
+static inline unsigned int
+ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
+{
+	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
+		MASK_RSP_UPIU_DATA_SEG_LEN;
+}
+
 /**
  * ufshcd_is_exception_event - Check if the device raised an exception event
  * @ucd_rsp_ptr: pointer to response UPIU
@@ -276,30 +301,30 @@
 }
 
 /**
- * ufshcd_config_int_aggr - Configure interrupt aggregation values.
- *		Currently there is no use case where we want to configure
- *		interrupt aggregation dynamically. So to configure interrupt
- *		aggregation, #define INT_AGGR_COUNTER_THRESHOLD_VALUE and
- *		INT_AGGR_TIMEOUT_VALUE are used.
+ * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
  * @hba: per adapter instance
- * @option: Interrupt aggregation option
  */
 static inline void
-ufshcd_config_int_aggr(struct ufs_hba *hba, int option)
+ufshcd_reset_intr_aggr(struct ufs_hba *hba)
 {
-	switch (option) {
-	case INT_AGGR_RESET:
-		ufshcd_writel(hba, INT_AGGR_ENABLE |
-			      INT_AGGR_COUNTER_AND_TIMER_RESET,
-			      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
-		break;
-	case INT_AGGR_CONFIG:
-		ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
-			      INT_AGGR_COUNTER_THRESHOLD_VALUE |
-			      INT_AGGR_TIMEOUT_VALUE,
-			      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
-		break;
-	}
+	ufshcd_writel(hba, INT_AGGR_ENABLE |
+		      INT_AGGR_COUNTER_AND_TIMER_RESET,
+		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
+}
+
+/**
+ * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
+ * @hba: per adapter instance
+ * @cnt: Interrupt aggregation counter threshold
+ * @tmout: Interrupt aggregation timeout value
+ */
+static inline void
+ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
+{
+	ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
+		      INT_AGGR_COUNTER_THLD_VAL(cnt) |
+		      INT_AGGR_TIMEOUT_VAL(tmout),
+		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
 }
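With the option switch gone, a caller programs both knobs in one call; a sketch of typical use (argument values assumed, not taken from this hunk; INT_AGGR_DEF_TO is the 40 µs-unit timeout defined earlier in this patch):

	/* fire the aggregated interrupt after cnt completions or tmout * 40us */
	ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);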
 
 /**
@@ -355,7 +380,8 @@
 static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
 {
 	int len;
-	if (lrbp->sense_buffer) {
+	if (lrbp->sense_buffer &&
+	    ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
 		len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
 		memcpy(lrbp->sense_buffer,
 			lrbp->ucd_rsp_ptr->sr.sense_data,
@@ -446,6 +472,18 @@
 }
 
 /**
+ * ufshcd_get_upmcrs - Get the power mode change request status
+ * @hba: Pointer to adapter instance
+ *
+ * This function gets the UPMCRS field of HCS register
+ * Returns value of UPMCRS field
+ */
+static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
+{
+	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
+}
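
The open-coded shift above extracts bits 10:8 of the controller status
register, i.e. the same field named by
UIC_POWER_MODE_CHANGE_REQ_STATUS_MASK in ufshci.h. An equivalent sketch
(illustration only):

	static inline u8 upmcrs_from_hcs(u32 hcs)
	{
		/* same result as (hcs >> 8) & 0x7 in ufshcd_get_upmcrs() */
		return (hcs & UIC_POWER_MODE_CHANGE_REQ_STATUS_MASK) >> 8;
	}

The returned value decodes against the PWR_* enum added to ufshci.h
below (PWR_OK, PWR_LOCAL, and so on).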
+
+/**
  * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
  * @hba: per adapter instance
  * @uic_cmd: UIC command
@@ -1362,6 +1400,202 @@
 }
 
 /**
+ * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
+ * @hba: per adapter instance
+ * @attr_sel: uic command argument1
+ * @attr_set: attribute set type as uic command argument2
+ * @mib_val: setting value as uic command argument3
+ * @peer: indicate whether peer or local
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
+			u8 attr_set, u32 mib_val, u8 peer)
+{
+	struct uic_command uic_cmd = {0};
+	static const char *const action[] = {
+		"dme-set",
+		"dme-peer-set"
+	};
+	const char *set = action[!!peer];
+	int ret;
+
+	uic_cmd.command = peer ?
+		UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
+	uic_cmd.argument1 = attr_sel;
+	uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
+	uic_cmd.argument3 = mib_val;
+
+	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
+	if (ret)
+		dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
+			set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
+
+/**
+ * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
+ * @hba: per adapter instance
+ * @attr_sel: uic command argument1
+ * @mib_val: the value of the attribute as returned by the UIC command
+ * @peer: indicate whether peer or local
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
+			u32 *mib_val, u8 peer)
+{
+	struct uic_command uic_cmd = {0};
+	static const char *const action[] = {
+		"dme-get",
+		"dme-peer-get"
+	};
+	const char *get = action[!!peer];
+	int ret;
+
+	uic_cmd.command = peer ?
+		UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
+	uic_cmd.argument1 = attr_sel;
+
+	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
+	if (ret) {
+		dev_err(hba->dev, "%s: attr-id 0x%x error code %d\n",
+			get, UIC_GET_ATTR_ID(attr_sel), ret);
+		goto out;
+	}
+
+	if (mib_val)
+		*mib_val = uic_cmd.argument3;
+out:
+	return ret;
+}
+EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
+
+/**
+ * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
+ *				using DME_SET primitives.
+ * @hba: per adapter instance
+ * @mode: power mode value
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
+{
+	struct uic_command uic_cmd = {0};
+	struct completion pwr_done;
+	unsigned long flags;
+	u8 status;
+	int ret;
+
+	uic_cmd.command = UIC_CMD_DME_SET;
+	uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
+	uic_cmd.argument3 = mode;
+	init_completion(&pwr_done);
+
+	mutex_lock(&hba->uic_cmd_mutex);
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	hba->pwr_done = &pwr_done;
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	ret = __ufshcd_send_uic_cmd(hba, &uic_cmd);
+	if (ret) {
+		dev_err(hba->dev,
+			"pwr mode change with mode 0x%x uic error %d\n",
+			mode, ret);
+		goto out;
+	}
+
+	if (!wait_for_completion_timeout(hba->pwr_done,
+					 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
+		dev_err(hba->dev,
+			"pwr mode change with mode 0x%x completion timeout\n",
+			mode);
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	status = ufshcd_get_upmcrs(hba);
+	if (status != PWR_LOCAL) {
+		dev_err(hba->dev,
+			"pwr mode change failed, host upmcrs:0x%x\n",
+			status);
+		ret = (status != PWR_OK) ? status : -1;
+	}
+out:
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	hba->pwr_done = NULL;
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	mutex_unlock(&hba->uic_cmd_mutex);
+	return ret;
+}
+
+/**
+ * ufshcd_config_max_pwr_mode - Set and change the power mode using
+ *	the maximum capability attribute information.
+ * @hba: per adapter instance
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_config_max_pwr_mode(struct ufs_hba *hba)
+{
+	enum {RX = 0, TX = 1};
+	u32 lanes[] = {1, 1};
+	u32 gear[] = {1, 1};
+	u8 pwr[] = {FASTAUTO_MODE, FASTAUTO_MODE};
+	int ret;
+
+	/* Get the connected lane count */
+	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), &lanes[RX]);
+	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), &lanes[TX]);
+
+	/*
+	 * First, get the maximum gear for HS speed; a zero value means
+	 * there is no HS gear capability, in which case fall back to
+	 * the maximum gear for PWM speed.
+	 */
+	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &gear[RX]);
+	if (!gear[RX]) {
+		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), &gear[RX]);
+		pwr[RX] = SLOWAUTO_MODE;
+	}
+
+	ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &gear[TX]);
+	if (!gear[TX]) {
+		ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
+				    &gear[TX]);
+		pwr[TX] = SLOWAUTO_MODE;
+	}
+
+	/*
+	 * Configure the following attributes for the power mode change:
+	 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
+	 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
+	 * - PA_HSSERIES
+	 */
+	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), gear[RX]);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES), lanes[RX]);
+	if (pwr[RX] == FASTAUTO_MODE)
+		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
+
+	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), gear[TX]);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES), lanes[TX]);
+	if (pwr[TX] == FASTAUTO_MODE)
+		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
+
+	if (pwr[RX] == FASTAUTO_MODE || pwr[TX] == FASTAUTO_MODE)
+		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES), PA_HS_MODE_B);
+
+	ret = ufshcd_uic_change_pwr_mode(hba, pwr[RX] << 4 | pwr[TX]);
+	if (ret)
+		dev_err(hba->dev,
+			"pwr_mode: power mode change failed %d\n", ret);
+
+	return ret;
+}
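
The mode byte handed to ufshcd_uic_change_pwr_mode() carries the RX
power mode in the upper nibble and the TX mode in the lower nibble,
matching the PA_PWRMODE attribute layout. For example, with the enum
values from unipro.h (a sketch, not from the patch):

	/* FASTAUTO_MODE is 4, so requesting it in both directions
	 * yields 0x44; SLOWAUTO_MODE (5) for both would give 0x55
	 */
	u8 mode = FASTAUTO_MODE << 4 | FASTAUTO_MODE;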
+
+/**
  * ufshcd_complete_dev_init() - checks device readiness
  * @hba: per-adapter instance
  *
@@ -1442,7 +1676,7 @@
 	ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
 
 	/* Configure interrupt aggregation */
-	ufshcd_config_int_aggr(hba, INT_AGGR_CONFIG);
+	ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
 
 	/* Configure UTRL and UTMRL base address registers */
 	ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
@@ -1788,32 +2022,24 @@
 	int result = 0;
 
 	switch (scsi_status) {
+	case SAM_STAT_CHECK_CONDITION:
+		ufshcd_copy_sense_data(lrbp);
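+		/* fall through */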
 	case SAM_STAT_GOOD:
 		result |= DID_OK << 16 |
 			  COMMAND_COMPLETE << 8 |
-			  SAM_STAT_GOOD;
-		break;
-	case SAM_STAT_CHECK_CONDITION:
-		result |= DID_OK << 16 |
-			  COMMAND_COMPLETE << 8 |
-			  SAM_STAT_CHECK_CONDITION;
-		ufshcd_copy_sense_data(lrbp);
-		break;
-	case SAM_STAT_BUSY:
-		result |= SAM_STAT_BUSY;
+			  scsi_status;
 		break;
 	case SAM_STAT_TASK_SET_FULL:
-
 		/*
 		 * If a LUN reports SAM_STAT_TASK_SET_FULL, then the LUN queue
 		 * depth needs to be adjusted to the exact number of
 		 * outstanding commands the LUN can handle at any given time.
 		 */
 		ufshcd_adjust_lun_qdepth(lrbp->cmd);
-		result |= SAM_STAT_TASK_SET_FULL;
-		break;
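+		/* fall through */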
+	case SAM_STAT_BUSY:
 	case SAM_STAT_TASK_ABORTED:
-		result |= SAM_STAT_TASK_ABORTED;
+		ufshcd_copy_sense_data(lrbp);
+		result |= scsi_status;
 		break;
 	default:
 		result |= DID_ERROR << 16;
@@ -1898,14 +2124,20 @@
 /**
  * ufshcd_uic_cmd_compl - handle completion of uic command
  * @hba: per adapter instance
+ * @intr_status: interrupt status generated by the controller
  */
-static void ufshcd_uic_cmd_compl(struct ufs_hba *hba)
+static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
 {
-	if (hba->active_uic_cmd) {
+	if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
 		hba->active_uic_cmd->argument2 |=
 			ufshcd_get_uic_cmd_result(hba);
+		hba->active_uic_cmd->argument3 =
+			ufshcd_get_dme_attr_val(hba);
 		complete(&hba->active_uic_cmd->done);
 	}
+
+	if ((intr_status & UIC_POWER_MODE) && hba->pwr_done)
+		complete(hba->pwr_done);
 }
 
 /**
@@ -1960,7 +2192,7 @@
 
 	/* Reset interrupt aggregation counters */
 	if (int_aggr_reset)
-		ufshcd_config_int_aggr(hba, INT_AGGR_RESET);
+		ufshcd_reset_intr_aggr(hba);
 }
 
 /**
@@ -2251,8 +2483,8 @@
 	if (hba->errors)
 		ufshcd_err_handler(hba);
 
-	if (intr_status & UIC_COMMAND_COMPL)
-		ufshcd_uic_cmd_compl(hba);
+	if (intr_status & UFSHCD_UIC_MASK)
+		ufshcd_uic_cmd_compl(hba, intr_status);
 
 	if (intr_status & UTP_TASK_REQ_COMPL)
 		ufshcd_tmc_handler(hba);
@@ -2494,6 +2726,8 @@
 	if (ret)
 		goto out;
 
+	ufshcd_config_max_pwr_mode(hba);
+
 	ret = ufshcd_verify_dev_init(hba);
 	if (ret)
 		goto out;
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 59c9c48..577679a 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -175,6 +175,7 @@
  * @active_uic_cmd: handle of active UIC command
  * @uic_cmd_mutex: mutex for uic command
  * @ufshcd_tm_wait_queue: wait queue for task management
+ * @pwr_done: completion for power mode change
  * @tm_condition: condition variable for task management
  * @ufshcd_state: UFSHCD states
  * @intr_mask: Interrupt Mask Bits
@@ -219,6 +220,8 @@
 	wait_queue_head_t ufshcd_tm_wait_queue;
 	unsigned long tm_condition;
 
+	struct completion *pwr_done;
+
 	u32 ufshcd_state;
 	u32 intr_mask;
 	u16 ee_ctrl_mask;
@@ -263,4 +266,55 @@
 extern int ufshcd_runtime_suspend(struct ufs_hba *hba);
 extern int ufshcd_runtime_resume(struct ufs_hba *hba);
 extern int ufshcd_runtime_idle(struct ufs_hba *hba);
+extern int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
+			       u8 attr_set, u32 mib_val, u8 peer);
+extern int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
+			       u32 *mib_val, u8 peer);
+
+/* UIC command interfaces for DME primitives */
+#define DME_LOCAL	0
+#define DME_PEER	1
+#define ATTR_SET_NOR	0	/* NORMAL */
+#define ATTR_SET_ST	1	/* STATIC */
+
+static inline int ufshcd_dme_set(struct ufs_hba *hba, u32 attr_sel,
+				 u32 mib_val)
+{
+	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
+				   mib_val, DME_LOCAL);
+}
+
+static inline int ufshcd_dme_st_set(struct ufs_hba *hba, u32 attr_sel,
+				    u32 mib_val)
+{
+	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
+				   mib_val, DME_LOCAL);
+}
+
+static inline int ufshcd_dme_peer_set(struct ufs_hba *hba, u32 attr_sel,
+				      u32 mib_val)
+{
+	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
+				   mib_val, DME_PEER);
+}
+
+static inline int ufshcd_dme_peer_st_set(struct ufs_hba *hba, u32 attr_sel,
+					 u32 mib_val)
+{
+	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
+				   mib_val, DME_PEER);
+}
+
+static inline int ufshcd_dme_get(struct ufs_hba *hba,
+				 u32 attr_sel, u32 *mib_val)
+{
+	return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_LOCAL);
+}
+
+static inline int ufshcd_dme_peer_get(struct ufs_hba *hba,
+				      u32 attr_sel, u32 *mib_val)
+{
+	return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER);
+}
+
 #endif /* End of Header */
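
Taken together, these wrappers reduce a DME attribute access to one
call. A hypothetical caller (error handling elided; the pairing of
attributes is illustrative only):

	u32 peer_rx_gear = 0;

	/* read the peer's maximum RX gear, then program the local TX gear */
	if (!ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
				 &peer_rx_gear))
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), peer_rx_gear);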
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index f1e1b74..0475c66 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -124,6 +124,9 @@
 #define CONTROLLER_FATAL_ERROR			UFS_BIT(16)
 #define SYSTEM_BUS_FATAL_ERROR			UFS_BIT(17)
 
+#define UFSHCD_UIC_MASK		(UIC_COMMAND_COMPL |\
+				 UIC_POWER_MODE)
+
 #define UFSHCD_ERROR_MASK	(UIC_ERROR |\
 				DEVICE_FATAL_ERROR |\
 				CONTROLLER_FATAL_ERROR |\
@@ -142,6 +145,15 @@
 #define DEVICE_ERROR_INDICATOR			UFS_BIT(5)
 #define UIC_POWER_MODE_CHANGE_REQ_STATUS_MASK	UFS_MASK(0x7, 8)
 
+enum {
+	PWR_OK		= 0x0,
+	PWR_LOCAL	= 0x01,
+	PWR_REMOTE	= 0x02,
+	PWR_BUSY	= 0x03,
+	PWR_ERROR_CAP	= 0x04,
+	PWR_FATAL_ERROR	= 0x05,
+};
+
 /* HCE - Host Controller Enable 34h */
 #define CONTROLLER_ENABLE	UFS_BIT(0)
 #define CONTROLLER_DISABLE	0x0
@@ -191,6 +203,12 @@
 #define CONFIG_RESULT_CODE_MASK		0xFF
 #define GENERIC_ERROR_CODE_MASK		0xFF
 
+#define UIC_ARG_MIB_SEL(attr, sel)	((((attr) & 0xFFFF) << 16) |\
+					 ((sel) & 0xFFFF))
+#define UIC_ARG_MIB(attr)		UIC_ARG_MIB_SEL(attr, 0)
+#define UIC_ARG_ATTR_TYPE(t)		(((t) & 0xFF) << 16)
+#define UIC_GET_ATTR_ID(v)		(((v) >> 16) & 0xFFFF)
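+
+/* Illustration only: these macros pack the 16-bit attribute ID (plus,
+ * for UIC_ARG_MIB_SEL, a selector index) into UIC command argument 1.
+ * PA_PWRMODE is 0x1571, so UIC_ARG_MIB(PA_PWRMODE), which expands to
+ * UIC_ARG_MIB_SEL(0x1571, 0), evaluates to 0x15710000.
+ */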
+
 /* UIC Commands */
 enum {
 	UIC_CMD_DME_GET			= 0x01,
@@ -226,8 +244,8 @@
 
 #define MASK_UIC_COMMAND_RESULT			0xFF
 
-#define INT_AGGR_COUNTER_THRESHOLD_VALUE	(0x1F << 8)
-#define INT_AGGR_TIMEOUT_VALUE			(0x02)
+#define INT_AGGR_COUNTER_THLD_VAL(c)	(((c) & 0x1F) << 8)
+#define INT_AGGR_TIMEOUT_VAL(t)		(((t) & 0xFF) << 0)
 
 /* Interrupt disable masks */
 enum {
diff --git a/drivers/scsi/ufs/unipro.h b/drivers/scsi/ufs/unipro.h
new file mode 100644
index 0000000..0bb8041
--- /dev/null
+++ b/drivers/scsi/ufs/unipro.h
@@ -0,0 +1,151 @@
+/*
+ * drivers/scsi/ufs/unipro.h
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _UNIPRO_H_
+#define _UNIPRO_H_
+
+/*
+ * PHY Adapter attributes
+ */
+#define PA_ACTIVETXDATALANES	0x1560
+#define PA_ACTIVERXDATALANES	0x1580
+#define PA_TXTRAILINGCLOCKS	0x1564
+#define PA_PHY_TYPE		0x1500
+#define PA_AVAILTXDATALANES	0x1520
+#define PA_AVAILRXDATALANES	0x1540
+#define PA_MINRXTRAILINGCLOCKS	0x1543
+#define PA_TXPWRSTATUS		0x1567
+#define PA_RXPWRSTATUS		0x1582
+#define PA_TXFORCECLOCK		0x1562
+#define PA_TXPWRMODE		0x1563
+#define PA_LEGACYDPHYESCDL	0x1570
+#define PA_MAXTXSPEEDFAST	0x1521
+#define PA_MAXTXSPEEDSLOW	0x1522
+#define PA_MAXRXSPEEDFAST	0x1541
+#define PA_MAXRXSPEEDSLOW	0x1542
+#define PA_TXLINKSTARTUPHS	0x1544
+#define PA_TXSPEEDFAST		0x1565
+#define PA_TXSPEEDSLOW		0x1566
+#define PA_REMOTEVERINFO	0x15A0
+#define PA_TXGEAR		0x1568
+#define PA_TXTERMINATION	0x1569
+#define PA_HSSERIES		0x156A
+#define PA_PWRMODE		0x1571
+#define PA_RXGEAR		0x1583
+#define PA_RXTERMINATION	0x1584
+#define PA_MAXRXPWMGEAR		0x1586
+#define PA_MAXRXHSGEAR		0x1587
+#define PA_RXHSUNTERMCAP	0x15A5
+#define PA_RXLSTERMCAP		0x15A6
+#define PA_PACPREQTIMEOUT	0x1590
+#define PA_PACPREQEOBTIMEOUT	0x1591
+#define PA_HIBERN8TIME		0x15A7
+#define PA_LOCALVERINFO		0x15A9
+#define PA_TACTIVATE		0x15A8
+#define PA_PACPFRAMECOUNT	0x15C0
+#define PA_PACPERRORCOUNT	0x15C1
+#define PA_PHYTESTCONTROL	0x15C2
+#define PA_PWRMODEUSERDATA0	0x15B0
+#define PA_PWRMODEUSERDATA1	0x15B1
+#define PA_PWRMODEUSERDATA2	0x15B2
+#define PA_PWRMODEUSERDATA3	0x15B3
+#define PA_PWRMODEUSERDATA4	0x15B4
+#define PA_PWRMODEUSERDATA5	0x15B5
+#define PA_PWRMODEUSERDATA6	0x15B6
+#define PA_PWRMODEUSERDATA7	0x15B7
+#define PA_PWRMODEUSERDATA8	0x15B8
+#define PA_PWRMODEUSERDATA9	0x15B9
+#define PA_PWRMODEUSERDATA10	0x15BA
+#define PA_PWRMODEUSERDATA11	0x15BB
+#define PA_CONNECTEDTXDATALANES	0x1561
+#define PA_CONNECTEDRXDATALANES	0x1581
+#define PA_LOGICALLANEMAP	0x15A1
+#define PA_SLEEPNOCONFIGTIME	0x15A2
+#define PA_STALLNOCONFIGTIME	0x15A3
+#define PA_SAVECONFIGTIME	0x15A4
+
+/* PA power modes */
+enum {
+	FAST_MODE	= 1,
+	SLOW_MODE	= 2,
+	FASTAUTO_MODE	= 4,
+	SLOWAUTO_MODE	= 5,
+	UNCHANGED	= 7,
+};
+
+/* PA TX/RX Frequency Series */
+enum {
+	PA_HS_MODE_A	= 1,
+	PA_HS_MODE_B	= 2,
+};
+
+/*
+ * Data Link Layer Attributes
+ */
+#define DL_TC0TXFCTHRESHOLD	0x2040
+#define DL_FC0PROTTIMEOUTVAL	0x2041
+#define DL_TC0REPLAYTIMEOUTVAL	0x2042
+#define DL_AFC0REQTIMEOUTVAL	0x2043
+#define DL_AFC0CREDITTHRESHOLD	0x2044
+#define DL_TC0OUTACKTHRESHOLD	0x2045
+#define DL_TC1TXFCTHRESHOLD	0x2060
+#define DL_FC1PROTTIMEOUTVAL	0x2061
+#define DL_TC1REPLAYTIMEOUTVAL	0x2062
+#define DL_AFC1REQTIMEOUTVAL	0x2063
+#define DL_AFC1CREDITTHRESHOLD	0x2064
+#define DL_TC1OUTACKTHRESHOLD	0x2065
+#define DL_TXPREEMPTIONCAP	0x2000
+#define DL_TC0TXMAXSDUSIZE	0x2001
+#define DL_TC0RXINITCREDITVAL	0x2002
+#define DL_TC0TXBUFFERSIZE	0x2005
+#define DL_PEERTC0PRESENT	0x2046
+#define DL_PEERTC0RXINITCREVAL	0x2047
+#define DL_TC1TXMAXSDUSIZE	0x2003
+#define DL_TC1RXINITCREDITVAL	0x2004
+#define DL_TC1TXBUFFERSIZE	0x2006
+#define DL_PEERTC1PRESENT	0x2066
+#define DL_PEERTC1RXINITCREVAL	0x2067
+
+/*
+ * Network Layer Attributes
+ */
+#define N_DEVICEID		0x3000
+#define N_DEVICEID_VALID	0x3001
+#define N_TC0TXMAXSDUSIZE	0x3020
+#define N_TC1TXMAXSDUSIZE	0x3021
+
+/*
+ * Transport Layer Attributes
+ */
+#define T_NUMCPORTS		0x4000
+#define T_NUMTESTFEATURES	0x4001
+#define T_CONNECTIONSTATE	0x4020
+#define T_PEERDEVICEID		0x4021
+#define T_PEERCPORTID		0x4022
+#define T_TRAFFICCLASS		0x4023
+#define T_PROTOCOLID		0x4024
+#define T_CPORTFLAGS		0x4025
+#define T_TXTOKENVALUE		0x4026
+#define T_RXTOKENVALUE		0x4027
+#define T_LOCALBUFFERSPACE	0x4028
+#define T_PEERBUFFERSPACE	0x4029
+#define T_CREDITSTOSEND		0x402A
+#define T_CPORTMODE		0x402B
+#define T_TC0TXMAXSDUSIZE	0x4060
+#define T_TC1TXMAXSDUSIZE	0x4061
+
+/* Boolean attribute values */
+enum {
+	FALSE = 0,
+	TRUE,
+};
+
+#endif /* _UNIPRO_H_ */
diff --git a/fs/block_dev.c b/fs/block_dev.c
index c3549ed..1e86823 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -592,7 +592,7 @@
 	return bdev;
 }
 
-static inline int sb_is_blkdev_sb(struct super_block *sb)
+int sb_is_blkdev_sb(struct super_block *sb)
 {
 	return sb == blockdev_superblock;
 }
diff --git a/fs/dcache.c b/fs/dcache.c
index 1bd4614..4100030 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -357,15 +357,80 @@
 }
 
 /*
+ * The DCACHE_LRU_LIST bit is set whenever the 'd_lru' entry
+ * is in use - which includes both the "real" per-superblock
+ * LRU list _and_ the DCACHE_SHRINK_LIST use.
+ *
+ * The DCACHE_SHRINK_LIST bit is set whenever the dentry is
+ * on the shrink list (ie not on the superblock LRU list).
+ *
+ * The per-cpu "nr_dentry_unused" counters are updated whenever the
+ * DCACHE_LRU_LIST bit is set or cleared.
+ *
+ * These helper functions make sure we always follow the
+ * rules. d_lock must be held by the caller.
+ */
+#define D_FLAG_VERIFY(dentry,x) WARN_ON_ONCE(((dentry)->d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_LIST)) != (x))
+static void d_lru_add(struct dentry *dentry)
+{
+	D_FLAG_VERIFY(dentry, 0);
+	dentry->d_flags |= DCACHE_LRU_LIST;
+	this_cpu_inc(nr_dentry_unused);
+	WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
+}
+
+static void d_lru_del(struct dentry *dentry)
+{
+	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
+	dentry->d_flags &= ~DCACHE_LRU_LIST;
+	this_cpu_dec(nr_dentry_unused);
+	WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
+}
+
+static void d_shrink_del(struct dentry *dentry)
+{
+	D_FLAG_VERIFY(dentry, DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
+	list_del_init(&dentry->d_lru);
+	dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
+	this_cpu_dec(nr_dentry_unused);
+}
+
+static void d_shrink_add(struct dentry *dentry, struct list_head *list)
+{
+	D_FLAG_VERIFY(dentry, 0);
+	list_add(&dentry->d_lru, list);
+	dentry->d_flags |= DCACHE_SHRINK_LIST | DCACHE_LRU_LIST;
+	this_cpu_inc(nr_dentry_unused);
+}
+
+/*
+ * These can only be called under the global LRU lock, i.e. during the
+ * callback for freeing the LRU list. "isolate" removes the dentry from
+ * the LRU lists entirely, while shrink_move moves it to the indicated
+ * private list.
+ */
+static void d_lru_isolate(struct dentry *dentry)
+{
+	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
+	dentry->d_flags &= ~DCACHE_LRU_LIST;
+	this_cpu_dec(nr_dentry_unused);
+	list_del_init(&dentry->d_lru);
+}
+
+static void d_lru_shrink_move(struct dentry *dentry, struct list_head *list)
+{
+	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
+	dentry->d_flags |= DCACHE_SHRINK_LIST;
+	list_move_tail(&dentry->d_lru, list);
+}
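
As a reading aid (not part of the patch), these are the legal states of
the d_lru entry and the transitions the helpers above perform:

	/*
	 * 0                                    not on any list
	 * DCACHE_LRU_LIST                      on the superblock LRU
	 * DCACHE_LRU_LIST|DCACHE_SHRINK_LIST   on a private shrink list
	 *
	 * d_lru_add():         0          -> LRU         nr_dentry_unused++
	 * d_lru_del():         LRU        -> 0           nr_dentry_unused--
	 * d_lru_isolate():     LRU        -> 0           nr_dentry_unused--
	 * d_shrink_add():      0          -> LRU|SHRINK  nr_dentry_unused++
	 * d_shrink_del():      LRU|SHRINK -> 0           nr_dentry_unused--
	 * d_lru_shrink_move(): LRU        -> LRU|SHRINK  counter unchanged
	 */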
+
+/*
  * dentry_lru_(add|del) must be called with d_lock held.
  */
 static void dentry_lru_add(struct dentry *dentry)
 {
-	if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST))) {
-		if (list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru))
-			this_cpu_inc(nr_dentry_unused);
-		dentry->d_flags |= DCACHE_LRU_LIST;
-	}
+	if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST)))
+		d_lru_add(dentry);
 }
 
 /*
@@ -377,15 +442,11 @@
  */
 static void dentry_lru_del(struct dentry *dentry)
 {
-	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
-		list_del_init(&dentry->d_lru);
-		dentry->d_flags &= ~DCACHE_SHRINK_LIST;
-		return;
+	if (dentry->d_flags & DCACHE_LRU_LIST) {
+		if (dentry->d_flags & DCACHE_SHRINK_LIST)
+			return d_shrink_del(dentry);
+		d_lru_del(dentry);
 	}
-
-	if (list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru))
-		this_cpu_dec(nr_dentry_unused);
-	dentry->d_flags &= ~DCACHE_LRU_LIST;
 }
 
 /**
@@ -837,6 +898,12 @@
 		dentry = list_entry_rcu(list->prev, struct dentry, d_lru);
 		if (&dentry->d_lru == list)
 			break; /* empty */
+
+		/*
+		 * Get the dentry lock, and re-verify that the dentry is
+		 * still on the shrink list. If it is, we know that
+		 * DCACHE_SHRINK_LIST and DCACHE_LRU_LIST are set.
+		 */
 		spin_lock(&dentry->d_lock);
 		if (dentry != list_entry(list->prev, struct dentry, d_lru)) {
 			spin_unlock(&dentry->d_lock);
@@ -848,8 +915,7 @@
 		 * to the LRU here, so we can simply remove it from the list
 		 * here regardless of whether it is referenced or not.
 		 */
-		list_del_init(&dentry->d_lru);
-		dentry->d_flags &= ~DCACHE_SHRINK_LIST;
+		d_shrink_del(dentry);
 
 		/*
 		 * We found an inuse dentry which was not removed from
@@ -861,12 +927,20 @@
 		}
 		rcu_read_unlock();
 
+		/*
+		 * If try_prune_one_dentry() returns a dentry, it will
+		 * be the same one we passed in, and d_lock will
+		 * have been held the whole time, so it will not
+		 * have been added to any other lists. We failed
+		 * to get the inode lock.
+		 *
+		 * We just add it back to the shrink list.
+		 */
 		dentry = try_prune_one_dentry(dentry);
 
 		rcu_read_lock();
 		if (dentry) {
-			dentry->d_flags |= DCACHE_SHRINK_LIST;
-			list_add(&dentry->d_lru, list);
+			d_shrink_add(dentry, list);
 			spin_unlock(&dentry->d_lock);
 		}
 	}
@@ -894,7 +968,7 @@
 	 * another pass through the LRU.
 	 */
 	if (dentry->d_lockref.count) {
-		list_del_init(&dentry->d_lru);
+		d_lru_isolate(dentry);
 		spin_unlock(&dentry->d_lock);
 		return LRU_REMOVED;
 	}
@@ -925,9 +999,7 @@
 		return LRU_ROTATE;
 	}
 
-	dentry->d_flags |= DCACHE_SHRINK_LIST;
-	list_move_tail(&dentry->d_lru, freeable);
-	this_cpu_dec(nr_dentry_unused);
+	d_lru_shrink_move(dentry, freeable);
 	spin_unlock(&dentry->d_lock);
 
 	return LRU_REMOVED;
@@ -972,9 +1044,7 @@
 	if (!spin_trylock(&dentry->d_lock))
 		return LRU_SKIP;
 
-	dentry->d_flags |= DCACHE_SHRINK_LIST;
-	list_move_tail(&dentry->d_lru, freeable);
-	this_cpu_dec(nr_dentry_unused);
+	d_lru_shrink_move(dentry, freeable);
 	spin_unlock(&dentry->d_lock);
 
 	return LRU_REMOVED;
@@ -1362,9 +1432,13 @@
 	if (dentry->d_lockref.count) {
 		dentry_lru_del(dentry);
 	} else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
-		dentry_lru_del(dentry);
-		list_add_tail(&dentry->d_lru, &data->dispose);
-		dentry->d_flags |= DCACHE_SHRINK_LIST;
+		/*
+		 * We can't use d_lru_shrink_move() because we
+		 * need to get the global LRU lock and do the
+		 * LRU accounting.
+		 */
+		d_lru_del(dentry);
+		d_shrink_add(dentry, &data->dispose);
 		data->found++;
 		ret = D_WALK_NORETRY;
 	}
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 30f6f27..9f4935b 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -69,7 +69,7 @@
 {
 	struct super_block *sb = inode->i_sb;
 
-	if (strcmp(sb->s_type->name, "bdev") == 0)
+	if (sb_is_blkdev_sb(sb))
 		return inode->i_mapping->backing_dev_info;
 
 	return sb->s_bdi;
@@ -251,11 +251,13 @@
 		if (work->older_than_this &&
 		    inode_dirtied_after(inode, *work->older_than_this))
 			break;
+		list_move(&inode->i_wb_list, &tmp);
+		moved++;
+		if (sb_is_blkdev_sb(inode->i_sb))
+			continue;
 		if (sb && sb != inode->i_sb)
 			do_sb_sort = 1;
 		sb = inode->i_sb;
-		list_move(&inode->i_wb_list, &tmp);
-		moved++;
 	}
 
 	/* just one sb in list, splice to dispatch_queue and we're done */
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
index 7f60e90..6e025e0 100644
--- a/fs/ubifs/debug.c
+++ b/fs/ubifs/debug.c
@@ -2587,10 +2587,11 @@
 		return -EROFS;
 
 	failing = power_cut_emulated(c, lnum, 1);
-	if (failing)
+	if (failing) {
 		len = corrupt_data(c, buf, len);
-	ubifs_warn("actually write %d bytes to LEB %d:%d (the buffer was corrupted)",
-		   len, lnum, offs);
+		ubifs_warn("actually write %d bytes to LEB %d:%d (the buffer was corrupted)",
+			   len, lnum, offs);
+	}
 	err = ubi_leb_write(c->ubi, lnum, buf, offs, len);
 	if (err)
 		return err;
diff --git a/include/linux/fs.h b/include/linux/fs.h
index a4acd3c..3f40547 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2069,6 +2069,7 @@
 extern void emergency_thaw_all(void);
 extern int thaw_bdev(struct block_device *bdev, struct super_block *sb);
 extern int fsync_bdev(struct block_device *);
+extern int sb_is_blkdev_sb(struct super_block *sb);
 #else
 static inline void bd_forget(struct inode *inode) {}
 static inline int sync_blockdev(struct block_device *bdev) { return 0; }
@@ -2088,6 +2089,11 @@
 static inline void iterate_bdevs(void (*f)(struct block_device *, void *), void *arg)
 {
 }
+
+static inline int sb_is_blkdev_sb(struct super_block *sb)
+{
+	return 0;
+}
 #endif
 extern int sync_filesystem(struct super_block *);
 extern const struct file_operations def_blk_fops;
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index bc95b2b..97fbecd 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -758,6 +758,7 @@
 #define PCI_DEVICE_ID_HP_CISSE		0x323a
 #define PCI_DEVICE_ID_HP_CISSF		0x323b
 #define PCI_DEVICE_ID_HP_CISSH		0x323c
+#define PCI_DEVICE_ID_HP_CISSI		0x3239
 #define PCI_DEVICE_ID_HP_ZX2_IOC	0x4031
 
 #define PCI_VENDOR_ID_PCTECH		0x1042
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 6c5cc0e..74f1058 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -4,6 +4,8 @@
  * (C) SGI 2006, Christoph Lameter
  * 	Cleaned up and restructured to ease the addition of alternative
  * 	implementations of SLAB allocators.
+ * (C) Linux Foundation 2008-2013
+ *      Unified interface for all slab allocators
  */
 
 #ifndef _LINUX_SLAB_H
@@ -94,6 +96,7 @@
 #define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
 				(unsigned long)ZERO_SIZE_PTR)
 
+#include <linux/kmemleak.h>
 
 struct mem_cgroup;
 /*
@@ -289,6 +292,57 @@
 }
 #endif /* !CONFIG_SLOB */
 
+void *__kmalloc(size_t size, gfp_t flags);
+void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
+
+#ifdef CONFIG_NUMA
+void *__kmalloc_node(size_t size, gfp_t flags, int node);
+void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
+#else
+static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
+{
+	return __kmalloc(size, flags);
+}
+
+static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
+{
+	return kmem_cache_alloc(s, flags);
+}
+#endif
+
+#ifdef CONFIG_TRACING
+extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
+
+#ifdef CONFIG_NUMA
+extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
+					   gfp_t gfpflags,
+					   int node, size_t size);
+#else
+static __always_inline void *
+kmem_cache_alloc_node_trace(struct kmem_cache *s,
+			      gfp_t gfpflags,
+			      int node, size_t size)
+{
+	return kmem_cache_alloc_trace(s, gfpflags, size);
+}
+#endif /* CONFIG_NUMA */
+
+#else /* CONFIG_TRACING */
+static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
+		gfp_t flags, size_t size)
+{
+	return kmem_cache_alloc(s, flags);
+}
+
+static __always_inline void *
+kmem_cache_alloc_node_trace(struct kmem_cache *s,
+			      gfp_t gfpflags,
+			      int node, size_t size)
+{
+	return kmem_cache_alloc_node(s, gfpflags, node);
+}
+#endif /* CONFIG_TRACING */
+
 #ifdef CONFIG_SLAB
 #include <linux/slab_def.h>
 #endif
@@ -297,10 +351,61 @@
 #include <linux/slub_def.h>
 #endif
 
-#ifdef CONFIG_SLOB
-#include <linux/slob_def.h>
+static __always_inline void *
+kmalloc_order(size_t size, gfp_t flags, unsigned int order)
+{
+	void *ret;
+
+	flags |= (__GFP_COMP | __GFP_KMEMCG);
+	ret = (void *) __get_free_pages(flags, order);
+	kmemleak_alloc(ret, size, 1, flags);
+	return ret;
+}
+
+#ifdef CONFIG_TRACING
+extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
+#else
+static __always_inline void *
+kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
+{
+	return kmalloc_order(size, flags, order);
+}
 #endif
 
+static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
+{
+	unsigned int order = get_order(size);
+	return kmalloc_order_trace(size, flags, order);
+}
+
+/**
+ * kmalloc - allocate memory
+ * @size: how many bytes of memory are required.
+ * @flags: the type of memory to allocate (see kcalloc).
+ *
+ * kmalloc is the normal method of allocating memory
+ * for objects smaller than page size in the kernel.
+ */
+static __always_inline void *kmalloc(size_t size, gfp_t flags)
+{
+	if (__builtin_constant_p(size)) {
+		if (size > KMALLOC_MAX_CACHE_SIZE)
+			return kmalloc_large(size, flags);
+#ifndef CONFIG_SLOB
+		if (!(flags & GFP_DMA)) {
+			int index = kmalloc_index(size);
+
+			if (!index)
+				return ZERO_SIZE_PTR;
+
+			return kmem_cache_alloc_trace(kmalloc_caches[index],
+					flags, size);
+		}
+#endif
+	}
+	return __kmalloc(size, flags);
+}
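
The unified kmalloc() keeps the compile-time fast path that slab_def.h
and slub_def.h used to duplicate: a constant size collapses into a
direct kmalloc_caches[] lookup, anything else goes through __kmalloc().
A caller-side sketch (struct item and item_count are hypothetical
names):

	struct item { int x; };		/* hypothetical type */
	size_t item_count = nr_cpu_ids;	/* not a compile-time constant */

	/* constant size: folds at compile time to
	 * kmem_cache_alloc_trace(kmalloc_caches[kmalloc_index(64)], ...)
	 */
	void *a = kmalloc(64, GFP_KERNEL);

	/* non-constant size: compiles to a plain __kmalloc() call */
	void *b = kmalloc(item_count * sizeof(struct item), GFP_KERNEL);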
+
 /*
  * Determine size used for the nth kmalloc cache.
  * return size or 0 if a kmalloc cache for that
@@ -321,6 +426,23 @@
 	return 0;
 }
 
+static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+{
+#ifndef CONFIG_SLOB
+	if (__builtin_constant_p(size) &&
+		size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
+		int i = kmalloc_index(size);
+
+		if (!i)
+			return ZERO_SIZE_PTR;
+
+		return kmem_cache_alloc_node_trace(kmalloc_caches[i],
+						flags, node, size);
+	}
+#endif
+	return __kmalloc_node(size, flags, node);
+}
+
 /*
  * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
  * Intended for arches that get misalignment faults even for 64 bit integer
@@ -451,36 +573,6 @@
 	return kmalloc_array(n, size, flags | __GFP_ZERO);
 }
 
-#if !defined(CONFIG_NUMA) && !defined(CONFIG_SLOB)
-/**
- * kmalloc_node - allocate memory from a specific node
- * @size: how many bytes of memory are required.
- * @flags: the type of memory to allocate (see kmalloc).
- * @node: node to allocate from.
- *
- * kmalloc() for non-local nodes, used to allocate from a specific node
- * if available. Equivalent to kmalloc() in the non-NUMA single-node
- * case.
- */
-static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
-{
-	return kmalloc(size, flags);
-}
-
-static inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
-{
-	return __kmalloc(size, flags);
-}
-
-void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
-
-static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
-					gfp_t flags, int node)
-{
-	return kmem_cache_alloc(cachep, flags);
-}
-#endif /* !CONFIG_NUMA && !CONFIG_SLOB */
-
 /*
  * kmalloc_track_caller is a special version of kmalloc that records the
  * calling function of the routine calling it for slab leak tracking instead
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index cd40158..e9346b4 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -3,20 +3,6 @@
 
 /*
  * Definitions unique to the original Linux SLAB allocator.
- *
- * What we provide here is a way to optimize the frequent kmalloc
- * calls in the kernel by selecting the appropriate general cache
- * if kmalloc was called with a size that can be established at
- * compile time.
- */
-
-#include <linux/init.h>
-#include <linux/compiler.h>
-
-/*
- * struct kmem_cache
- *
- * manages a cache.
  */
 
 struct kmem_cache {
@@ -102,96 +88,4 @@
 	 */
 };
 
-void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
-void *__kmalloc(size_t size, gfp_t flags);
-
-#ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
-#else
-static __always_inline void *
-kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
-{
-	return kmem_cache_alloc(cachep, flags);
-}
-#endif
-
-static __always_inline void *kmalloc(size_t size, gfp_t flags)
-{
-	struct kmem_cache *cachep;
-	void *ret;
-
-	if (__builtin_constant_p(size)) {
-		int i;
-
-		if (!size)
-			return ZERO_SIZE_PTR;
-
-		if (WARN_ON_ONCE(size > KMALLOC_MAX_SIZE))
-			return NULL;
-
-		i = kmalloc_index(size);
-
-#ifdef CONFIG_ZONE_DMA
-		if (flags & GFP_DMA)
-			cachep = kmalloc_dma_caches[i];
-		else
-#endif
-			cachep = kmalloc_caches[i];
-
-		ret = kmem_cache_alloc_trace(cachep, flags, size);
-
-		return ret;
-	}
-	return __kmalloc(size, flags);
-}
-
-#ifdef CONFIG_NUMA
-extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
-extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
-
-#ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
-					 gfp_t flags,
-					 int nodeid,
-					 size_t size);
-#else
-static __always_inline void *
-kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
-			    gfp_t flags,
-			    int nodeid,
-			    size_t size)
-{
-	return kmem_cache_alloc_node(cachep, flags, nodeid);
-}
-#endif
-
-static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
-{
-	struct kmem_cache *cachep;
-
-	if (__builtin_constant_p(size)) {
-		int i;
-
-		if (!size)
-			return ZERO_SIZE_PTR;
-
-		if (WARN_ON_ONCE(size > KMALLOC_MAX_SIZE))
-			return NULL;
-
-		i = kmalloc_index(size);
-
-#ifdef CONFIG_ZONE_DMA
-		if (flags & GFP_DMA)
-			cachep = kmalloc_dma_caches[i];
-		else
-#endif
-			cachep = kmalloc_caches[i];
-
-		return kmem_cache_alloc_node_trace(cachep, flags, node, size);
-	}
-	return __kmalloc_node(size, flags, node);
-}
-
-#endif	/* CONFIG_NUMA */
-
 #endif	/* _LINUX_SLAB_DEF_H */
diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
deleted file mode 100644
index 095a5a4..0000000
--- a/include/linux/slob_def.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef __LINUX_SLOB_DEF_H
-#define __LINUX_SLOB_DEF_H
-
-#include <linux/numa.h>
-
-void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
-
-static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
-					      gfp_t flags)
-{
-	return kmem_cache_alloc_node(cachep, flags, NUMA_NO_NODE);
-}
-
-void *__kmalloc_node(size_t size, gfp_t flags, int node);
-
-static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
-{
-	return __kmalloc_node(size, flags, node);
-}
-
-static __always_inline void *kmalloc(size_t size, gfp_t flags)
-{
-	return __kmalloc_node(size, flags, NUMA_NO_NODE);
-}
-
-static __always_inline void *__kmalloc(size_t size, gfp_t flags)
-{
-	return kmalloc(size, flags);
-}
-
-#endif /* __LINUX_SLOB_DEF_H */
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 027276f..cc0b67e 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -6,14 +6,8 @@
  *
  * (C) 2007 SGI, Christoph Lameter
  */
-#include <linux/types.h>
-#include <linux/gfp.h>
-#include <linux/bug.h>
-#include <linux/workqueue.h>
 #include <linux/kobject.h>
 
-#include <linux/kmemleak.h>
-
 enum stat_item {
 	ALLOC_FASTPATH,		/* Allocation from cpu slab */
 	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
@@ -104,108 +98,4 @@
 	struct kmem_cache_node *node[MAX_NUMNODES];
 };
 
-void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
-void *__kmalloc(size_t size, gfp_t flags);
-
-static __always_inline void *
-kmalloc_order(size_t size, gfp_t flags, unsigned int order)
-{
-	void *ret;
-
-	flags |= (__GFP_COMP | __GFP_KMEMCG);
-	ret = (void *) __get_free_pages(flags, order);
-	kmemleak_alloc(ret, size, 1, flags);
-	return ret;
-}
-
-/**
- * Calling this on allocated memory will check that the memory
- * is expected to be in use, and print warnings if not.
- */
-#ifdef CONFIG_SLUB_DEBUG
-extern bool verify_mem_not_deleted(const void *x);
-#else
-static inline bool verify_mem_not_deleted(const void *x)
-{
-	return true;
-}
-#endif
-
-#ifdef CONFIG_TRACING
-extern void *
-kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size);
-extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
-#else
-static __always_inline void *
-kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
-{
-	return kmem_cache_alloc(s, gfpflags);
-}
-
-static __always_inline void *
-kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
-{
-	return kmalloc_order(size, flags, order);
-}
-#endif
-
-static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
-{
-	unsigned int order = get_order(size);
-	return kmalloc_order_trace(size, flags, order);
-}
-
-static __always_inline void *kmalloc(size_t size, gfp_t flags)
-{
-	if (__builtin_constant_p(size)) {
-		if (size > KMALLOC_MAX_CACHE_SIZE)
-			return kmalloc_large(size, flags);
-
-		if (!(flags & GFP_DMA)) {
-			int index = kmalloc_index(size);
-
-			if (!index)
-				return ZERO_SIZE_PTR;
-
-			return kmem_cache_alloc_trace(kmalloc_caches[index],
-					flags, size);
-		}
-	}
-	return __kmalloc(size, flags);
-}
-
-#ifdef CONFIG_NUMA
-void *__kmalloc_node(size_t size, gfp_t flags, int node);
-void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
-
-#ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
-					   gfp_t gfpflags,
-					   int node, size_t size);
-#else
-static __always_inline void *
-kmem_cache_alloc_node_trace(struct kmem_cache *s,
-			      gfp_t gfpflags,
-			      int node, size_t size)
-{
-	return kmem_cache_alloc_node(s, gfpflags, node);
-}
-#endif
-
-static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
-{
-	if (__builtin_constant_p(size) &&
-		size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
-		int index = kmalloc_index(size);
-
-		if (!index)
-			return ZERO_SIZE_PTR;
-
-		return kmem_cache_alloc_node_trace(kmalloc_caches[index],
-			       flags, node, size);
-	}
-	return __kmalloc_node(size, flags, node);
-}
-#endif
-
 #endif /* _LINUX_SLUB_DEF_H */
diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h
index d08abf9..a372627 100644
--- a/include/uapi/linux/input.h
+++ b/include/uapi/linux/input.h
@@ -152,6 +152,7 @@
 #define EVIOCGEFFECTS		_IOR('E', 0x84, int)			/* Report number of effects playable at the same time */
 
 #define EVIOCGRAB		_IOW('E', 0x90, int)			/* Grab/Release device */
+#define EVIOCREVOKE		_IOW('E', 0x91, int)			/* Revoke device access */
 
 #define EVIOCSCLOCKID		_IOW('E', 0xa0, int)			/* Set clockid to be used for timestamps */
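
EVIOCREVOKE lets a privileged process hand an evdev fd to a client and
later cut off its access without reclaiming the fd; the ioctl argument
is reserved and must be NULL. A minimal userspace sketch (the device
path is an example):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/input.h>

	int main(void)
	{
		int fd = open("/dev/input/event0", O_RDONLY); /* example device */

		/* ... hand fd to a less-trusted client ... */

		if (ioctl(fd, EVIOCREVOKE, NULL))	/* argument must be NULL */
			perror("EVIOCREVOKE");
		/* the client's reads and ioctls on this fd now fail */
		return 0;
	}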
 
diff --git a/init/Kconfig b/init/Kconfig
index 18bd9e3..3ecd8a1 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1602,7 +1602,7 @@
 
 config SLUB_CPU_PARTIAL
 	default y
-	depends on SLUB
+	depends on SLUB && SMP
 	bool "SLUB per cpu partial cache"
 	help
	  Per cpu partial caches accelerate object allocation and freeing
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 538bade..a344327 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -19,6 +19,7 @@
 #include <asm/tlbflush.h>
 #include <asm/page.h>
 #include <linux/memcontrol.h>
+#include <trace/events/kmem.h>
 
 #include "slab.h"
 
@@ -373,7 +374,7 @@
 {
 	int index;
 
-	if (size > KMALLOC_MAX_SIZE) {
+	if (unlikely(size > KMALLOC_MAX_SIZE)) {
 		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
 		return NULL;
 	}
@@ -495,6 +496,15 @@
 }
 #endif /* !CONFIG_SLOB */
 
+#ifdef CONFIG_TRACING
+void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
+{
+	void *ret = kmalloc_order(size, flags, order);
+	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
+	return ret;
+}
+EXPORT_SYMBOL(kmalloc_order_trace);
+#endif
 
 #ifdef CONFIG_SLABINFO
 
diff --git a/mm/slob.c b/mm/slob.c
index 91bd3f2..4bf8809 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -462,11 +462,11 @@
 	return ret;
 }
 
-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
+void *__kmalloc(size_t size, gfp_t gfp)
 {
-	return __do_kmalloc_node(size, gfp, node, _RET_IP_);
+	return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, _RET_IP_);
 }
-EXPORT_SYMBOL(__kmalloc_node);
+EXPORT_SYMBOL(__kmalloc);
 
 #ifdef CONFIG_TRACING
 void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller)
@@ -534,7 +534,7 @@
 	return 0;
 }
 
-void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
+void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 {
 	void *b;
 
@@ -560,7 +560,27 @@
 	kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags);
 	return b;
 }
+EXPORT_SYMBOL(slob_alloc_node);
+
+void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
+{
+	return slob_alloc_node(cachep, flags, NUMA_NO_NODE);
+}
+EXPORT_SYMBOL(kmem_cache_alloc);
+
+#ifdef CONFIG_NUMA
+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
+{
+	return __do_kmalloc_node(size, gfp, node, _RET_IP_);
+}
+EXPORT_SYMBOL(__kmalloc_node);
+
+void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node)
+{
+	return slob_alloc_node(cachep, gfp, node);
+}
 EXPORT_SYMBOL(kmem_cache_alloc_node);
+#endif
 
 static void __kmem_cache_free(void *b, int size)
 {
diff --git a/mm/slub.c b/mm/slub.c
index 51df827..c3eb3d3 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -373,7 +373,8 @@
 #endif
 	{
 		slab_lock(page);
-		if (page->freelist == freelist_old && page->counters == counters_old) {
+		if (page->freelist == freelist_old &&
+					page->counters == counters_old) {
 			page->freelist = freelist_new;
 			page->counters = counters_new;
 			slab_unlock(page);
@@ -411,7 +412,8 @@
 
 		local_irq_save(flags);
 		slab_lock(page);
-		if (page->freelist == freelist_old && page->counters == counters_old) {
+		if (page->freelist == freelist_old &&
+					page->counters == counters_old) {
 			page->freelist = freelist_new;
 			page->counters = counters_new;
 			slab_unlock(page);
@@ -553,8 +555,9 @@
 
 static void print_page_info(struct page *page)
 {
-	printk(KERN_ERR "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
-		page, page->objects, page->inuse, page->freelist, page->flags);
+	printk(KERN_ERR
+	       "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
+	       page, page->objects, page->inuse, page->freelist, page->flags);
 
 }
 
@@ -629,7 +632,8 @@
 	print_trailer(s, page, object);
 }
 
-static void slab_err(struct kmem_cache *s, struct page *page, const char *fmt, ...)
+static void slab_err(struct kmem_cache *s, struct page *page,
+			const char *fmt, ...)
 {
 	va_list args;
 	char buf[100];
@@ -788,7 +792,8 @@
 	} else {
 		if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
 			check_bytes_and_report(s, page, p, "Alignment padding",
-				endobject, POISON_INUSE, s->inuse - s->object_size);
+				endobject, POISON_INUSE,
+				s->inuse - s->object_size);
 		}
 	}
 
@@ -873,7 +878,6 @@
 				object_err(s, page, object,
 					"Freechain corrupt");
 				set_freepointer(s, object, NULL);
-				break;
 			} else {
 				slab_err(s, page, "Freepointer corrupt");
 				page->freelist = NULL;
@@ -918,7 +922,8 @@
 			page->freelist);
 
 		if (!alloc)
-			print_section("Object ", (void *)object, s->object_size);
+			print_section("Object ", (void *)object,
+					s->object_size);
 
 		dump_stack();
 	}
@@ -937,7 +942,8 @@
 	return should_failslab(s->object_size, flags, s->flags);
 }
 
-static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object)
+static inline void slab_post_alloc_hook(struct kmem_cache *s,
+					gfp_t flags, void *object)
 {
 	flags &= gfp_allowed_mask;
 	kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
@@ -1039,7 +1045,8 @@
 	init_tracking(s, object);
 }
 
-static noinline int alloc_debug_processing(struct kmem_cache *s, struct page *page,
+static noinline int alloc_debug_processing(struct kmem_cache *s,
+					struct page *page,
 					void *object, unsigned long addr)
 {
 	if (!check_slab(s, page))
@@ -1743,7 +1750,8 @@
 /*
  * Remove the cpu slab
  */
-static void deactivate_slab(struct kmem_cache *s, struct page *page, void *freelist)
+static void deactivate_slab(struct kmem_cache *s, struct page *page,
+				void *freelist)
 {
 	enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
@@ -1999,7 +2007,8 @@
 		page->pobjects = pobjects;
 		page->next = oldpage;
 
-	} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
+	} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page)
+								!= oldpage);
 #endif
 }
 
@@ -2169,8 +2178,8 @@
 }
 
 /*
- * Check the page->freelist of a page and either transfer the freelist to the per cpu freelist
- * or deactivate the page.
+ * Check the page->freelist of a page and either transfer the freelist to the
+ * per cpu freelist or deactivate the page.
  *
  * The page is still frozen if the return value is not NULL.
  *
@@ -2314,7 +2323,8 @@
 		goto load_freelist;
 
 	/* Only entered in the debug case */
-	if (kmem_cache_debug(s) && !alloc_debug_processing(s, page, freelist, addr))
+	if (kmem_cache_debug(s) &&
+			!alloc_debug_processing(s, page, freelist, addr))
 		goto new_slab;	/* Slab failed checks. Next slab needed */
 
 	deactivate_slab(s, page, get_freepointer(s, freelist));
@@ -2372,7 +2382,7 @@
 
 	object = c->freelist;
 	page = c->page;
-	if (unlikely(!object || !page || !node_match(page, node)))
+	if (unlikely(!object || !node_match(page, node)))
 		object = __slab_alloc(s, gfpflags, node, addr, c);
 
 	else {
@@ -2382,13 +2392,15 @@
 		 * The cmpxchg will only match if there was no additional
 		 * operation and if we are on the right processor.
 		 *
-		 * The cmpxchg does the following atomically (without lock semantics!)
+		 * The cmpxchg does the following atomically (without lock
+		 * semantics!)
 		 * 1. Relocate first pointer to the current per cpu area.
 		 * 2. Verify that tid and freelist have not been changed
 		 * 3. If they were not changed replace tid and freelist
 		 *
-		 * Since this is without lock semantics the protection is only against
-		 * code executing on this cpu *not* from access by other cpus.
+		 * Since this is without lock semantics the protection is only
+		 * against code executing on this cpu *not* from access by
+		 * other cpus.
 		 */
 		if (unlikely(!this_cpu_cmpxchg_double(
 				s->cpu_slab->freelist, s->cpu_slab->tid,
@@ -2420,7 +2432,8 @@
 {
 	void *ret = slab_alloc(s, gfpflags, _RET_IP_);
 
-	trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size, s->size, gfpflags);
+	trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size,
+				s->size, gfpflags);
 
 	return ret;
 }
@@ -2434,14 +2447,6 @@
 	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_trace);
-
-void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
-{
-	void *ret = kmalloc_order(size, flags, order);
-	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
-	return ret;
-}
-EXPORT_SYMBOL(kmalloc_order_trace);
 #endif
 
 #ifdef CONFIG_NUMA
@@ -2512,8 +2517,10 @@
 			if (kmem_cache_has_cpu_partial(s) && !prior)
 
 				/*
-				 * Slab was on no list before and will be partially empty
-				 * We can defer the list move and instead freeze it.
+				 * Slab was on no list before and will be
+				 * partially empty.  We can defer the list
+				 * move and instead freeze it.
 				 */
 				new.frozen = 1;
 
@@ -3071,8 +3078,8 @@
 	 * A) The number of objects from per cpu partial slabs dumped to the
 	 *    per node list when we reach the limit.
 	 * B) The number of objects in cpu partial slabs to extract from the
-	 *    per node list when we run out of per cpu objects. We only fetch 50%
-	 *    to keep some capacity around for frees.
+	 *    per node list when we run out of per cpu objects. We only fetch
+	 *    50% to keep some capacity around for frees.
 	 */
 	if (!kmem_cache_has_cpu_partial(s))
 		s->cpu_partial = 0;
@@ -3099,8 +3106,8 @@
 	if (flags & SLAB_PANIC)
 		panic("Cannot create slab %s size=%lu realsize=%u "
 			"order=%u offset=%u flags=%lx\n",
-			s->name, (unsigned long)s->size, s->size, oo_order(s->oo),
-			s->offset, flags);
+			s->name, (unsigned long)s->size, s->size,
+			oo_order(s->oo), s->offset, flags);
 	return -EINVAL;
 }
 
@@ -3316,42 +3323,6 @@
 }
 EXPORT_SYMBOL(ksize);
 
-#ifdef CONFIG_SLUB_DEBUG
-bool verify_mem_not_deleted(const void *x)
-{
-	struct page *page;
-	void *object = (void *)x;
-	unsigned long flags;
-	bool rv;
-
-	if (unlikely(ZERO_OR_NULL_PTR(x)))
-		return false;
-
-	local_irq_save(flags);
-
-	page = virt_to_head_page(x);
-	if (unlikely(!PageSlab(page))) {
-		/* maybe it was from stack? */
-		rv = true;
-		goto out_unlock;
-	}
-
-	slab_lock(page);
-	if (on_freelist(page->slab_cache, page, object)) {
-		object_err(page->slab_cache, page, object, "Object is on free-list");
-		rv = false;
-	} else {
-		rv = true;
-	}
-	slab_unlock(page);
-
-out_unlock:
-	local_irq_restore(flags);
-	return rv;
-}
-EXPORT_SYMBOL(verify_mem_not_deleted);
-#endif
-
 void kfree(const void *x)
 {
 	struct page *page;
@@ -4162,15 +4133,17 @@
 				!cpumask_empty(to_cpumask(l->cpus)) &&
 				len < PAGE_SIZE - 60) {
 			len += sprintf(buf + len, " cpus=");
-			len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50,
+			len += cpulist_scnprintf(buf + len,
+						 PAGE_SIZE - len - 50,
 						 to_cpumask(l->cpus));
 		}
 
 		if (nr_online_nodes > 1 && !nodes_empty(l->nodes) &&
 				len < PAGE_SIZE - 60) {
 			len += sprintf(buf + len, " nodes=");
-			len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50,
-					l->nodes);
+			len += nodelist_scnprintf(buf + len,
+						  PAGE_SIZE - len - 50,
+						  l->nodes);
 		}
 
 		len += sprintf(buf + len, "\n");
@@ -4268,18 +4241,17 @@
 	int node;
 	int x;
 	unsigned long *nodes;
-	unsigned long *per_cpu;
 
-	nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
+	nodes = kzalloc(sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
 	if (!nodes)
 		return -ENOMEM;
-	per_cpu = nodes + nr_node_ids;
 
 	if (flags & SO_CPU) {
 		int cpu;
 
 		for_each_possible_cpu(cpu) {
-			struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
+			struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab,
+							       cpu);
 			int node;
 			struct page *page;
 
@@ -4304,8 +4276,6 @@
 				total += x;
 				nodes[node] += x;
 			}
-
-			per_cpu[node]++;
 		}
 	}
 
@@ -4315,12 +4285,11 @@
 		for_each_node_state(node, N_NORMAL_MEMORY) {
 			struct kmem_cache_node *n = get_node(s, node);
 
-		if (flags & SO_TOTAL)
-			x = atomic_long_read(&n->total_objects);
-		else if (flags & SO_OBJECTS)
-			x = atomic_long_read(&n->total_objects) -
-				count_partial(n, count_free);
-
+			if (flags & SO_TOTAL)
+				x = atomic_long_read(&n->total_objects);
+			else if (flags & SO_OBJECTS)
+				x = atomic_long_read(&n->total_objects) -
+					count_partial(n, count_free);
 			else
 				x = atomic_long_read(&n->nr_slabs);
 			total += x;
@@ -5136,7 +5105,8 @@
 
 #ifdef CONFIG_MEMCG_KMEM
 	if (!is_root_cache(s))
-		p += sprintf(p, "-%08d", memcg_cache_id(s->memcg_params->memcg));
+		p += sprintf(p, "-%08d",
+				memcg_cache_id(s->memcg_params->memcg));
 #endif
 
 	BUG_ON(p > name + ID_STR_LENGTH - 1);