Merge branch 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  ring-buffer: Add missing unlock
  tracing: Fix lockdep warning in global_clock()
diff --git a/Documentation/filesystems/00-INDEX b/Documentation/filesystems/00-INDEX
index 3bae418..4303614 100644
--- a/Documentation/filesystems/00-INDEX
+++ b/Documentation/filesystems/00-INDEX
@@ -16,6 +16,8 @@
 	- information about the BeOS filesystem for Linux.
 bfs.txt
 	- info for the SCO UnixWare Boot Filesystem (BFS).
+ceph.txt
+	- info for the Ceph Distributed File System.
 cifs.txt
 	- description of the CIFS filesystem.
 coda.txt
diff --git a/Documentation/filesystems/ceph.txt b/Documentation/filesystems/ceph.txt
index 6e03917..0660c9f 100644
--- a/Documentation/filesystems/ceph.txt
+++ b/Documentation/filesystems/ceph.txt
@@ -8,7 +8,7 @@
 
  * POSIX semantics
  * Seamless scaling from 1 to many thousands of nodes
- * High availability and reliability.  No single points of failure.
+ * High availability and reliability.  No single point of failure.
  * N-way replication of data across storage nodes
  * Fast recovery from node failures
  * Automatic rebalancing of data on node addition/removal
@@ -94,7 +94,7 @@
 
   wsize=X
 	Specify the maximum write size in bytes.  By default there is no
-	maximu.  Ceph will normally size writes based on the file stripe
+	maximum.  Ceph will normally size writes based on the file stripe
 	size.
 
   rsize=X
@@ -115,7 +115,7 @@
 	number of entries in that directory.
 
   nocrc
-	Disable CRC32C calculation for data writes.  If set, the OSD
+	Disable CRC32C calculation for data writes.  If set, the storage node
 	must rely on TCP's error correction to detect data corruption
 	in the data payload.
 
@@ -133,7 +133,8 @@
 	http://ceph.newdream.net/
 
 The Linux kernel client source tree is available at
-	git://ceph.newdream.net/linux-ceph-client.git
+	git://ceph.newdream.net/git/ceph-client.git
+	git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git
 
 and the source for the full system is at
-	git://ceph.newdream.net/ceph.git
+	git://ceph.newdream.net/git/ceph.git
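
For reference, the options documented above (wsize, rsize, nocrc, ...) are ordinary
mount options.  A minimal, illustrative invocation -- the monitor address, mount point
and wsize value below are placeholders, not taken from this patch:

	mount -t ceph 192.168.0.1:/ /mnt/ceph -o wsize=1048576,nocrc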
diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe.txt b/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe.txt
index 6e37be1..4f89302 100644
--- a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe.txt
+++ b/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe.txt
@@ -21,6 +21,15 @@
 - fsl,qe-num-snums: define how many serial number(SNUM) the QE can use for the
   threads.
 
+Optional properties:
+- fsl,firmware-phandle:
+    Usage: required only if there is no fsl,qe-firmware child node
+    Value type: <phandle>
+    Definition: Points to a firmware node (see "QE Firmware Node" below)
+        that contains the firmware that should be uploaded for this QE.
+        The compatible property for the firmware node should say,
+        "fsl,qe-firmware".
+
 Recommended properties
 - brg-frequency : the internal clock source frequency for baud-rate
   generators in Hz.
@@ -59,3 +68,48 @@
 		reg = <0 c000>;
 	};
      };
+
+* QE Firmware Node
+
+This node defines a firmware binary that is embedded in the device tree, for
+the purpose of passing the firmware from bootloader to the kernel, or from
+the hypervisor to the guest.
+
+The firmware node itself contains the firmware binary contents, a compatible
+property, and any firmware-specific properties.  The node should be placed
+inside a QE node that needs it.  Doing so eliminates the need for a
+fsl,firmware-phandle property.  Other QE nodes that need the same firmware
+should define an fsl,firmware-phandle property that points to the firmware node
+in the first QE node.
+
+The fsl,firmware property can be specified in the DTS (possibly using incbin)
+or can be inserted by the boot loader at boot time.
+
+Required properties:
+  - compatible
+      Usage: required
+      Value type: <string>
+      Definition: A standard property.  Specify a string that indicates what
+          kind of firmware it is.  For QE, this should be "fsl,qe-firmware".
+
+   - fsl,firmware
+      Usage: required
+      Value type: <prop-encoded-array>, encoded as an array of bytes
+      Definition: A standard property.  This property contains the firmware
+          binary "blob".
+
+Example:
+	qe1@e0080000 {
+		compatible = "fsl,qe";
+		qe_firmware:qe-firmware {
+			compatible = "fsl,qe-firmware";
+			fsl,firmware = [0x70 0xcd 0x00 0x00 0x01 0x46 0x45 ...];
+		};
+		...
+	};
+
+	qe2@e0090000 {
+		compatible = "fsl,qe";
+		fsl,firmware-phandle = <&qe_firmware>;
+		...
+	};
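
The example above covers the device tree side of the binding.  On the kernel side, a
consumer would typically resolve fsl,firmware-phandle with the generic OF helpers.  The
sketch below is illustrative only; np is assumed to point at the QE node and
qe_find_firmware_blob() is a hypothetical helper, not something added by this patch:

	#include <linux/of.h>

	/* Illustrative sketch: return the firmware blob referenced by
	 * fsl,firmware-phandle, together with its length in *len. */
	static const void *qe_find_firmware_blob(struct device_node *np, int *len)
	{
		struct device_node *fw_node;
		const void *fw;

		fw_node = of_parse_phandle(np, "fsl,firmware-phandle", 0);
		if (!fw_node)
			return NULL;	/* no external firmware node described */

		fw = of_get_property(fw_node, "fsl,firmware", len);
		of_node_put(fw_node);
		return fw;
	}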
diff --git a/MAINTAINERS b/MAINTAINERS
index 28d4bf0..3d29fa3 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1443,7 +1443,7 @@
 
 CEPH DISTRIBUTED FILE SYSTEM CLIENT
 M:	Sage Weil <sage@newdream.net>
-L:	ceph-devel@lists.sourceforge.net
+L:	ceph-devel@vger.kernel.org
 W:	http://ceph.newdream.net/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git
 S:	Supported
@@ -3083,6 +3083,7 @@
 ISDN SUBSYSTEM
 M:	Karsten Keil <isdn@linux-pingi.de>
 L:	isdn4linux@listserv.isdn4linux.de (subscribers-only)
+L:	netdev@vger.kernel.org
 W:	http://www.isdn4linux.de
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/kkeil/isdn-2.6.git
 S:	Maintained
@@ -3269,6 +3270,16 @@
 F:	include/linux/kexec.h
 F:	kernel/kexec.c
 
+KEYS/KEYRINGS:
+M:	David Howells <dhowells@redhat.com>
+L:	keyrings@linux-nfs.org
+S:	Maintained
+F:	Documentation/keys.txt
+F:	include/linux/key.h
+F:	include/linux/key-type.h
+F:	include/keys/
+F:	security/keys/
+
 KGDB
 M:	Jason Wessel <jason.wessel@windriver.com>
 L:	kgdb-bugreport@lists.sourceforge.net
diff --git a/Makefile b/Makefile
index a5ba759..67c1001 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 34
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
 NAME = Man-Eating Seals of Antiquity
 
 # *DOCUMENTATION*
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 72da7e0..0d08d41 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -15,6 +15,7 @@
 #include <asm/glue.h>
 #include <asm/shmparam.h>
 #include <asm/cachetype.h>
+#include <asm/outercache.h>
 
 #define CACHE_COLOUR(vaddr)	((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
 
@@ -219,12 +220,6 @@
 	void (*dma_flush_range)(const void *, const void *);
 };
 
-struct outer_cache_fns {
-	void (*inv_range)(unsigned long, unsigned long);
-	void (*clean_range)(unsigned long, unsigned long);
-	void (*flush_range)(unsigned long, unsigned long);
-};
-
 /*
  * Select the calling method
  */
@@ -281,37 +276,6 @@
 
 #endif
 
-#ifdef CONFIG_OUTER_CACHE
-
-extern struct outer_cache_fns outer_cache;
-
-static inline void outer_inv_range(unsigned long start, unsigned long end)
-{
-	if (outer_cache.inv_range)
-		outer_cache.inv_range(start, end);
-}
-static inline void outer_clean_range(unsigned long start, unsigned long end)
-{
-	if (outer_cache.clean_range)
-		outer_cache.clean_range(start, end);
-}
-static inline void outer_flush_range(unsigned long start, unsigned long end)
-{
-	if (outer_cache.flush_range)
-		outer_cache.flush_range(start, end);
-}
-
-#else
-
-static inline void outer_inv_range(unsigned long start, unsigned long end)
-{ }
-static inline void outer_clean_range(unsigned long start, unsigned long end)
-{ }
-static inline void outer_flush_range(unsigned long start, unsigned long end)
-{ }
-
-#endif
-
 /*
  * Copy user data from/to a page which is mapped into a different
  * processes address space.  Really, we want to allow our "user
diff --git a/arch/arm/include/asm/clkdev.h b/arch/arm/include/asm/clkdev.h
index 7a0690d..b56c138 100644
--- a/arch/arm/include/asm/clkdev.h
+++ b/arch/arm/include/asm/clkdev.h
@@ -13,6 +13,7 @@
 #define __ASM_CLKDEV_H
 
 struct clk;
+struct device;
 
 struct clk_lookup {
 	struct list_head	node;
diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
index 328f14a..237282f 100644
--- a/arch/arm/include/asm/irq.h
+++ b/arch/arm/include/asm/irq.h
@@ -17,6 +17,7 @@
 
 #ifndef __ASSEMBLY__
 struct irqaction;
+struct pt_regs;
 extern void migrate_irqs(void);
 
 extern void asm_do_IRQ(unsigned int, struct pt_regs *);
diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
new file mode 100644
index 0000000..25f76ba
--- /dev/null
+++ b/arch/arm/include/asm/outercache.h
@@ -0,0 +1,75 @@
+/*
+ * arch/arm/include/asm/outercache.h
+ *
+ * Copyright (C) 2010 ARM Ltd.
+ * Written by Catalin Marinas <catalin.marinas@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __ASM_OUTERCACHE_H
+#define __ASM_OUTERCACHE_H
+
+struct outer_cache_fns {
+	void (*inv_range)(unsigned long, unsigned long);
+	void (*clean_range)(unsigned long, unsigned long);
+	void (*flush_range)(unsigned long, unsigned long);
+#ifdef CONFIG_OUTER_CACHE_SYNC
+	void (*sync)(void);
+#endif
+};
+
+#ifdef CONFIG_OUTER_CACHE
+
+extern struct outer_cache_fns outer_cache;
+
+static inline void outer_inv_range(unsigned long start, unsigned long end)
+{
+	if (outer_cache.inv_range)
+		outer_cache.inv_range(start, end);
+}
+static inline void outer_clean_range(unsigned long start, unsigned long end)
+{
+	if (outer_cache.clean_range)
+		outer_cache.clean_range(start, end);
+}
+static inline void outer_flush_range(unsigned long start, unsigned long end)
+{
+	if (outer_cache.flush_range)
+		outer_cache.flush_range(start, end);
+}
+
+#else
+
+static inline void outer_inv_range(unsigned long start, unsigned long end)
+{ }
+static inline void outer_clean_range(unsigned long start, unsigned long end)
+{ }
+static inline void outer_flush_range(unsigned long start, unsigned long end)
+{ }
+
+#endif
+
+#ifdef CONFIG_OUTER_CACHE_SYNC
+static inline void outer_sync(void)
+{
+	if (outer_cache.sync)
+		outer_cache.sync();
+}
+#else
+static inline void outer_sync(void)
+{ }
+#endif
+
+#endif	/* __ASM_OUTERCACHE_H */
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index ca88e6a..4ace45e 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -60,6 +60,8 @@
 #include <linux/linkage.h>
 #include <linux/irqflags.h>
 
+#include <asm/outercache.h>
+
 #define __exception	__attribute__((section(".exception.text")))
 
 struct thread_info;
@@ -137,10 +139,12 @@
 #define dmb() __asm__ __volatile__ ("" : : : "memory")
 #endif
 
-#if __LINUX_ARM_ARCH__ >= 7 || defined(CONFIG_SMP)
-#define mb()		dmb()
+#ifdef CONFIG_ARCH_HAS_BARRIERS
+#include <mach/barriers.h>
+#elif __LINUX_ARM_ARCH__ >= 7 || defined(CONFIG_SMP)
+#define mb()		do { dsb(); outer_sync(); } while (0)
 #define rmb()		dmb()
-#define wmb()		dmb()
+#define wmb()		mb()
 #else
 #define mb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
 #define rmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
@@ -152,9 +156,9 @@
 #define smp_rmb()	barrier()
 #define smp_wmb()	barrier()
 #else
-#define smp_mb()	mb()
-#define smp_rmb()	rmb()
-#define smp_wmb()	wmb()
+#define smp_mb()	dmb()
+#define smp_rmb()	dmb()
+#define smp_wmb()	dmb()
 #endif
 
 #define read_barrier_depends()		do { } while(0)
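
The practical effect of the mb()/wmb() change above is easiest to see from a driver:
with an outer (L2) cache, dsb() alone does not guarantee that a write has left the L2
write buffer, so the mandatory barriers now call outer_sync() as well.  A minimal
sketch, assuming a hypothetical device with a DMA descriptor and a doorbell register
(struct my_dev, DESC_READY and MY_DOORBELL are placeholders, not part of this patch):

	static void my_dev_kick(struct my_dev *md, dma_addr_t buf)
	{
		md->desc->addr  = cpu_to_le32(buf);
		md->desc->flags = cpu_to_le32(DESC_READY);
		/* wmb() is now mb(): dsb() followed by outer_sync(), so the
		 * descriptor is drained from the L2 write buffer before the
		 * doorbell write below reaches the device. */
		wmb();
		writel(1, md->regs + MY_DOORBELL);
	}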
diff --git a/arch/arm/kernel/kprobes.c b/arch/arm/kernel/kprobes.c
index 60c62c3..610e0f5 100644
--- a/arch/arm/kernel/kprobes.c
+++ b/arch/arm/kernel/kprobes.c
@@ -393,6 +393,14 @@
 		/*
 		 * Setup an empty pt_regs. Fill SP and PC fields as
 		 * they're needed by longjmp_break_handler.
+		 *
+		 * We allocate some slack between the original SP and start of
+		 * our fabricated regs. To be precise we want to have worst case
+		 * covered which is STMFD with all 16 regs so we allocate 2 *
+		 * sizeof(struct_pt_regs)).
+		 *
+		 * This is to prevent any simulated instruction from writing
+		 * over the regs when they are accessing the stack.
 		 */
 		"sub    sp, %0, %1		\n\t"
 		"ldr    r0, ="__stringify(JPROBE_MAGIC_ADDR)"\n\t"
@@ -410,7 +418,7 @@
 		"ldmia	sp, {r0 - pc}		\n\t"
 		:
 		: "r" (kcb->jprobe_saved_regs.ARM_sp),
-		  "I" (sizeof(struct pt_regs)),
+		  "I" (sizeof(struct pt_regs) * 2),
 		  "J" (offsetof(struct pt_regs, ARM_sp)),
 		  "J" (offsetof(struct pt_regs, ARM_pc)),
 		  "J" (offsetof(struct pt_regs, ARM_cpsr))
diff --git a/arch/arm/lib/memmove.S b/arch/arm/lib/memmove.S
index 5025c86..938fc14 100644
--- a/arch/arm/lib/memmove.S
+++ b/arch/arm/lib/memmove.S
@@ -74,7 +74,7 @@
 		rsb	ip, ip, #32
 		addne	pc, pc, ip		@ C is always clear here
 		b	7f
-6:		nop
+6:		W(nop)
 		W(ldr)	r3, [r1, #-4]!
 		W(ldr)	r4, [r1, #-4]!
 		W(ldr)	r5, [r1, #-4]!
@@ -85,7 +85,7 @@
 
 		add	pc, pc, ip
 		nop
-		nop
+		W(nop)
 		W(str)	r3, [r0, #-4]!
 		W(str)	r4, [r0, #-4]!
 		W(str)	r5, [r0, #-4]!
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index c4ed9f9..5bd7c89 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -736,6 +736,12 @@
 config OUTER_CACHE
 	bool
 
+config OUTER_CACHE_SYNC
+	bool
+	help
+	  The outer cache has an outer_cache_fns.sync function pointer
+	  that can be used to drain the write buffer of the outer cache.
+
 config CACHE_FEROCEON_L2
 	bool "Enable the Feroceon L2 cache controller"
 	depends on ARCH_KIRKWOOD || ARCH_MV78XX0
@@ -757,6 +763,7 @@
 		   REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || ARCH_NOMADIK || ARCH_OMAP4
 	default y
 	select OUTER_CACHE
+	select OUTER_CACHE_SYNC
 	help
 	  This option enables the L2x0 PrimeCell.
 
@@ -781,3 +788,9 @@
 	int
 	default 6 if ARM_L1_CACHE_SHIFT_6
 	default 5
+
+config ARCH_HAS_BARRIERS
+	bool
+	help
+	  This option allows the use of custom mandatory barriers
+	  included via the mach/barriers.h file.
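
For platforms that select ARCH_HAS_BARRIERS, asm/system.h now expects <mach/barriers.h>
to provide the mandatory barriers itself (dsb()/dmb() are already defined at the point
of inclusion).  A minimal sketch of such a header, assuming the platform only needs an
extra write-buffer drain; plat_write_buffer_drain() is a hypothetical hook, not defined
by this patch:

	/* arch/arm/mach-foo/include/mach/barriers.h -- illustrative only */
	#ifndef __MACH_BARRIERS_H
	#define __MACH_BARRIERS_H

	#define mb()	do { dsb(); plat_write_buffer_drain(); } while (0)
	#define rmb()	dmb()
	#define wmb()	mb()

	#endif /* __MACH_BARRIERS_H */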
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 0733463..21ad68b 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -93,6 +93,15 @@
 }
 #endif
 
+static void l2x0_cache_sync(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&l2x0_lock, flags);
+	cache_sync();
+	spin_unlock_irqrestore(&l2x0_lock, flags);
+}
+
 static inline void l2x0_inv_all(void)
 {
 	unsigned long flags;
@@ -225,6 +234,7 @@
 	outer_cache.inv_range = l2x0_inv_range;
 	outer_cache.clean_range = l2x0_clean_range;
 	outer_cache.flush_range = l2x0_flush_range;
+	outer_cache.sync = l2x0_cache_sync;
 
 	printk(KERN_INFO "L2X0 cache controller enabled\n");
 }
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 7f3f59f..a420cb9 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -545,7 +545,7 @@
 		 */
 		elf_hwcap |= HWCAP_VFP;
 #ifdef CONFIG_VFPv3
-		if (VFP_arch >= 3) {
+		if (VFP_arch >= 2) {
 			elf_hwcap |= HWCAP_VFPv3;
 
 			/*
diff --git a/arch/cris/arch-v32/drivers/pci/bios.c b/arch/cris/arch-v32/drivers/pci/bios.c
index d4b9c36..bc0cfda 100644
--- a/arch/cris/arch-v32/drivers/pci/bios.c
+++ b/arch/cris/arch-v32/drivers/pci/bios.c
@@ -50,7 +50,7 @@
 	if ((res->flags & IORESOURCE_IO) && (start & 0x300))
 		start = (start + 0x3ff) & ~0x3ff;
 
-	return start
+	return start;
 }
 
 int pcibios_enable_resources(struct pci_dev *dev, int mask)
diff --git a/arch/frv/mb93090-mb00/pci-frv.c b/arch/frv/mb93090-mb00/pci-frv.c
index 16bc2cb..6b4fb28 100644
--- a/arch/frv/mb93090-mb00/pci-frv.c
+++ b/arch/frv/mb93090-mb00/pci-frv.c
@@ -41,7 +41,7 @@
 	if ((res->flags & IORESOURCE_IO) && (start & 0x300))
 		start = (start + 0x3ff) & ~0x3ff;
 
-	return start
+	return start;
 }
 
 
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 203ec61..76818f9 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -75,9 +75,6 @@
 config HAVE_LATENCYTOP_SUPPORT
 	def_bool y
 
-config PCI
-	def_bool n
-
 config DTC
 	def_bool y
 
diff --git a/arch/microblaze/Makefile b/arch/microblaze/Makefile
index 836832d..72f6e85 100644
--- a/arch/microblaze/Makefile
+++ b/arch/microblaze/Makefile
@@ -84,7 +84,7 @@
   echo '* linux.bin    - Create raw binary'
   echo '  linux.bin.gz - Create compressed raw binary'
   echo '  simpleImage.<dt> - ELF image with $(arch)/boot/dts/<dt>.dts linked in'
-  echo '                   - stripped elf with fdt blob
+  echo '                   - stripped elf with fdt blob'
   echo '  simpleImage.<dt>.unstrip - full ELF image with fdt blob'
   echo '  *_defconfig      - Select default config from arch/microblaze/configs'
   echo ''
@@ -94,3 +94,5 @@
   echo '  name of a dts file from the arch/microblaze/boot/dts/ directory'
   echo '  (minus the .dts extension).'
 endef
+
+MRPROPER_FILES += $(boot)/simpleImage.*
diff --git a/arch/microblaze/boot/Makefile b/arch/microblaze/boot/Makefile
index 902cf98..57f50c2 100644
--- a/arch/microblaze/boot/Makefile
+++ b/arch/microblaze/boot/Makefile
@@ -23,8 +23,6 @@
 endif
 
 $(obj)/linux.bin: vmlinux FORCE
-	[ -n $(CONFIG_INITRAMFS_SOURCE) ] && [ ! -e $(CONFIG_INITRAMFS_SOURCE) ] && \
-	touch $(CONFIG_INITRAMFS_SOURCE) || echo "No CPIO image"
 	$(call if_changed,objcopy)
 	$(call if_changed,uimage)
 	@echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
@@ -62,6 +60,4 @@
 $(obj)/%.dtb: $(dtstree)/%.dts FORCE
 	$(call if_changed,dtc)
 
-clean-kernel += linux.bin linux.bin.gz simpleImage.*
-
-clean-files += *.dtb simpleImage.*.unstrip
+clean-files += *.dtb simpleImage.*.unstrip linux.bin.ub
diff --git a/arch/microblaze/include/asm/processor.h b/arch/microblaze/include/asm/processor.h
index 563c6b9..8eeb092 100644
--- a/arch/microblaze/include/asm/processor.h
+++ b/arch/microblaze/include/asm/processor.h
@@ -14,7 +14,6 @@
 #include <asm/ptrace.h>
 #include <asm/setup.h>
 #include <asm/registers.h>
-#include <asm/segment.h>
 #include <asm/entry.h>
 #include <asm/current.h>
 
diff --git a/arch/microblaze/include/asm/segment.h b/arch/microblaze/include/asm/segment.h
deleted file mode 100644
index 0e7102c..0000000
--- a/arch/microblaze/include/asm/segment.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
- * Copyright (C) 2008-2009 PetaLogix
- * Copyright (C) 2006 Atmark Techno, Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#ifndef _ASM_MICROBLAZE_SEGMENT_H
-#define _ASM_MICROBLAZE_SEGMENT_H
-
-# ifndef __ASSEMBLY__
-
-typedef struct {
-	unsigned long seg;
-} mm_segment_t;
-
-/*
- * On Microblaze the fs value is actually the top of the corresponding
- * address space.
- *
- * The fs value determines whether argument validity checking should be
- * performed or not. If get_fs() == USER_DS, checking is performed, with
- * get_fs() == KERNEL_DS, checking is bypassed.
- *
- * For historical reasons, these macros are grossly misnamed.
- *
- * For non-MMU arch like Microblaze, KERNEL_DS and USER_DS is equal.
- */
-# define MAKE_MM_SEG(s)       ((mm_segment_t) { (s) })
-
-#  ifndef CONFIG_MMU
-#  define KERNEL_DS	MAKE_MM_SEG(0)
-#  define USER_DS	KERNEL_DS
-#  else
-#  define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFF)
-#  define USER_DS	MAKE_MM_SEG(TASK_SIZE - 1)
-#  endif
-
-# define get_ds()	(KERNEL_DS)
-# define get_fs()	(current_thread_info()->addr_limit)
-# define set_fs(val)	(current_thread_info()->addr_limit = (val))
-
-# define segment_eq(a, b)	((a).seg == (b).seg)
-
-# endif /* __ASSEMBLY__ */
-#endif /* _ASM_MICROBLAZE_SEGMENT_H */
diff --git a/arch/microblaze/include/asm/thread_info.h b/arch/microblaze/include/asm/thread_info.h
index 6e92885..b2ca80f 100644
--- a/arch/microblaze/include/asm/thread_info.h
+++ b/arch/microblaze/include/asm/thread_info.h
@@ -19,7 +19,6 @@
 #ifndef __ASSEMBLY__
 # include <linux/types.h>
 # include <asm/processor.h>
-# include <asm/segment.h>
 
 /*
  * low level task data that entry.S needs immediate access to
@@ -60,6 +59,10 @@
 	__u32	fsr;
 };
 
+typedef struct {
+	unsigned long seg;
+} mm_segment_t;
+
 struct thread_info {
 	struct task_struct	*task; /* main task structure */
 	struct exec_domain	*exec_domain; /* execution domain */
diff --git a/arch/microblaze/include/asm/tlbflush.h b/arch/microblaze/include/asm/tlbflush.h
index bcb8b41..2e1353c 100644
--- a/arch/microblaze/include/asm/tlbflush.h
+++ b/arch/microblaze/include/asm/tlbflush.h
@@ -24,6 +24,7 @@
 extern void _tlbia(void);
 
 #define __tlbia()	{ preempt_disable(); _tlbia(); preempt_enable(); }
+#define __tlbie(x)	{ _tlbie(x); }
 
 static inline void local_flush_tlb_all(void)
 	{ __tlbia(); }
@@ -31,7 +32,7 @@
 	{ __tlbia(); }
 static inline void local_flush_tlb_page(struct vm_area_struct *vma,
 				unsigned long vmaddr)
-	{ _tlbie(vmaddr); }
+	{ __tlbie(vmaddr); }
 static inline void local_flush_tlb_range(struct vm_area_struct *vma,
 		unsigned long start, unsigned long end)
 	{ __tlbia(); }
diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h
index 371bd6e..446bec2 100644
--- a/arch/microblaze/include/asm/uaccess.h
+++ b/arch/microblaze/include/asm/uaccess.h
@@ -22,101 +22,73 @@
 #include <asm/mmu.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
-#include <asm/segment.h>
 #include <linux/string.h>
 
 #define VERIFY_READ	0
 #define VERIFY_WRITE	1
 
-#define __clear_user(addr, n)	(memset((void *)(addr), 0, (n)), 0)
+/*
+ * On Microblaze the fs value is actually the top of the corresponding
+ * address space.
+ *
+ * The fs value determines whether argument validity checking should be
+ * performed or not. If get_fs() == USER_DS, checking is performed, with
+ * get_fs() == KERNEL_DS, checking is bypassed.
+ *
+ * For historical reasons, these macros are grossly misnamed.
+ *
+ * For a non-MMU arch like Microblaze, KERNEL_DS and USER_DS are equal.
+ */
+# define MAKE_MM_SEG(s)       ((mm_segment_t) { (s) })
+
+#  ifndef CONFIG_MMU
+#  define KERNEL_DS	MAKE_MM_SEG(0)
+#  define USER_DS	KERNEL_DS
+#  else
+#  define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFF)
+#  define USER_DS	MAKE_MM_SEG(TASK_SIZE - 1)
+#  endif
+
+# define get_ds()	(KERNEL_DS)
+# define get_fs()	(current_thread_info()->addr_limit)
+# define set_fs(val)	(current_thread_info()->addr_limit = (val))
+
+# define segment_eq(a, b)	((a).seg == (b).seg)
+
+/*
+ * The exception table consists of pairs of addresses: the first is the
+ * address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue. No registers are
+ * modified, so it is entirely up to the continuation code to figure out
+ * what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path. This means when everything is well,
+ * we don't even have to jump over them. Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+struct exception_table_entry {
+	unsigned long insn, fixup;
+};
+
+/* Returns 0 if exception not found and fixup otherwise.  */
+extern unsigned long search_exception_table(unsigned long);
 
 #ifndef CONFIG_MMU
 
-extern int ___range_ok(unsigned long addr, unsigned long size);
+/* Check against bounds of physical memory */
+static inline int ___range_ok(unsigned long addr, unsigned long size)
+{
+	return ((addr < memory_start) ||
+		((addr + size) > memory_end));
+}
 
 #define __range_ok(addr, size) \
 		___range_ok((unsigned long)(addr), (unsigned long)(size))
 
 #define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0)
-#define __access_ok(add, size) (__range_ok((addr), (size)) == 0)
 
-/* Undefined function to trigger linker error */
-extern int bad_user_access_length(void);
-
-/* FIXME this is function for optimalization -> memcpy */
-#define __get_user(var, ptr)				\
-({							\
-	int __gu_err = 0;				\
-	switch (sizeof(*(ptr))) {			\
-	case 1:						\
-	case 2:						\
-	case 4:						\
-		(var) = *(ptr);				\
-		break;					\
-	case 8:						\
-		memcpy((void *) &(var), (ptr), 8);	\
-		break;					\
-	default:					\
-		(var) = 0;				\
-		__gu_err = __get_user_bad();		\
-		break;					\
-	}						\
-	__gu_err;					\
-})
-
-#define __get_user_bad()	(bad_user_access_length(), (-EFAULT))
-
-/* FIXME is not there defined __pu_val */
-#define __put_user(var, ptr)					\
-({								\
-	int __pu_err = 0;					\
-	switch (sizeof(*(ptr))) {				\
-	case 1:							\
-	case 2:							\
-	case 4:							\
-		*(ptr) = (var);					\
-		break;						\
-	case 8: {						\
-		typeof(*(ptr)) __pu_val = (var);		\
-		memcpy(ptr, &__pu_val, sizeof(__pu_val));	\
-		}						\
-		break;						\
-	default:						\
-		__pu_err = __put_user_bad();			\
-		break;						\
-	}							\
-	__pu_err;						\
-})
-
-#define __put_user_bad()	(bad_user_access_length(), (-EFAULT))
-
-#define put_user(x, ptr)	__put_user((x), (ptr))
-#define get_user(x, ptr)	__get_user((x), (ptr))
-
-#define copy_to_user(to, from, n)	(memcpy((to), (from), (n)), 0)
-#define copy_from_user(to, from, n)	(memcpy((to), (from), (n)), 0)
-
-#define __copy_to_user(to, from, n)	(copy_to_user((to), (from), (n)))
-#define __copy_from_user(to, from, n)	(copy_from_user((to), (from), (n)))
-#define __copy_to_user_inatomic(to, from, n) \
-			(__copy_to_user((to), (from), (n)))
-#define __copy_from_user_inatomic(to, from, n) \
-			(__copy_from_user((to), (from), (n)))
-
-static inline unsigned long clear_user(void *addr, unsigned long size)
-{
-	if (access_ok(VERIFY_WRITE, addr, size))
-		size = __clear_user(addr, size);
-	return size;
-}
-
-/* Returns 0 if exception not found and fixup otherwise.  */
-extern unsigned long search_exception_table(unsigned long);
-
-extern long strncpy_from_user(char *dst, const char *src, long count);
-extern long strnlen_user(const char *src, long count);
-
-#else /* CONFIG_MMU */
+#else
 
 /*
  * Address is valid if:
@@ -129,24 +101,88 @@
 /* || printk("access_ok failed for %s at 0x%08lx (size %d), seg 0x%08x\n",
  type?"WRITE":"READ",addr,size,get_fs().seg)) */
 
-/*
- * All the __XXX versions macros/functions below do not perform
- * access checking. It is assumed that the necessary checks have been
- * already performed before the finction (macro) is called.
+#endif
+
+#ifdef CONFIG_MMU
+# define __FIXUP_SECTION	".section .fixup,\"ax\"\n"
+# define __EX_TABLE_SECTION	".section __ex_table,\"a\"\n"
+#else
+# define __FIXUP_SECTION	".section .discard,\"ax\"\n"
+# define __EX_TABLE_SECTION	".section .discard,\"a\"\n"
+#endif
+
+extern unsigned long __copy_tofrom_user(void __user *to,
+		const void __user *from, unsigned long size);
+
+/* Return: number of bytes not copied; 0 on success, non-zero on failure. */
+static inline unsigned long __must_check __clear_user(void __user *to,
+							unsigned long n)
+{
+	/* normal memset with two words to __ex_table */
+	__asm__ __volatile__ (				\
+			"1:	sb	r0, %2, r0;"	\
+			"	addik	%0, %0, -1;"	\
+			"	bneid	%0, 1b;"	\
+			"	addik	%2, %2, 1;"	\
+			"2:			"	\
+			__EX_TABLE_SECTION		\
+			".word	1b,2b;"			\
+			".previous;"			\
+		: "=r"(n)				\
+		: "0"(n), "r"(to)
+	);
+	return n;
+}
+
+static inline unsigned long __must_check clear_user(void __user *to,
+							unsigned long n)
+{
+	might_sleep();
+	if (unlikely(!access_ok(VERIFY_WRITE, to, n)))
+		return n;
+
+	return __clear_user(to, n);
+}
+
+/* put_user and get_user macros */
+extern long __user_bad(void);
+
+#define __get_user_asm(insn, __gu_ptr, __gu_val, __gu_err)	\
+({								\
+	__asm__ __volatile__ (					\
+			"1:"	insn	" %1, %2, r0;"		\
+			"	addk	%0, r0, r0;"		\
+			"2:			"		\
+			__FIXUP_SECTION				\
+			"3:	brid	2b;"			\
+			"	addik	%0, r0, %3;"		\
+			".previous;"				\
+			__EX_TABLE_SECTION			\
+			".word	1b,3b;"				\
+			".previous;"				\
+		: "=&r"(__gu_err), "=r"(__gu_val)		\
+		: "r"(__gu_ptr), "i"(-EFAULT)			\
+	);							\
+})
+
+/**
+ * get_user: - Get a simple variable from user space.
+ * @x:   Variable to store result.
+ * @ptr: Source address, in user space.
+ *
+ * Context: User context only.  This function may sleep.
+ *
+ * This macro copies a single simple variable from user space to kernel
+ * space.  It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and the result of
+ * dereferencing @ptr must be assignable to @x without a cast.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ * On error, the variable @x is set to zero.
  */
 
-#define get_user(x, ptr)						\
-({									\
-	access_ok(VERIFY_READ, (ptr), sizeof(*(ptr)))			\
-		? __get_user((x), (ptr)) : -EFAULT;			\
-})
-
-#define put_user(x, ptr)						\
-({									\
-	access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr)))			\
-		? __put_user((x), (ptr)) : -EFAULT;			\
-})
-
 #define __get_user(x, ptr)						\
 ({									\
 	unsigned long __gu_val;						\
@@ -163,30 +199,74 @@
 		__get_user_asm("lw", (ptr), __gu_val, __gu_err);	\
 		break;							\
 	default:							\
-		__gu_val = 0; __gu_err = -EINVAL;			\
+		/* __gu_val = 0; __gu_err = -EINVAL;*/ __gu_err = __user_bad();\
 	}								\
 	x = (__typeof__(*(ptr))) __gu_val;				\
 	__gu_err;							\
 })
 
-#define __get_user_asm(insn, __gu_ptr, __gu_val, __gu_err)		\
+
+#define get_user(x, ptr)						\
 ({									\
-	__asm__ __volatile__ (						\
-			"1:"	insn	" %1, %2, r0;			\
-				addk	%0, r0, r0;			\
-			2:						\
-			.section .fixup,\"ax\";				\
-			3:	brid	2b;				\
-				addik	%0, r0, %3;			\
-			.previous;					\
-			.section __ex_table,\"a\";			\
-			.word	1b,3b;					\
-			.previous;"					\
-		: "=r"(__gu_err), "=r"(__gu_val)			\
-		: "r"(__gu_ptr), "i"(-EFAULT)				\
-	);								\
+	access_ok(VERIFY_READ, (ptr), sizeof(*(ptr)))			\
+		? __get_user((x), (ptr)) : -EFAULT;			\
 })
 
+#define __put_user_asm(insn, __gu_ptr, __gu_val, __gu_err)	\
+({								\
+	__asm__ __volatile__ (					\
+			"1:"	insn	" %1, %2, r0;"		\
+			"	addk	%0, r0, r0;"		\
+			"2:			"		\
+			__FIXUP_SECTION				\
+			"3:	brid	2b;"			\
+			"	addik	%0, r0, %3;"		\
+			".previous;"				\
+			__EX_TABLE_SECTION			\
+			".word	1b,3b;"				\
+			".previous;"				\
+		: "=&r"(__gu_err)				\
+		: "r"(__gu_val), "r"(__gu_ptr), "i"(-EFAULT)	\
+	);							\
+})
+
+#define __put_user_asm_8(__gu_ptr, __gu_val, __gu_err)		\
+({								\
+	__asm__ __volatile__ ("	lwi	%0, %1, 0;"		\
+			"1:	swi	%0, %2, 0;"		\
+			"	lwi	%0, %1, 4;"		\
+			"2:	swi	%0, %2, 4;"		\
+			"	addk	%0, r0, r0;"		\
+			"3:			"		\
+			__FIXUP_SECTION				\
+			"4:	brid	3b;"			\
+			"	addik	%0, r0, %3;"		\
+			".previous;"				\
+			__EX_TABLE_SECTION			\
+			".word	1b,4b,2b,4b;"			\
+			".previous;"				\
+		: "=&r"(__gu_err)				\
+		: "r"(&__gu_val), "r"(__gu_ptr), "i"(-EFAULT)	\
+		);						\
+})
+
+/**
+ * put_user: - Write a simple value into user space.
+ * @x:   Value to copy to user space.
+ * @ptr: Destination address, in user space.
+ *
+ * Context: User context only.  This function may sleep.
+ *
+ * This macro copies a single simple value from kernel space to user
+ * space.  It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and @x must be assignable
+ * to the result of dereferencing @ptr.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ */
+
 #define __put_user(x, ptr)						\
 ({									\
 	__typeof__(*(ptr)) volatile __gu_val = (x);			\
@@ -195,7 +275,7 @@
 	case 1:								\
 		__put_user_asm("sb", (ptr), __gu_val, __gu_err);	\
 		break;							\
-	case 2: 							\
+	case 2:								\
 		__put_user_asm("sh", (ptr), __gu_val, __gu_err);	\
 		break;							\
 	case 4:								\
@@ -205,121 +285,82 @@
 		__put_user_asm_8((ptr), __gu_val, __gu_err);		\
 		break;							\
 	default:							\
-		__gu_err = -EINVAL;					\
+		/*__gu_err = -EINVAL;*/	__gu_err = __user_bad();	\
 	}								\
 	__gu_err;							\
 })
 
-#define __put_user_asm_8(__gu_ptr, __gu_val, __gu_err)	\
-({							\
-__asm__ __volatile__ ("	lwi	%0, %1, 0;		\
-		1:	swi	%0, %2, 0;		\
-			lwi	%0, %1, 4;		\
-		2:	swi	%0, %2, 4;		\
-			addk	%0,r0,r0;		\
-		3:					\
-		.section .fixup,\"ax\";			\
-		4:	brid	3b;			\
-			addik	%0, r0, %3;		\
-		.previous;				\
-		.section __ex_table,\"a\";		\
-		.word	1b,4b,2b,4b;			\
-		.previous;"				\
-	: "=&r"(__gu_err)				\
-	: "r"(&__gu_val),				\
-	"r"(__gu_ptr), "i"(-EFAULT)			\
-	);						\
+#ifndef CONFIG_MMU
+
+#define put_user(x, ptr)	__put_user((x), (ptr))
+
+#else /* CONFIG_MMU */
+
+#define put_user(x, ptr)						\
+({									\
+	access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr)))			\
+		? __put_user((x), (ptr)) : -EFAULT;			\
 })
+#endif /* CONFIG_MMU */
 
-#define __put_user_asm(insn, __gu_ptr, __gu_val, __gu_err)	\
-({								\
-	__asm__ __volatile__ (					\
-			"1:"	insn	" %1, %2, r0;		\
-				addk	%0, r0, r0;		\
-			2:					\
-			.section .fixup,\"ax\";			\
-			3:	brid	2b;			\
-				addik	%0, r0, %3;		\
-			.previous;				\
-			.section __ex_table,\"a\";		\
-			.word	1b,3b;				\
-			.previous;"				\
-		: "=r"(__gu_err)				\
-		: "r"(__gu_val), "r"(__gu_ptr), "i"(-EFAULT)	\
-	);							\
-})
-
-/*
- * Return: number of not copied bytes, i.e. 0 if OK or non-zero if fail.
- */
-static inline int clear_user(char *to, int size)
-{
-	if (size && access_ok(VERIFY_WRITE, to, size)) {
-		__asm__ __volatile__ ("				\
-				1:				\
-					sb	r0, %2, r0;	\
-					addik	%0, %0, -1;	\
-					bneid	%0, 1b;		\
-					addik	%2, %2, 1;	\
-				2:				\
-				.section __ex_table,\"a\";	\
-				.word	1b,2b;			\
-				.section .text;"		\
-			: "=r"(size)				\
-			: "0"(size), "r"(to)
-		);
-	}
-	return size;
-}
-
-#define __copy_from_user(to, from, n)	copy_from_user((to), (from), (n))
+/* copy_to_from_user */
+#define __copy_from_user(to, from, n)	\
+	__copy_tofrom_user((__force void __user *)(to), \
+				(void __user *)(from), (n))
 #define __copy_from_user_inatomic(to, from, n) \
 		copy_from_user((to), (from), (n))
 
-#define copy_to_user(to, from, n)					\
-	(access_ok(VERIFY_WRITE, (to), (n)) ?				\
-		__copy_tofrom_user((void __user *)(to),			\
-			(__force const void __user *)(from), (n))	\
-		: -EFAULT)
+static inline long copy_from_user(void *to,
+		const void __user *from, unsigned long n)
+{
+	might_sleep();
+	if (access_ok(VERIFY_READ, from, n))
+		return __copy_from_user(to, from, n);
+	return n;
+}
 
-#define __copy_to_user(to, from, n)	copy_to_user((to), (from), (n))
+#define __copy_to_user(to, from, n)	\
+		__copy_tofrom_user((void __user *)(to), \
+			(__force const void __user *)(from), (n))
 #define __copy_to_user_inatomic(to, from, n)	copy_to_user((to), (from), (n))
 
-#define copy_from_user(to, from, n)					\
-	(access_ok(VERIFY_READ, (from), (n)) ?				\
-		__copy_tofrom_user((__force void __user *)(to),		\
-			(void __user *)(from), (n))			\
-		: -EFAULT)
-
-extern int __strncpy_user(char *to, const char __user *from, int len);
-extern int __strnlen_user(const char __user *sstr, int len);
-
-#define strncpy_from_user(to, from, len)	\
-		(access_ok(VERIFY_READ, from, 1) ?	\
-			__strncpy_user(to, from, len) : -EFAULT)
-#define strnlen_user(str, len)	\
-		(access_ok(VERIFY_READ, str, 1) ? __strnlen_user(str, len) : 0)
-
-#endif /* CONFIG_MMU */
-
-extern unsigned long __copy_tofrom_user(void __user *to,
-		const void __user *from, unsigned long size);
+static inline long copy_to_user(void __user *to,
+		const void *from, unsigned long n)
+{
+	might_sleep();
+	if (access_ok(VERIFY_WRITE, to, n))
+		return __copy_to_user(to, from, n);
+	return n;
+}
 
 /*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path. This means when everything is well,
- * we don't even have to jump over them. Further, they do not intrude
- * on our cache or tlb entries.
+ * Copy a null terminated string from userspace.
  */
-struct exception_table_entry {
-	unsigned long insn, fixup;
-};
+extern int __strncpy_user(char *to, const char __user *from, int len);
+
+#define __strncpy_from_user	__strncpy_user
+
+static inline long
+strncpy_from_user(char *dst, const char __user *src, long count)
+{
+	if (!access_ok(VERIFY_READ, src, 1))
+		return -EFAULT;
+	return __strncpy_from_user(dst, src, count);
+}
+
+/*
+ * Return the size of a string (including the ending 0)
+ *
+ * Return 0 on exception, a value greater than N if too long
+ */
+extern int __strnlen_user(const char __user *sstr, int len);
+
+static inline long strnlen_user(const char __user *src, long n)
+{
+	if (!access_ok(VERIFY_READ, src, 1))
+		return 0;
+	return __strnlen_user(src, n);
+}
 
 #endif  /* __ASSEMBLY__ */
 #endif /* __KERNEL__ */
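
The kernel-doc comments above describe the get_user()/put_user() calling convention;
for completeness, a short driver-side sketch of the usual usage pattern (my_ioctl() and
the doubling of the value are purely illustrative, not part of this patch; assumes
<linux/fs.h> and <linux/uaccess.h>):

	static long my_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
	{
		int __user *uptr = (int __user *)arg;
		int val;

		if (get_user(val, uptr))	/* 0 on success, -EFAULT on fault */
			return -EFAULT;

		val *= 2;

		return put_user(val, uptr);	/* 0 on success, -EFAULT on fault */
	}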
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c
index b108497..4d5b0311 100644
--- a/arch/microblaze/kernel/dma.c
+++ b/arch/microblaze/kernel/dma.c
@@ -37,7 +37,7 @@
 
 static unsigned long get_dma_direct_offset(struct device *dev)
 {
-	if (dev)
+	if (likely(dev))
 		return (unsigned long)dev->archdata.dma_data;
 
 	return PCI_DRAM_OFFSET; /* FIXME Not sure if is correct */
diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S
index cb7815c..da6a5f5 100644
--- a/arch/microblaze/kernel/head.S
+++ b/arch/microblaze/kernel/head.S
@@ -51,6 +51,12 @@
 
 	.text
 ENTRY(_start)
+#if CONFIG_KERNEL_BASE_ADDR == 0
+	brai	TOPHYS(real_start)
+	.org	0x100
+real_start:
+#endif
+
 	mfs	r1, rmsr
 	andi	r1, r1, ~2
 	mts	rmsr, r1
@@ -99,8 +105,8 @@
 	tophys(r4,r4)			/* convert to phys address */
 	ori	r3, r0, COMMAND_LINE_SIZE - 1 /* number of loops */
 _copy_command_line:
-	lbu	r2, r5, r6 /* r7=r5+r6 - r5 contain pointer to command line */
-	sb	r2, r4, r6		/* addr[r4+r6]= r7*/
+	lbu	r2, r5, r6 /* r2=r5+r6 - r5 contains pointer to command line */
+	sb	r2, r4, r6		/* addr[r4+r6] = r2 */
 	addik	r6, r6, 1		/* increment counting */
 	bgtid	r3, _copy_command_line	/* loop for all entries       */
 	addik	r3, r3, -1		/* descrement loop */
@@ -128,7 +134,7 @@
 	 * virtual to physical.
 	 */
 	nop
-	addik	r3, r0, 63		/* Invalidate all TLB entries */
+	addik	r3, r0, MICROBLAZE_TLB_SIZE -1	/* Invalidate all TLB entries */
 _invalidate:
 	mts	rtlbx, r3
 	mts	rtlbhi, r0			/* flush: ensure V is clear   */
diff --git a/arch/microblaze/kernel/hw_exception_handler.S b/arch/microblaze/kernel/hw_exception_handler.S
index 2b86c03..995a212 100644
--- a/arch/microblaze/kernel/hw_exception_handler.S
+++ b/arch/microblaze/kernel/hw_exception_handler.S
@@ -313,13 +313,13 @@
 	mfs	r5, rmsr;
 	nop
 	swi	r5, r1, 0;
-	mfs	r3, resr
+	mfs	r4, resr
 	nop
-	mfs	r4, rear;
+	mfs	r3, rear;
 	nop
 
 #ifndef CONFIG_MMU
-	andi	r5, r3, 0x1000;		/* Check ESR[DS] */
+	andi	r5, r4, 0x1000;		/* Check ESR[DS] */
 	beqi	r5, not_in_delay_slot;	/* Branch if ESR[DS] not set */
 	mfs	r17, rbtr;	/* ESR[DS] set - return address in BTR */
 	nop
@@ -327,13 +327,14 @@
 	swi	r17, r1, PT_R17
 #endif
 
-	andi	r5, r3, 0x1F;		/* Extract ESR[EXC] */
+	andi	r5, r4, 0x1F;		/* Extract ESR[EXC] */
 
 #ifdef CONFIG_MMU
 	/* Calculate exception vector offset = r5 << 2 */
 	addk	r6, r5, r5; /* << 1 */
 	addk	r6, r6, r6; /* << 2 */
 
+#ifdef DEBUG
 /* counting which exception happen */
 	lwi	r5, r0, 0x200 + TOPHYS(r0_ram)
 	addi	r5, r5, 1
@@ -341,6 +342,7 @@
 	lwi	r5, r6, 0x200 + TOPHYS(r0_ram)
 	addi	r5, r5, 1
 	swi	r5, r6, 0x200 + TOPHYS(r0_ram)
+#endif
 /* end */
 	/* Load the HW Exception vector */
 	lwi	r6, r6, TOPHYS(_MB_HW_ExceptionVectorTable)
@@ -376,7 +378,7 @@
 	swi	r18, r1, PT_R18
 
 	or	r5, r1, r0
-	andi	r6, r3, 0x1F; /* Load ESR[EC] */
+	andi	r6, r4, 0x1F; /* Load ESR[EC] */
 	lwi	r7, r0, PER_CPU(KM) /* MS: saving current kernel mode to regs */
 	swi	r7, r1, PT_MODE
 	mfs	r7, rfsr
@@ -426,11 +428,11 @@
  */
 handle_unaligned_ex:
 	/* Working registers already saved: R3, R4, R5, R6
-	 *  R3 = ESR
-	 *  R4 = EAR
+	 *  R4 = ESR
+	 *  R3 = EAR
 	 */
 #ifdef CONFIG_MMU
-	andi	r6, r3, 0x1000			/* Check ESR[DS] */
+	andi	r6, r4, 0x1000			/* Check ESR[DS] */
 	beqi	r6, _no_delayslot		/* Branch if ESR[DS] not set */
 	mfs	r17, rbtr;	/* ESR[DS] set - return address in BTR */
 	nop
@@ -439,7 +441,7 @@
 	RESTORE_STATE;
 	bri	unaligned_data_trap
 #endif
-	andi	r6, r3, 0x3E0; /* Mask and extract the register operand */
+	andi	r6, r4, 0x3E0; /* Mask and extract the register operand */
 	srl	r6, r6; /* r6 >> 5 */
 	srl	r6, r6;
 	srl	r6, r6;
@@ -448,33 +450,33 @@
 	/* Store the register operand in a temporary location */
 	sbi	r6, r0, TOPHYS(ex_reg_op);
 
-	andi	r6, r3, 0x400; /* Extract ESR[S] */
+	andi	r6, r4, 0x400; /* Extract ESR[S] */
 	bnei	r6, ex_sw;
 ex_lw:
-	andi	r6, r3, 0x800; /* Extract ESR[W] */
+	andi	r6, r4, 0x800; /* Extract ESR[W] */
 	beqi	r6, ex_lhw;
-	lbui	r5, r4, 0; /* Exception address in r4 */
+	lbui	r5, r3, 0; /* Exception address in r3 */
 	/* Load a word, byte-by-byte from destination address
 		and save it in tmp space */
 	sbi	r5, r0, TOPHYS(ex_tmp_data_loc_0);
-	lbui	r5, r4, 1;
+	lbui	r5, r3, 1;
 	sbi	r5, r0, TOPHYS(ex_tmp_data_loc_1);
-	lbui	r5, r4, 2;
+	lbui	r5, r3, 2;
 	sbi	r5, r0, TOPHYS(ex_tmp_data_loc_2);
-	lbui	r5, r4, 3;
+	lbui	r5, r3, 3;
 	sbi	r5, r0, TOPHYS(ex_tmp_data_loc_3);
-	/* Get the destination register value into r3 */
-	lwi	r3, r0, TOPHYS(ex_tmp_data_loc_0);
+	/* Get the destination register value into r4 */
+	lwi	r4, r0, TOPHYS(ex_tmp_data_loc_0);
 	bri	ex_lw_tail;
 ex_lhw:
-	lbui	r5, r4, 0; /* Exception address in r4 */
+	lbui	r5, r3, 0; /* Exception address in r3 */
 	/* Load a half-word, byte-by-byte from destination
 		address and save it in tmp space */
 	sbi	r5, r0, TOPHYS(ex_tmp_data_loc_0);
-	lbui	r5, r4, 1;
+	lbui	r5, r3, 1;
 	sbi	r5, r0, TOPHYS(ex_tmp_data_loc_1);
-	/* Get the destination register value into r3 */
-	lhui	r3, r0, TOPHYS(ex_tmp_data_loc_0);
+	/* Get the destination register value into r4 */
+	lhui	r4, r0, TOPHYS(ex_tmp_data_loc_0);
 ex_lw_tail:
 	/* Get the destination register number into r5 */
 	lbui	r5, r0, TOPHYS(ex_reg_op);
@@ -502,25 +504,25 @@
 	andi	r6, r6, 0x800; /* Extract ESR[W] */
 	beqi	r6, ex_shw;
 	/* Get the word - delay slot */
-	swi	r3, r0, TOPHYS(ex_tmp_data_loc_0);
+	swi	r4, r0, TOPHYS(ex_tmp_data_loc_0);
 	/* Store the word, byte-by-byte into destination address */
-	lbui	r3, r0, TOPHYS(ex_tmp_data_loc_0);
-	sbi	r3, r4, 0;
-	lbui	r3, r0, TOPHYS(ex_tmp_data_loc_1);
-	sbi	r3, r4, 1;
-	lbui	r3, r0, TOPHYS(ex_tmp_data_loc_2);
-	sbi	r3, r4, 2;
-	lbui	r3, r0, TOPHYS(ex_tmp_data_loc_3);
-	sbi	r3, r4, 3;
+	lbui	r4, r0, TOPHYS(ex_tmp_data_loc_0);
+	sbi	r4, r3, 0;
+	lbui	r4, r0, TOPHYS(ex_tmp_data_loc_1);
+	sbi	r4, r3, 1;
+	lbui	r4, r0, TOPHYS(ex_tmp_data_loc_2);
+	sbi	r4, r3, 2;
+	lbui	r4, r0, TOPHYS(ex_tmp_data_loc_3);
+	sbi	r4, r3, 3;
 	bri	ex_handler_done;
 
 ex_shw:
 	/* Store the lower half-word, byte-by-byte into destination address */
-	swi	r3, r0, TOPHYS(ex_tmp_data_loc_0);
-	lbui	r3, r0, TOPHYS(ex_tmp_data_loc_2);
-	sbi	r3, r4, 0;
-	lbui	r3, r0, TOPHYS(ex_tmp_data_loc_3);
-	sbi	r3, r4, 1;
+	swi	r4, r0, TOPHYS(ex_tmp_data_loc_0);
+	lbui	r4, r0, TOPHYS(ex_tmp_data_loc_2);
+	sbi	r4, r3, 0;
+	lbui	r4, r0, TOPHYS(ex_tmp_data_loc_3);
+	sbi	r4, r3, 1;
 ex_sw_end: /* Exception handling of store word, ends. */
 
 ex_handler_done:
@@ -560,21 +562,16 @@
 		 */
 		mfs	r11, rpid
 		nop
-		bri	4
-		mfs	r3, rear		/* Get faulting address */
-		nop
 		/* If we are faulting a kernel address, we have to use the
 		 * kernel page tables.
 		 */
-		ori	r4, r0, CONFIG_KERNEL_START
-		cmpu	r4, r3, r4
-		bgti	r4, ex3
+		ori	r5, r0, CONFIG_KERNEL_START
+		cmpu	r5, r3, r5
+		bgti	r5, ex3
 		/* First, check if it was a zone fault (which means a user
 		 * tried to access a kernel or read-protected page - always
 		 * a SEGV). All other faults here must be stores, so no
 		 * need to check ESR_S as well. */
-		mfs	r4, resr
-		nop
 		andi	r4, r4, 0x800		/* ESR_Z - zone protection */
 		bnei	r4, ex2
 
@@ -589,8 +586,6 @@
 		 * tried to access a kernel or read-protected page - always
 		 * a SEGV). All other faults here must be stores, so no
 		 * need to check ESR_S as well. */
-		mfs	r4, resr
-		nop
 		andi	r4, r4, 0x800		/* ESR_Z */
 		bnei	r4, ex2
 		/* get current task address */
@@ -665,8 +660,6 @@
 		 * R3 = ESR
 		 */
 
-		mfs	r3, rear		/* Get faulting address */
-		nop
 		RESTORE_STATE;
 		bri	page_fault_instr_trap
 
@@ -677,18 +670,15 @@
 	 */
 	handle_data_tlb_miss_exception:
 		/* Working registers already saved: R3, R4, R5, R6
-		 * R3 = ESR
+		 * R3 = EAR, R4 = ESR
 		 */
 		mfs	r11, rpid
 		nop
-		bri	4
-		mfs	r3, rear		/* Get faulting address */
-		nop
 
 		/* If we are faulting a kernel address, we have to use the
 		 * kernel page tables. */
-		ori	r4, r0, CONFIG_KERNEL_START
-		cmpu	r4, r3, r4
+		ori	r6, r0, CONFIG_KERNEL_START
+		cmpu	r4, r3, r6
 		bgti	r4, ex5
 		ori	r4, r0, swapper_pg_dir
 		mts	rpid, r0		/* TLB will have 0 TID */
@@ -731,9 +721,8 @@
 		 * Many of these bits are software only. Bits we don't set
 		 * here we (properly should) assume have the appropriate value.
 		 */
+		brid	finish_tlb_load
 		andni	r4, r4, 0x0ce2		/* Make sure 20, 21 are zero */
-
-		bri	finish_tlb_load
 	ex7:
 		/* The bailout. Restore registers to pre-exception conditions
 		 * and call the heavyweights to help us out.
@@ -754,9 +743,6 @@
 		 */
 		mfs	r11, rpid
 		nop
-		bri	4
-		mfs	r3, rear		/* Get faulting address */
-		nop
 
 		/* If we are faulting a kernel address, we have to use the
 		 * kernel page tables.
@@ -792,7 +778,7 @@
 		lwi	r4, r5, 0		/* Get Linux PTE */
 
 		andi	r6, r4, _PAGE_PRESENT
-		beqi	r6, ex7
+		beqi	r6, ex10
 
 		ori	r4, r4, _PAGE_ACCESSED
 		swi	r4, r5, 0
@@ -805,9 +791,8 @@
 		 * Many of these bits are software only. Bits we don't set
 		 * here we (properly should) assume have the appropriate value.
 		 */
+		brid	finish_tlb_load
 		andni	r4, r4, 0x0ce2		/* Make sure 20, 21 are zero */
-
-		bri	finish_tlb_load
 	ex10:
 		/* The bailout. Restore registers to pre-exception conditions
 		 * and call the heavyweights to help us out.
@@ -837,9 +822,9 @@
 		andi	r5, r5, (MICROBLAZE_TLB_SIZE-1)
 		ori	r6, r0, 1
 		cmp	r31, r5, r6
-		blti	r31, sem
+		blti	r31, ex12
 		addik	r5, r6, 1
-	sem:
+	ex12:
 		/* MS: save back current TLB index */
 		swi	r5, r0, TOPHYS(tlb_index)
 
@@ -859,7 +844,6 @@
 		nop
 
 		/* Done...restore registers and get out of here. */
-	ex12:
 		mts	rpid, r11
 		nop
 		bri 4
diff --git a/arch/microblaze/kernel/misc.S b/arch/microblaze/kernel/misc.S
index df16c62..7cf8649 100644
--- a/arch/microblaze/kernel/misc.S
+++ b/arch/microblaze/kernel/misc.S
@@ -26,9 +26,10 @@
  * We avoid flushing the pinned 0, 1 and possibly 2 entries.
  */
 .globl _tlbia;
+.type  _tlbia, @function
 .align 4;
 _tlbia:
-	addik	r12, r0, 63 /* flush all entries (63 - 3) */
+	addik	r12, r0, MICROBLAZE_TLB_SIZE - 1 /* flush all entries (63 - 3) */
 	/* isync */
 _tlbia_1:
 	mts	rtlbx, r12
@@ -41,11 +42,13 @@
 	/* sync */
 	rtsd	r15, 8
 	nop
+	.size  _tlbia, . - _tlbia
 
 /*
  * Flush MMU TLB for a particular address (in r5)
  */
 .globl _tlbie;
+.type  _tlbie, @function
 .align 4;
 _tlbie:
 	mts	rtlbsx, r5 /* look up the address in TLB */
@@ -59,17 +62,20 @@
 	rtsd	r15, 8
 	nop
 
+	.size  _tlbie, . - _tlbie
+
 /*
  * Allocate TLB entry for early console
  */
 .globl early_console_reg_tlb_alloc;
+.type  early_console_reg_tlb_alloc, @function
 .align 4;
 early_console_reg_tlb_alloc:
 	/*
 	 * Load a TLB entry for the UART, so that microblaze_progress() can use
 	 * the UARTs nice and early.  We use a 4k real==virtual mapping.
 	 */
-	ori	r4, r0, 63
+	ori	r4, r0, MICROBLAZE_TLB_SIZE - 1
 	mts	rtlbx, r4 /* TLB slot 2 */
 
 	or	r4,r5,r0
@@ -86,6 +92,8 @@
 	rtsd	r15, 8
 	nop
 
+	.size  early_console_reg_tlb_alloc, . - early_console_reg_tlb_alloc
+
 /*
  * Copy a whole page (4096 bytes).
  */
@@ -104,6 +112,7 @@
 #define DCACHE_LINE_BYTES (4 * 4)
 
 .globl copy_page;
+.type  copy_page, @function
 .align 4;
 copy_page:
 	ori	r11, r0, (PAGE_SIZE/DCACHE_LINE_BYTES) - 1
@@ -118,3 +127,5 @@
 	addik	r11, r11, -1
 	rtsd	r15, 8
 	nop
+
+	.size  copy_page, . - copy_page
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c
index 812f1bf..09bed44 100644
--- a/arch/microblaze/kernel/process.c
+++ b/arch/microblaze/kernel/process.c
@@ -15,6 +15,7 @@
 #include <linux/bitops.h>
 #include <asm/system.h>
 #include <asm/pgalloc.h>
+#include <asm/uaccess.h> /* for USER_DS macros */
 #include <asm/cacheflush.h>
 
 void show_regs(struct pt_regs *regs)
@@ -74,7 +75,10 @@
 
 void default_idle(void)
 {
-	if (!hlt_counter) {
+	if (likely(hlt_counter)) {
+		while (!need_resched())
+			cpu_relax();
+	} else {
 		clear_thread_flag(TIF_POLLING_NRFLAG);
 		smp_mb__after_clear_bit();
 		local_irq_disable();
@@ -82,9 +86,7 @@
 			cpu_sleep();
 		local_irq_enable();
 		set_thread_flag(TIF_POLLING_NRFLAG);
-	} else
-		while (!need_resched())
-			cpu_relax();
+	}
 }
 
 void cpu_idle(void)
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c
index f974ec7..17c98db 100644
--- a/arch/microblaze/kernel/setup.c
+++ b/arch/microblaze/kernel/setup.c
@@ -92,6 +92,12 @@
 }
 #endif	/* CONFIG_MTD_UCLINUX_EBSS */
 
+#if defined(CONFIG_EARLY_PRINTK) && defined(CONFIG_SERIAL_UARTLITE_CONSOLE)
+#define eprintk early_printk
+#else
+#define eprintk printk
+#endif
+
 void __init machine_early_init(const char *cmdline, unsigned int ram,
 		unsigned int fdt, unsigned int msr)
 {
@@ -139,32 +145,32 @@
 	setup_early_printk(NULL);
 #endif
 
-	early_printk("Ramdisk addr 0x%08x, ", ram);
+	eprintk("Ramdisk addr 0x%08x, ", ram);
 	if (fdt)
-		early_printk("FDT at 0x%08x\n", fdt);
+		eprintk("FDT at 0x%08x\n", fdt);
 	else
-		early_printk("Compiled-in FDT at 0x%08x\n",
+		eprintk("Compiled-in FDT at 0x%08x\n",
 					(unsigned int)_fdt_start);
 
 #ifdef CONFIG_MTD_UCLINUX
-	early_printk("Found romfs @ 0x%08x (0x%08x)\n",
+	eprintk("Found romfs @ 0x%08x (0x%08x)\n",
 			romfs_base, romfs_size);
-	early_printk("#### klimit %p ####\n", old_klimit);
+	eprintk("#### klimit %p ####\n", old_klimit);
 	BUG_ON(romfs_size < 0); /* What else can we do? */
 
-	early_printk("Moved 0x%08x bytes from 0x%08x to 0x%08x\n",
+	eprintk("Moved 0x%08x bytes from 0x%08x to 0x%08x\n",
 			romfs_size, romfs_base, (unsigned)&_ebss);
 
-	early_printk("New klimit: 0x%08x\n", (unsigned)klimit);
+	eprintk("New klimit: 0x%08x\n", (unsigned)klimit);
 #endif
 
 #if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
 	if (msr)
-		early_printk("!!!Your kernel has setup MSR instruction but "
+		eprintk("!!!Your kernel has setup MSR instruction but "
 				"CPU don't have it %d\n", msr);
 #else
 	if (!msr)
-		early_printk("!!!Your kernel not setup MSR instruction but "
+		eprintk("!!!Your kernel not setup MSR instruction but "
 				"CPU have it %d\n", msr);
 #endif
 
diff --git a/arch/microblaze/kernel/traps.c b/arch/microblaze/kernel/traps.c
index eaaaf80..5e4570e 100644
--- a/arch/microblaze/kernel/traps.c
+++ b/arch/microblaze/kernel/traps.c
@@ -22,13 +22,11 @@
 	__enable_hw_exceptions();
 }
 
-static int kstack_depth_to_print = 24;
+static unsigned long kstack_depth_to_print = 24;
 
 static int __init kstack_setup(char *s)
 {
-	kstack_depth_to_print = strict_strtoul(s, 0, NULL);
-
-	return 1;
+	return !strict_strtoul(s, 0, &kstack_depth_to_print);
 }
 __setup("kstack=", kstack_setup);
 
diff --git a/arch/microblaze/lib/Makefile b/arch/microblaze/lib/Makefile
index b579db0..4dfe47d 100644
--- a/arch/microblaze/lib/Makefile
+++ b/arch/microblaze/lib/Makefile
@@ -10,5 +10,4 @@
 lib-y += memcpy.o memmove.o
 endif
 
-lib-$(CONFIG_NO_MMU) += uaccess.o
-lib-$(CONFIG_MMU) += uaccess_old.o
+lib-y += uaccess_old.o
diff --git a/arch/microblaze/lib/fastcopy.S b/arch/microblaze/lib/fastcopy.S
index 02e3ab4..fdc48bb 100644
--- a/arch/microblaze/lib/fastcopy.S
+++ b/arch/microblaze/lib/fastcopy.S
@@ -30,8 +30,9 @@
  */
 
 #include <linux/linkage.h>
-
+	.text
 	.globl	memcpy
+	.type  memcpy, @function
 	.ent	memcpy
 
 memcpy:
@@ -345,9 +346,11 @@
 	rtsd	r15, 8
 	nop
 
+.size  memcpy, . - memcpy
 .end memcpy
 /*----------------------------------------------------------------------------*/
 	.globl	memmove
+	.type  memmove, @function
 	.ent	memmove
 
 memmove:
@@ -659,4 +662,5 @@
 	rtsd	r15, 8
 	nop
 
+.size  memmove, . - memmove
 .end memmove
diff --git a/arch/microblaze/lib/memcpy.c b/arch/microblaze/lib/memcpy.c
index cc2108b..014bac9 100644
--- a/arch/microblaze/lib/memcpy.c
+++ b/arch/microblaze/lib/memcpy.c
@@ -53,7 +53,7 @@
 	const uint32_t *i_src;
 	uint32_t *i_dst;
 
-	if (c >= 4) {
+	if (likely(c >= 4)) {
 		unsigned  value, buf_hold;
 
 		/* Align the dstination to a word boundry. */
diff --git a/arch/microblaze/lib/memset.c b/arch/microblaze/lib/memset.c
index 4df851d..ecfb663 100644
--- a/arch/microblaze/lib/memset.c
+++ b/arch/microblaze/lib/memset.c
@@ -33,22 +33,23 @@
 #ifdef __HAVE_ARCH_MEMSET
 void *memset(void *v_src, int c, __kernel_size_t n)
 {
-
 	char *src = v_src;
 #ifdef CONFIG_OPT_LIB_FUNCTION
 	uint32_t *i_src;
-	uint32_t w32;
+	uint32_t w32 = 0;
 #endif
 	/* Truncate c to 8 bits */
 	c = (c & 0xFF);
 
 #ifdef CONFIG_OPT_LIB_FUNCTION
-	/* Make a repeating word out of it */
-	w32 = c;
-	w32 |= w32 << 8;
-	w32 |= w32 << 16;
+	if (unlikely(c)) {
+		/* Make a repeating word out of it */
+		w32 = c;
+		w32 |= w32 << 8;
+		w32 |= w32 << 16;
+	}
 
-	if (n >= 4) {
+	if (likely(n >= 4)) {
 		/* Align the destination to a word boundary */
 		/* This is done in an endian independant manner */
 		switch ((unsigned) src & 3) {
diff --git a/arch/microblaze/lib/uaccess.c b/arch/microblaze/lib/uaccess.c
deleted file mode 100644
index a853fe0..0000000
--- a/arch/microblaze/lib/uaccess.c
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright (C) 2006 Atmark Techno, Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#include <linux/string.h>
-#include <asm/uaccess.h>
-
-#include <asm/bug.h>
-
-long strnlen_user(const char __user *src, long count)
-{
-	return strlen(src) + 1;
-}
-
-#define __do_strncpy_from_user(dst, src, count, res)			\
-	do {								\
-		char *tmp;						\
-		strncpy(dst, src, count);				\
-		for (tmp = dst; *tmp && count > 0; tmp++, count--)	\
-			;						\
-		res = (tmp - dst);					\
-	} while (0)
-
-long __strncpy_from_user(char *dst, const char __user *src, long count)
-{
-	long res;
-	__do_strncpy_from_user(dst, src, count, res);
-	return res;
-}
-
-long strncpy_from_user(char *dst, const char __user *src, long count)
-{
-	long res = -EFAULT;
-	if (access_ok(VERIFY_READ, src, 1))
-		__do_strncpy_from_user(dst, src, count, res);
-	return res;
-}
-
-unsigned long __copy_tofrom_user(void __user *to,
-		const void __user *from, unsigned long size)
-{
-	memcpy(to, from, size);
-	return 0;
-}
diff --git a/arch/microblaze/lib/uaccess_old.S b/arch/microblaze/lib/uaccess_old.S
index 67f991c..5810cec 100644
--- a/arch/microblaze/lib/uaccess_old.S
+++ b/arch/microblaze/lib/uaccess_old.S
@@ -22,6 +22,7 @@
 
 	.text
 .globl __strncpy_user;
+.type  __strncpy_user, @function
 .align 4;
 __strncpy_user:
 
@@ -50,7 +51,7 @@
 3:
 	rtsd	r15,8
 	nop
-
+	.size   __strncpy_user, . - __strncpy_user
 
 	.section	.fixup, "ax"
 	.align	2
@@ -72,6 +73,7 @@
 
 	.text
 .globl __strnlen_user;
+.type  __strnlen_user, @function
 .align 4;
 __strnlen_user:
 	addik	r3,r6,0
@@ -90,7 +92,7 @@
 3:
 	rtsd	r15,8
 	nop
-
+	.size   __strnlen_user, . - __strnlen_user
 
 	.section	.fixup,"ax"
 4:
@@ -108,6 +110,7 @@
  */
 	.text
 .globl __copy_tofrom_user;
+.type  __copy_tofrom_user, @function
 .align 4;
 __copy_tofrom_user:
 	/*
@@ -116,20 +119,34 @@
 	 * r7, r3 - count
 	 * r4 - tempval
 	 */
-	addik	r3,r7,0
-	beqi	r3,3f
-1:
-	lbu	r4,r6,r0
-	addik	r6,r6,1
-2:
-	sb	r4,r5,r0
-	addik	r3,r3,-1
-	bneid	r3,1b
-	addik	r5,r5,1		/* delay slot */
+	beqid	r7, 3f /* zero size is not likely */
+	andi	r3, r7, 0x3 /* check low two bits of the count */
+	bneid	r3, 4f /* count not a multiple of 4 -> byte copying */
+	or	r3, r5, r6 /* check whether to/from are unaligned */
+	andi	r3, r3, 0x3 /* mask the alignment bits */
+	bneid	r3, 1f /* unaligned -> jump to byte copying */
+	or	r3, r0, r0
+
+/* at least one 4 byte copy */
+5:	lw	r4, r6, r3
+6:	sw	r4, r5, r3
+	addik	r7, r7, -4
+	bneid	r7, 5b
+	addik	r3, r3, 4
+	addik	r3, r7, 0
+	rtsd	r15, 8
+	nop
+4:	or	r3, r0, r0
+1:	lbu	r4,r6,r3
+2:	sb	r4,r5,r3
+	addik	r7,r7,-1
+	bneid	r7,1b
+	addik	r3,r3,1		/* delay slot */
 3:
+	addik	r3,r7,0
 	rtsd	r15,8
 	nop
-
+	.size   __copy_tofrom_user, . - __copy_tofrom_user
 
 	.section	__ex_table,"a"
-	.word	1b,3b,2b,3b
+	.word	1b,3b,2b,3b,5b,3b,6b,3b
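
In C terms, the rewritten __copy_tofrom_user follows this strategy (a hedged sketch, not the real implementation: the actual routine is the assembly above, and it relies on the __ex_table entries 1b/2b/5b/6b, all fixing up to 3b, to return the number of bytes left when a load or store faults):

#include <stdint.h>

/* Sketch of the copy strategy; user-access fault handling is omitted. */
static unsigned long copy_sketch(void *to, const void *from, unsigned long n)
{
	if (!(((uintptr_t)to | (uintptr_t)from | n) & 3)) {
		uint32_t *d = to;		/* word-at-a-time fast path */
		const uint32_t *s = from;

		for (; n; n -= 4)
			*d++ = *s++;
	} else {
		uint8_t *d = to;		/* byte-at-a-time slow path */
		const uint8_t *s = from;

		for (; n; n--)
			*d++ = *s++;
	}
	return n;				/* 0 on success, like the asm */
}
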
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
index d9d249a..7af87f4 100644
--- a/arch/microblaze/mm/fault.c
+++ b/arch/microblaze/mm/fault.c
@@ -106,7 +106,7 @@
 	regs->esr = error_code;
 
 	/* On a kernel SLB miss we can only check for a valid exception entry */
-	if (kernel_mode(regs) && (address >= TASK_SIZE)) {
+	if (unlikely(kernel_mode(regs) && (address >= TASK_SIZE))) {
 		printk(KERN_WARNING "kernel task_size exceed");
 		_exception(SIGSEGV, regs, code, address);
 	}
@@ -122,7 +122,7 @@
 	}
 #endif /* CONFIG_KGDB */
 
-	if (in_atomic() || !mm) {
+	if (unlikely(in_atomic() || !mm)) {
 		if (kernel_mode(regs))
 			goto bad_area_nosemaphore;
 
@@ -150,7 +150,7 @@
 	 * source.  If this is invalid we can skip the address space check,
 	 * thus avoiding the deadlock.
 	 */
-	if (!down_read_trylock(&mm->mmap_sem)) {
+	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
 		if (kernel_mode(regs) && !search_exception_tables(regs->pc))
 			goto bad_area_nosemaphore;
 
@@ -158,16 +158,16 @@
 	}
 
 	vma = find_vma(mm, address);
-	if (!vma)
+	if (unlikely(!vma))
 		goto bad_area;
 
 	if (vma->vm_start <= address)
 		goto good_area;
 
-	if (!(vma->vm_flags & VM_GROWSDOWN))
+	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
 		goto bad_area;
 
-	if (!is_write)
+	if (unlikely(!is_write))
 		goto bad_area;
 
 	/*
@@ -179,7 +179,7 @@
 	 * before setting the user r1.  Thus we allow the stack to
 	 * expand to 1MB without further checks.
 	 */
-	if (address + 0x100000 < vma->vm_end) {
+	if (unlikely(address + 0x100000 < vma->vm_end)) {
 
 		/* get user regs even if this fault is in kernel mode */
 		struct pt_regs *uregs = current->thread.regs;
@@ -209,15 +209,15 @@
 	code = SEGV_ACCERR;
 
 	/* a write */
-	if (is_write) {
-		if (!(vma->vm_flags & VM_WRITE))
+	if (unlikely(is_write)) {
+		if (unlikely(!(vma->vm_flags & VM_WRITE)))
 			goto bad_area;
 	/* a read */
 	} else {
 		/* protection fault */
-		if (error_code & 0x08000000)
+		if (unlikely(error_code & 0x08000000))
 			goto bad_area;
-		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+		if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC))))
 			goto bad_area;
 	}
 
@@ -235,7 +235,7 @@
 			goto do_sigbus;
 		BUG();
 	}
-	if (fault & VM_FAULT_MAJOR)
+	if (unlikely(fault & VM_FAULT_MAJOR))
 		current->maj_flt++;
 	else
 		current->min_flt++;
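
All of the annotations above use the kernel's likely()/unlikely() hints, which expand to GCC's __builtin_expect so the compiler keeps the expected path on the fall-through and moves error handling out of line. A standalone sketch of the same pattern (the macro bodies match the usual kernel definitions; check() is illustrative):

#include <stdio.h>

#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

/* Illustrative: hint that the error branch is rarely taken. */
static int check(const void *p)
{
	if (unlikely(p == NULL)) {
		fprintf(stderr, "unexpected NULL\n");
		return -1;
	}
	return 0;
}

int main(void)
{
	int x = 42;
	return check(&x);
}
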
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index 1608e2e..40bc10e 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -165,7 +165,6 @@
 	for (addr = begin; addr < end; addr += PAGE_SIZE) {
 		ClearPageReserved(virt_to_page(addr));
 		init_page_count(virt_to_page(addr));
-		memset((void *)addr, 0xcc, PAGE_SIZE);
 		free_page(addr);
 		totalram_pages++;
 	}
@@ -208,14 +207,6 @@
 }
 
 #ifndef CONFIG_MMU
-/* Check against bounds of physical memory */
-int ___range_ok(unsigned long addr, unsigned long size)
-{
-	return ((addr < memory_start) ||
-		((addr + size) > memory_end));
-}
-EXPORT_SYMBOL(___range_ok);
-
 int page_is_ram(unsigned long pfn)
 {
 	return __range_ok(pfn, 0);
diff --git a/arch/microblaze/mm/pgtable.c b/arch/microblaze/mm/pgtable.c
index 63a6fd0..d31312c 100644
--- a/arch/microblaze/mm/pgtable.c
+++ b/arch/microblaze/mm/pgtable.c
@@ -154,7 +154,7 @@
 		err = 0;
 		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
 				__pgprot(flags)));
-		if (mem_init_done)
+		if (unlikely(mem_init_done))
 			flush_HPTE(0, va, pmd_val(*pd));
 			/* flush_HPTE(0, va, pg); */
 	}
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
index 929d017..d4f8be3 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
@@ -481,6 +481,8 @@
 	if (rc)
 		goto err_bcom_rx_irq;
 
+	lpbfifo.dma_irqs_enabled = 1;
+
 	/* Request the Bestcomm transmit (memory --> fifo) task and IRQ */
 	lpbfifo.bcom_tx_task =
 		bcom_gen_bd_tx_init(2, res.start + LPBFIFO_REG_FIFO_DATA,
diff --git a/arch/sh/configs/ecovec24_defconfig b/arch/sh/configs/ecovec24_defconfig
index 18e3356..6041c66 100644
--- a/arch/sh/configs/ecovec24_defconfig
+++ b/arch/sh/configs/ecovec24_defconfig
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc2
-# Mon Jan  4 11:20:36 2010
+# Linux kernel version: 2.6.34-rc2
+# Mon Mar 29 02:21:58 2010
 #
 CONFIG_SUPERH=y
 CONFIG_SUPERH32=y
@@ -13,8 +13,8 @@
 CONFIG_GENERIC_HWEIGHT=y
 CONFIG_GENERIC_HARDIRQS=y
 CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
-CONFIG_GENERIC_IRQ_PROBE=y
 CONFIG_IRQ_PER_CPU=y
+CONFIG_SPARSE_IRQ=y
 CONFIG_GENERIC_GPIO=y
 CONFIG_GENERIC_TIME=y
 CONFIG_GENERIC_CLOCKEVENTS=y
@@ -32,6 +32,7 @@
 CONFIG_ARCH_HAS_DEFAULT_IDLE=y
 CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
 CONFIG_DMA_NONCOHERENT=y
+CONFIG_NEED_DMA_MAP_STATE=y
 CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
 CONFIG_CONSTRUCTORS=y
 
@@ -47,9 +48,11 @@
 CONFIG_HAVE_KERNEL_GZIP=y
 CONFIG_HAVE_KERNEL_BZIP2=y
 CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
 CONFIG_KERNEL_GZIP=y
 # CONFIG_KERNEL_BZIP2 is not set
 # CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
 CONFIG_SWAP=y
 CONFIG_SYSVIPC=y
 CONFIG_SYSVIPC_SYSCTL=y
@@ -71,14 +74,8 @@
 # CONFIG_TREE_RCU_TRACE is not set
 # CONFIG_IKCONFIG is not set
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_GROUP_SCHED=y
-CONFIG_FAIR_GROUP_SCHED=y
-# CONFIG_RT_GROUP_SCHED is not set
-CONFIG_USER_SCHED=y
-# CONFIG_CGROUP_SCHED is not set
 # CONFIG_CGROUPS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
 # CONFIG_RELAY is not set
 # CONFIG_NAMESPACES is not set
 # CONFIG_BLK_DEV_INITRD is not set
@@ -107,7 +104,7 @@
 #
 # Kernel Performance Events And Counters
 #
-# CONFIG_PERF_EVENTS is not set
+CONFIG_PERF_EVENTS=y
 # CONFIG_PERF_COUNTERS is not set
 CONFIG_VM_EVENT_COUNTERS=y
 CONFIG_COMPAT_BRK=y
@@ -116,13 +113,13 @@
 # CONFIG_SLOB is not set
 # CONFIG_PROFILING is not set
 CONFIG_HAVE_OPROFILE=y
-CONFIG_HAVE_IOREMAP_PROT=y
 CONFIG_HAVE_KPROBES=y
 CONFIG_HAVE_KRETPROBES=y
 CONFIG_HAVE_ARCH_TRACEHOOK=y
 CONFIG_HAVE_DMA_ATTRS=y
 CONFIG_HAVE_CLK=y
 CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
 
 #
 # GCOV-based kernel profiling
@@ -234,12 +231,12 @@
 CONFIG_QUICKLIST=y
 CONFIG_MMU=y
 CONFIG_PAGE_OFFSET=0x80000000
-CONFIG_FORCE_MAX_ZONEORDER=11
+CONFIG_FORCE_MAX_ZONEORDER=12
 CONFIG_MEMORY_START=0x08000000
 CONFIG_MEMORY_SIZE=0x10000000
 CONFIG_29BIT=y
-# CONFIG_PMB_ENABLE is not set
-# CONFIG_X2TLB is not set
+# CONFIG_PMB is not set
+CONFIG_X2TLB=y
 CONFIG_VSYSCALL=y
 CONFIG_ARCH_FLATMEM_ENABLE=y
 CONFIG_ARCH_SPARSEMEM_ENABLE=y
@@ -247,6 +244,8 @@
 CONFIG_MAX_ACTIVE_REGIONS=1
 CONFIG_ARCH_POPULATES_NODE_MAP=y
 CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_IOREMAP_FIXED=y
+CONFIG_UNCACHED_MAPPING=y
 CONFIG_PAGE_SIZE_4KB=y
 # CONFIG_PAGE_SIZE_8KB is not set
 # CONFIG_PAGE_SIZE_16KB is not set
@@ -262,7 +261,7 @@
 CONFIG_SPLIT_PTLOCK_CPUS=4
 # CONFIG_PHYS_ADDR_T_64BIT is not set
 CONFIG_ZONE_DMA_FLAG=0
-CONFIG_NR_QUICK=2
+CONFIG_NR_QUICK=1
 # CONFIG_KSM is not set
 CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
 
@@ -337,7 +336,6 @@
 # CONFIG_PREEMPT_VOLUNTARY is not set
 CONFIG_PREEMPT=y
 CONFIG_GUSA=y
-# CONFIG_SPARSE_IRQ is not set
 
 #
 # Boot options
@@ -347,7 +345,7 @@
 CONFIG_ENTRY_OFFSET=0x00001000
 CONFIG_CMDLINE_OVERWRITE=y
 # CONFIG_CMDLINE_EXTEND is not set
-CONFIG_CMDLINE="console=tty0, console=ttySC0,115200 root=/dev/nfs ip=dhcp mem=120M memchunk.vpu=4m"
+CONFIG_CMDLINE="console=tty0, console=ttySC0,115200 root=/dev/nfs ip=dhcp mem=248M memchunk.vpu=8m memchunk.veu0=4m"
 
 #
 # Bus options
@@ -373,6 +371,7 @@
 CONFIG_SUSPEND_FREEZER=y
 # CONFIG_HIBERNATION is not set
 CONFIG_PM_RUNTIME=y
+CONFIG_PM_OPS=y
 # CONFIG_CPU_IDLE is not set
 CONFIG_NET=y
 
@@ -380,7 +379,6 @@
 # Networking options
 #
 CONFIG_PACKET=y
-# CONFIG_PACKET_MMAP is not set
 CONFIG_UNIX=y
 # CONFIG_NET_KEY is not set
 CONFIG_INET=y
@@ -445,7 +443,45 @@
 # CONFIG_NET_PKTGEN is not set
 # CONFIG_HAMRADIO is not set
 # CONFIG_CAN is not set
-# CONFIG_IRDA is not set
+CONFIG_IRDA=y
+
+#
+# IrDA protocols
+#
+# CONFIG_IRLAN is not set
+# CONFIG_IRCOMM is not set
+# CONFIG_IRDA_ULTRA is not set
+
+#
+# IrDA options
+#
+# CONFIG_IRDA_CACHE_LAST_LSAP is not set
+# CONFIG_IRDA_FAST_RR is not set
+# CONFIG_IRDA_DEBUG is not set
+
+#
+# Infrared-port device drivers
+#
+
+#
+# SIR device drivers
+#
+# CONFIG_IRTTY_SIR is not set
+
+#
+# Dongle support
+#
+CONFIG_SH_SIR=y
+# CONFIG_KINGSUN_DONGLE is not set
+# CONFIG_KSDAZZLE_DONGLE is not set
+# CONFIG_KS959_DONGLE is not set
+
+#
+# FIR device drivers
+#
+# CONFIG_USB_IRDA is not set
+# CONFIG_SIGMATEL_FIR is not set
+# CONFIG_MCS_FIR is not set
 # CONFIG_BT is not set
 # CONFIG_AF_RXRPC is not set
 CONFIG_WIRELESS=y
@@ -556,6 +592,7 @@
 # CONFIG_MTD_NAND_NANDSIM is not set
 # CONFIG_MTD_NAND_PLATFORM is not set
 # CONFIG_MTD_ALAUDA is not set
+# CONFIG_MTD_NAND_SH_FLCTL is not set
 # CONFIG_MTD_ONENAND is not set
 
 #
@@ -597,6 +634,7 @@
 # CONFIG_ICS932S401 is not set
 # CONFIG_ENCLOSURE_SERVICES is not set
 # CONFIG_ISL29003 is not set
+# CONFIG_SENSORS_TSL2550 is not set
 # CONFIG_DS1682 is not set
 # CONFIG_TI_DAC7512 is not set
 # CONFIG_C2PORT is not set
@@ -616,6 +654,7 @@
 #
 # SCSI device support
 #
+CONFIG_SCSI_MOD=y
 # CONFIG_RAID_ATTRS is not set
 CONFIG_SCSI=y
 CONFIG_SCSI_DMA=y
@@ -768,7 +807,29 @@
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_INPUT_JOYSTICK is not set
 # CONFIG_INPUT_TABLET is not set
-# CONFIG_INPUT_TOUCHSCREEN is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+# CONFIG_TOUCHSCREEN_ADS7846 is not set
+# CONFIG_TOUCHSCREEN_AD7877 is not set
+# CONFIG_TOUCHSCREEN_AD7879_I2C is not set
+# CONFIG_TOUCHSCREEN_AD7879_SPI is not set
+# CONFIG_TOUCHSCREEN_AD7879 is not set
+# CONFIG_TOUCHSCREEN_DYNAPRO is not set
+# CONFIG_TOUCHSCREEN_EETI is not set
+# CONFIG_TOUCHSCREEN_FUJITSU is not set
+# CONFIG_TOUCHSCREEN_GUNZE is not set
+# CONFIG_TOUCHSCREEN_ELO is not set
+# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
+# CONFIG_TOUCHSCREEN_MCS5000 is not set
+# CONFIG_TOUCHSCREEN_MTOUCH is not set
+# CONFIG_TOUCHSCREEN_INEXIO is not set
+# CONFIG_TOUCHSCREEN_MK712 is not set
+# CONFIG_TOUCHSCREEN_PENMOUNT is not set
+# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
+# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
+# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
+# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
+CONFIG_TOUCHSCREEN_TSC2007=y
+# CONFIG_TOUCHSCREEN_W90X900 is not set
 # CONFIG_INPUT_MISC is not set
 
 #
@@ -802,10 +863,10 @@
 CONFIG_SERIAL_SH_SCI_CONSOLE=y
 CONFIG_SERIAL_CORE=y
 CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
 CONFIG_UNIX98_PTYS=y
 # CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
-CONFIG_LEGACY_PTYS=y
-CONFIG_LEGACY_PTY_COUNT=256
+# CONFIG_LEGACY_PTYS is not set
 # CONFIG_IPMI_HANDLER is not set
 CONFIG_HW_RANDOM=y
 # CONFIG_HW_RANDOM_TIMERIOMEM is not set
@@ -830,6 +891,7 @@
 # CONFIG_I2C_OCORES is not set
 CONFIG_I2C_SH_MOBILE=y
 # CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_XILINX is not set
 
 #
 # External I2C/SMBus adapter drivers
@@ -843,15 +905,9 @@
 #
 # CONFIG_I2C_PCA_PLATFORM is not set
 # CONFIG_I2C_STUB is not set
-
-#
-# Miscellaneous I2C Chip support
-#
-# CONFIG_SENSORS_TSL2550 is not set
 # CONFIG_I2C_DEBUG_CORE is not set
 # CONFIG_I2C_DEBUG_ALGO is not set
 # CONFIG_I2C_DEBUG_BUS is not set
-# CONFIG_I2C_DEBUG_CHIP is not set
 CONFIG_SPI=y
 CONFIG_SPI_MASTER=y
 
@@ -882,13 +938,16 @@
 #
 # Memory mapped GPIO expanders:
 #
+# CONFIG_GPIO_IT8761E is not set
 
 #
 # I2C GPIO expanders:
 #
+# CONFIG_GPIO_MAX7300 is not set
 # CONFIG_GPIO_MAX732X is not set
 # CONFIG_GPIO_PCA953X is not set
 # CONFIG_GPIO_PCF857X is not set
+# CONFIG_GPIO_ADP5588 is not set
 
 #
 # PCI GPIO expanders:
@@ -919,23 +978,26 @@
 #
 # Multifunction device drivers
 #
-# CONFIG_MFD_CORE is not set
+CONFIG_MFD_CORE=y
+# CONFIG_MFD_88PM860X is not set
 # CONFIG_MFD_SM501 is not set
-# CONFIG_MFD_SH_MOBILE_SDHI is not set
+CONFIG_MFD_SH_MOBILE_SDHI=y
 # CONFIG_HTC_PASIC3 is not set
+# CONFIG_HTC_I2CPLD is not set
 # CONFIG_TPS65010 is not set
 # CONFIG_TWL4030_CORE is not set
 # CONFIG_MFD_TMIO is not set
 # CONFIG_PMIC_DA903X is not set
 # CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_MAX8925 is not set
 # CONFIG_MFD_WM8400 is not set
 # CONFIG_MFD_WM831X is not set
 # CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_WM8994 is not set
 # CONFIG_MFD_PCF50633 is not set
 # CONFIG_MFD_MC13783 is not set
 # CONFIG_AB3100_CORE is not set
 # CONFIG_EZX_PCAP is not set
-# CONFIG_MFD_88PM8607 is not set
 # CONFIG_AB4500_CORE is not set
 # CONFIG_REGULATOR is not set
 CONFIG_MEDIA_SUPPORT=y
@@ -985,10 +1047,10 @@
 # CONFIG_SOC_CAMERA_MT9M001 is not set
 # CONFIG_SOC_CAMERA_MT9M111 is not set
 # CONFIG_SOC_CAMERA_MT9T031 is not set
-# CONFIG_SOC_CAMERA_MT9T112 is not set
+CONFIG_SOC_CAMERA_MT9T112=y
 # CONFIG_SOC_CAMERA_MT9V022 is not set
 # CONFIG_SOC_CAMERA_RJ54N1 is not set
-# CONFIG_SOC_CAMERA_TW9910 is not set
+CONFIG_SOC_CAMERA_TW9910=y
 # CONFIG_SOC_CAMERA_PLATFORM is not set
 # CONFIG_SOC_CAMERA_OV772X is not set
 # CONFIG_SOC_CAMERA_OV9640 is not set
@@ -1001,6 +1063,7 @@
 # CONFIG_RADIO_SI470X is not set
 # CONFIG_USB_MR800 is not set
 # CONFIG_RADIO_TEA5764 is not set
+# CONFIG_RADIO_SAA7706H is not set
 # CONFIG_RADIO_TEF6862 is not set
 # CONFIG_DAB is not set
 
@@ -1034,6 +1097,7 @@
 #
 # CONFIG_FB_S1D13XXX is not set
 CONFIG_FB_SH_MOBILE_LCDC=y
+# CONFIG_FB_TMIO is not set
 # CONFIG_FB_VIRTUAL is not set
 # CONFIG_FB_METRONOME is not set
 # CONFIG_FB_MB862XX is not set
@@ -1062,7 +1126,46 @@
 # CONFIG_LOGO_SUPERH_MONO is not set
 # CONFIG_LOGO_SUPERH_VGA16 is not set
 CONFIG_LOGO_SUPERH_CLUT224=y
-# CONFIG_SOUND is not set
+CONFIG_SOUND=y
+CONFIG_SOUND_OSS_CORE=y
+CONFIG_SOUND_OSS_CORE_PRECLAIM=y
+CONFIG_SND=y
+CONFIG_SND_TIMER=y
+CONFIG_SND_PCM=y
+CONFIG_SND_JACK=y
+CONFIG_SND_SEQUENCER=y
+CONFIG_SND_SEQ_DUMMY=y
+CONFIG_SND_OSSEMUL=y
+CONFIG_SND_MIXER_OSS=y
+CONFIG_SND_PCM_OSS=y
+CONFIG_SND_PCM_OSS_PLUGINS=y
+# CONFIG_SND_SEQUENCER_OSS is not set
+# CONFIG_SND_DYNAMIC_MINORS is not set
+CONFIG_SND_SUPPORT_OLD_API=y
+CONFIG_SND_VERBOSE_PROCFS=y
+# CONFIG_SND_VERBOSE_PRINTK is not set
+# CONFIG_SND_DEBUG is not set
+# CONFIG_SND_RAWMIDI_SEQ is not set
+# CONFIG_SND_OPL3_LIB_SEQ is not set
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+# CONFIG_SND_SBAWE_SEQ is not set
+# CONFIG_SND_EMU10K1_SEQ is not set
+# CONFIG_SND_DRIVERS is not set
+# CONFIG_SND_SPI is not set
+CONFIG_SND_SUPERH=y
+# CONFIG_SND_USB is not set
+CONFIG_SND_SOC=y
+
+#
+# SoC Audio support for SuperH
+#
+CONFIG_SND_SOC_SH4_FSI=y
+# CONFIG_SND_FSI_AK4642 is not set
+CONFIG_SND_FSI_DA7210=y
+CONFIG_SND_SOC_I2C_AND_SPI=y
+# CONFIG_SND_SOC_ALL_CODECS is not set
+CONFIG_SND_SOC_DA7210=y
+# CONFIG_SOUND_PRIME is not set
 CONFIG_HID_SUPPORT=y
 CONFIG_HID=y
 # CONFIG_HIDRAW is not set
@@ -1077,6 +1180,7 @@
 #
 # Special HID drivers
 #
+# CONFIG_HID_3M_PCT is not set
 # CONFIG_HID_A4TECH is not set
 # CONFIG_HID_APPLE is not set
 # CONFIG_HID_BELKIN is not set
@@ -1091,12 +1195,16 @@
 # CONFIG_HID_KENSINGTON is not set
 # CONFIG_HID_LOGITECH is not set
 # CONFIG_HID_MICROSOFT is not set
+# CONFIG_HID_MOSART is not set
 # CONFIG_HID_MONTEREY is not set
 # CONFIG_HID_NTRIG is not set
+# CONFIG_HID_ORTEK is not set
 # CONFIG_HID_PANTHERLORD is not set
 # CONFIG_HID_PETALYNX is not set
+# CONFIG_HID_QUANTA is not set
 # CONFIG_HID_SAMSUNG is not set
 # CONFIG_HID_SONY is not set
+# CONFIG_HID_STANTUM is not set
 # CONFIG_HID_SUNPLUS is not set
 # CONFIG_HID_GREENASIA is not set
 # CONFIG_HID_SMARTJOYPLUS is not set
@@ -1136,6 +1244,7 @@
 # CONFIG_USB_SL811_HCD is not set
 CONFIG_USB_R8A66597_HCD=y
 # CONFIG_USB_HWA_HCD is not set
+# CONFIG_USB_GADGET_MUSB_HDRC is not set
 
 #
 # USB Device Class drivers
@@ -1188,7 +1297,6 @@
 # CONFIG_USB_RIO500 is not set
 # CONFIG_USB_LEGOTOWER is not set
 # CONFIG_USB_LCD is not set
-# CONFIG_USB_BERRY_CHARGE is not set
 # CONFIG_USB_LED is not set
 # CONFIG_USB_CYPRESS_CY7C63 is not set
 # CONFIG_USB_CYTHERM is not set
@@ -1200,8 +1308,45 @@
 # CONFIG_USB_IOWARRIOR is not set
 # CONFIG_USB_TEST is not set
 # CONFIG_USB_ISIGHTFW is not set
-# CONFIG_USB_VST is not set
-# CONFIG_USB_GADGET is not set
+CONFIG_USB_GADGET=y
+# CONFIG_USB_GADGET_DEBUG_FILES is not set
+# CONFIG_USB_GADGET_DEBUG_FS is not set
+CONFIG_USB_GADGET_VBUS_DRAW=2
+CONFIG_USB_GADGET_SELECTED=y
+# CONFIG_USB_GADGET_AT91 is not set
+# CONFIG_USB_GADGET_ATMEL_USBA is not set
+# CONFIG_USB_GADGET_FSL_USB2 is not set
+# CONFIG_USB_GADGET_LH7A40X is not set
+# CONFIG_USB_GADGET_OMAP is not set
+# CONFIG_USB_GADGET_PXA25X is not set
+CONFIG_USB_GADGET_R8A66597=y
+CONFIG_USB_R8A66597=y
+# CONFIG_USB_GADGET_PXA27X is not set
+# CONFIG_USB_GADGET_S3C_HSOTG is not set
+# CONFIG_USB_GADGET_IMX is not set
+# CONFIG_USB_GADGET_S3C2410 is not set
+# CONFIG_USB_GADGET_M66592 is not set
+# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_FSL_QE is not set
+# CONFIG_USB_GADGET_CI13XXX is not set
+# CONFIG_USB_GADGET_NET2280 is not set
+# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_LANGWELL is not set
+# CONFIG_USB_GADGET_DUMMY_HCD is not set
+CONFIG_USB_GADGET_DUALSPEED=y
+# CONFIG_USB_ZERO is not set
+# CONFIG_USB_AUDIO is not set
+# CONFIG_USB_ETH is not set
+# CONFIG_USB_GADGETFS is not set
+CONFIG_USB_FILE_STORAGE=m
+# CONFIG_USB_FILE_STORAGE_TEST is not set
+# CONFIG_USB_MASS_STORAGE is not set
+# CONFIG_USB_G_SERIAL is not set
+# CONFIG_USB_MIDI_GADGET is not set
+# CONFIG_USB_G_PRINTER is not set
+# CONFIG_USB_CDC_COMPOSITE is not set
+# CONFIG_USB_G_NOKIA is not set
+# CONFIG_USB_G_MULTI is not set
 
 #
 # OTG and related infrastructure
@@ -1224,10 +1369,8 @@
 # MMC/SD/SDIO Host Controller Drivers
 #
 # CONFIG_MMC_SDHCI is not set
-# CONFIG_MMC_AT91 is not set
-# CONFIG_MMC_ATMELMCI is not set
 CONFIG_MMC_SPI=y
-# CONFIG_MMC_TMIO is not set
+CONFIG_MMC_TMIO=y
 # CONFIG_MEMSTICK is not set
 # CONFIG_NEW_LEDS is not set
 # CONFIG_ACCESSIBILITY is not set
@@ -1253,10 +1396,10 @@
 # CONFIG_RTC_DRV_DS1374 is not set
 # CONFIG_RTC_DRV_DS1672 is not set
 # CONFIG_RTC_DRV_MAX6900 is not set
-# CONFIG_RTC_DRV_RS5C372 is not set
+CONFIG_RTC_DRV_RS5C372=y
 # CONFIG_RTC_DRV_ISL1208 is not set
 # CONFIG_RTC_DRV_X1205 is not set
-CONFIG_RTC_DRV_PCF8563=y
+# CONFIG_RTC_DRV_PCF8563 is not set
 # CONFIG_RTC_DRV_PCF8583 is not set
 # CONFIG_RTC_DRV_M41T80 is not set
 # CONFIG_RTC_DRV_BQ32K is not set
@@ -1303,8 +1446,6 @@
 CONFIG_UIO=y
 # CONFIG_UIO_PDRV is not set
 CONFIG_UIO_PDRV_GENIRQ=y
-# CONFIG_UIO_SMX is not set
-# CONFIG_UIO_SERCOS3 is not set
 
 #
 # TI VLYNQ
@@ -1390,6 +1531,7 @@
 # CONFIG_EFS_FS is not set
 # CONFIG_JFFS2_FS is not set
 # CONFIG_UBIFS_FS is not set
+# CONFIG_LOGFS is not set
 # CONFIG_CRAMFS is not set
 # CONFIG_SQUASHFS is not set
 # CONFIG_VXFS_FS is not set
@@ -1418,6 +1560,7 @@
 # CONFIG_RPCSEC_GSS_KRB5 is not set
 # CONFIG_RPCSEC_GSS_SPKM3 is not set
 # CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
 # CONFIG_CIFS is not set
 # CONFIG_NCP_FS is not set
 # CONFIG_CODA_FS is not set
@@ -1487,6 +1630,7 @@
 CONFIG_DEBUG_BUGVERBOSE=y
 # CONFIG_DEBUG_MEMORY_INIT is not set
 # CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_LKDTM is not set
 # CONFIG_LATENCYTOP is not set
 CONFIG_SYSCTL_SYSCALL_CHECK=y
 CONFIG_HAVE_FUNCTION_TRACER=y
@@ -1618,7 +1762,7 @@
 #
 CONFIG_BITREVERSE=y
 CONFIG_GENERIC_FIND_LAST_BIT=y
-# CONFIG_CRC_CCITT is not set
+CONFIG_CRC_CCITT=y
 # CONFIG_CRC16 is not set
 CONFIG_CRC_T10DIF=y
 CONFIG_CRC_ITU_T=y
diff --git a/arch/sh/include/asm/elf.h b/arch/sh/include/asm/elf.h
index ac04255..ce830fa 100644
--- a/arch/sh/include/asm/elf.h
+++ b/arch/sh/include/asm/elf.h
@@ -211,7 +211,9 @@
 
 #define VSYSCALL_AUX_ENT					\
 	if (vdso_enabled)					\
-		NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE);
+		NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE);	\
+	else							\
+		NEW_AUX_ENT(AT_IGNORE, 0);
 #else
 #define VSYSCALL_AUX_ENT
 #endif /* CONFIG_VSYSCALL */
@@ -219,7 +221,7 @@
 #ifdef CONFIG_SH_FPU
 #define FPU_AUX_ENT	NEW_AUX_ENT(AT_FPUCW, FPSCR_INIT)
 #else
-#define FPU_AUX_ENT
+#define FPU_AUX_ENT	NEW_AUX_ENT(AT_IGNORE, 0)
 #endif
 
 extern int l1i_cache_shape, l1d_cache_shape, l2_cache_shape;
diff --git a/arch/sh/include/cpu-sh4/cpu/mmu_context.h b/arch/sh/include/cpu-sh4/cpu/mmu_context.h
index 310ec92..5963124 100644
--- a/arch/sh/include/cpu-sh4/cpu/mmu_context.h
+++ b/arch/sh/include/cpu-sh4/cpu/mmu_context.h
@@ -30,6 +30,8 @@
 #define MMUCR_URB		0x00FC0000
 #define MMUCR_URB_SHIFT		18
 #define MMUCR_URB_NENTRIES	64
+#define MMUCR_URC		0x0000FC00
+#define MMUCR_URC_SHIFT		10
 
 #if defined(CONFIG_32BIT) && defined(CONFIG_CPU_SUBTYPE_ST40)
 #define MMUCR_SE		(1 << 4)
diff --git a/arch/sh/kernel/cpufreq.c b/arch/sh/kernel/cpufreq.c
index dce4f3f..0ffface 100644
--- a/arch/sh/kernel/cpufreq.c
+++ b/arch/sh/kernel/cpufreq.c
@@ -48,7 +48,7 @@
 		return -ENODEV;
 
 	cpus_allowed = current->cpus_allowed;
-	set_cpus_allowed(current, cpumask_of_cpu(cpu));
+	set_cpus_allowed_ptr(current, cpumask_of(cpu));
 
 	BUG_ON(smp_processor_id() != cpu);
 
@@ -66,7 +66,7 @@
 	freqs.flags	= 0;
 
 	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
-	set_cpus_allowed(current, cpus_allowed);
+	set_cpus_allowed_ptr(current, &cpus_allowed);
 	clk_set_rate(cpuclk, freq);
 	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 
diff --git a/arch/sh/kernel/return_address.c b/arch/sh/kernel/return_address.c
index df3ab58..cbf1dd5 100644
--- a/arch/sh/kernel/return_address.c
+++ b/arch/sh/kernel/return_address.c
@@ -9,6 +9,7 @@
  * for more details.
  */
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <asm/dwarf.h>
 
 #ifdef CONFIG_DWARF_UNWINDER
@@ -52,3 +53,5 @@
 }
 
 #endif
+
+EXPORT_SYMBOL_GPL(return_address);
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index e124cf7..002cc61 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -69,6 +69,7 @@
 	unsigned int cpu;
 	struct mm_struct *mm = &init_mm;
 
+	enable_mmu();
 	atomic_inc(&mm->mm_count);
 	atomic_inc(&mm->mm_users);
 	current->active_mm = mm;
diff --git a/arch/sh/mm/tlb-pteaex.c b/arch/sh/mm/tlb-pteaex.c
index bdd0982..b71db6a 100644
--- a/arch/sh/mm/tlb-pteaex.c
+++ b/arch/sh/mm/tlb-pteaex.c
@@ -77,3 +77,31 @@
 	__raw_writel(asid, MMU_ITLB_ADDRESS_ARRAY2 | MMU_PAGE_ASSOC_BIT);
 	back_to_cached();
 }
+
+void local_flush_tlb_all(void)
+{
+	unsigned long flags, status;
+	int i;
+
+	/*
+	 * Flush all the TLB.
+	 */
+	local_irq_save(flags);
+	jump_to_uncached();
+
+	status = __raw_readl(MMUCR);
+	status = ((status & MMUCR_URB) >> MMUCR_URB_SHIFT);
+
+	if (status == 0)
+		status = MMUCR_URB_NENTRIES;
+
+	for (i = 0; i < status; i++)
+		__raw_writel(0x0, MMU_UTLB_ADDRESS_ARRAY | (i << 8));
+
+	for (i = 0; i < 4; i++)
+		__raw_writel(0x0, MMU_ITLB_ADDRESS_ARRAY | (i << 8));
+
+	back_to_cached();
+	ctrl_barrier();
+	local_irq_restore(flags);
+}
diff --git a/arch/sh/mm/tlb-sh3.c b/arch/sh/mm/tlb-sh3.c
index 4f5f7cb..7a940db 100644
--- a/arch/sh/mm/tlb-sh3.c
+++ b/arch/sh/mm/tlb-sh3.c
@@ -77,3 +77,22 @@
 	for (i = 0; i < ways; i++)
 		__raw_writel(data, addr + (i << 8));
 }
+
+void local_flush_tlb_all(void)
+{
+	unsigned long flags, status;
+
+	/*
+	 * Flush all the TLB.
+	 *
+	 * Write the TLB flush bit in the MMU control register:
+	 *	the TF bit on SH-3, the TI bit on SH-4.
+	 *	It's the same position, bit #2.
+	 */
+	local_irq_save(flags);
+	status = __raw_readl(MMUCR);
+	status |= 0x04;
+	__raw_writel(status, MMUCR);
+	ctrl_barrier();
+	local_irq_restore(flags);
+}
diff --git a/arch/sh/mm/tlb-sh4.c b/arch/sh/mm/tlb-sh4.c
index ccac77f..cfdf793 100644
--- a/arch/sh/mm/tlb-sh4.c
+++ b/arch/sh/mm/tlb-sh4.c
@@ -80,3 +80,31 @@
 	__raw_writel(data, addr);
 	back_to_cached();
 }
+
+void local_flush_tlb_all(void)
+{
+	unsigned long flags, status;
+	int i;
+
+	/*
+	 * Flush all the TLB.
+	 */
+	local_irq_save(flags);
+	jump_to_uncached();
+
+	status = __raw_readl(MMUCR);
+	status = ((status & MMUCR_URB) >> MMUCR_URB_SHIFT);
+
+	if (status == 0)
+		status = MMUCR_URB_NENTRIES;
+
+	for (i = 0; i < status; i++)
+		__raw_writel(0x0, MMU_UTLB_ADDRESS_ARRAY | (i << 8));
+
+	for (i = 0; i < 4; i++)
+		__raw_writel(0x0, MMU_ITLB_ADDRESS_ARRAY | (i << 8));
+
+	back_to_cached();
+	ctrl_barrier();
+	local_irq_restore(flags);
+}
diff --git a/arch/sh/mm/tlb-urb.c b/arch/sh/mm/tlb-urb.c
index bb5b909..c92ce20 100644
--- a/arch/sh/mm/tlb-urb.c
+++ b/arch/sh/mm/tlb-urb.c
@@ -24,13 +24,9 @@
 
 	local_irq_save(flags);
 
-	/* Load the entry into the TLB */
-	__update_tlb(vma, addr, pte);
-
-	/* ... and wire it up. */
 	status = __raw_readl(MMUCR);
 	urb = (status & MMUCR_URB) >> MMUCR_URB_SHIFT;
-	status &= ~MMUCR_URB;
+	status &= ~MMUCR_URC;
 
 	/*
 	 * Make sure we're not trying to wire the last TLB entry slot.
@@ -39,7 +35,23 @@
 
 	urb = urb % MMUCR_URB_NENTRIES;
 
+	/*
+	 * Insert this entry into the highest non-wired TLB slot (via
+	 * the URC field).
+	 */
+	status |= (urb << MMUCR_URC_SHIFT);
+	__raw_writel(status, MMUCR);
+	ctrl_barrier();
+
+	/* Load the entry into the TLB */
+	__update_tlb(vma, addr, pte);
+
+	/* ... and wire it up. */
+	status = __raw_readl(MMUCR);
+
+	status &= ~MMUCR_URB;
 	status |= (urb << MMUCR_URB_SHIFT);
+
 	__raw_writel(status, MMUCR);
 	ctrl_barrier();
 
diff --git a/arch/sh/mm/tlbflush_32.c b/arch/sh/mm/tlbflush_32.c
index 77dc5ef..3fbe03c 100644
--- a/arch/sh/mm/tlbflush_32.c
+++ b/arch/sh/mm/tlbflush_32.c
@@ -119,31 +119,3 @@
 		local_irq_restore(flags);
 	}
 }
-
-void local_flush_tlb_all(void)
-{
-	unsigned long flags, status;
-	int i;
-
-	/*
-	 * Flush all the TLB.
-	 */
-	local_irq_save(flags);
-	jump_to_uncached();
-
-	status = __raw_readl(MMUCR);
-	status = ((status & MMUCR_URB) >> MMUCR_URB_SHIFT);
-
-	if (status == 0)
-		status = MMUCR_URB_NENTRIES;
-
-	for (i = 0; i < status; i++)
-		__raw_writel(0x0, MMU_UTLB_ADDRESS_ARRAY | (i << 8));
-
-	for (i = 0; i < 4; i++)
-		__raw_writel(0x0, MMU_ITLB_ADDRESS_ARRAY | (i << 8));
-
-	back_to_cached();
-	ctrl_barrier();
-	local_irq_restore(flags);
-}
diff --git a/arch/sparc/include/asm/stat.h b/arch/sparc/include/asm/stat.h
index 39327d6..a232e9e 100644
--- a/arch/sparc/include/asm/stat.h
+++ b/arch/sparc/include/asm/stat.h
@@ -53,8 +53,8 @@
 	ino_t		st_ino;
 	mode_t		st_mode;
 	short		st_nlink;
-	uid16_t		st_uid;
-	gid16_t		st_gid;
+	unsigned short	st_uid;
+	unsigned short	st_gid;
 	unsigned short	st_rdev;
 	off_t		st_size;
 	time_t		st_atime;
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 68cb9b4..e277193 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -1337,7 +1337,7 @@
 	callchain_store(entry, PERF_CONTEXT_USER);
 	callchain_store(entry, regs->tpc);
 
-	ufp = regs->u_regs[UREG_I6];
+	ufp = regs->u_regs[UREG_I6] & 0xffffffffUL;
 	do {
 		struct sparc_stackf32 *usf, sf;
 		unsigned long pc;
diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c
index ca39c60..1eb8b00 100644
--- a/arch/sparc/kernel/sysfs.c
+++ b/arch/sparc/kernel/sysfs.c
@@ -107,12 +107,12 @@
 	unsigned long ret;
 
 	/* should return -EINVAL to userspace */
-	if (set_cpus_allowed(current, cpumask_of_cpu(cpu)))
+	if (set_cpus_allowed_ptr(current, cpumask_of(cpu)))
 		return 0;
 
 	ret = func(arg);
 
-	set_cpus_allowed(current, old_affinity);
+	set_cpus_allowed_ptr(current, &old_affinity);
 
 	return ret;
 }
diff --git a/arch/sparc/kernel/us2e_cpufreq.c b/arch/sparc/kernel/us2e_cpufreq.c
index 791c151..8f982b7 100644
--- a/arch/sparc/kernel/us2e_cpufreq.c
+++ b/arch/sparc/kernel/us2e_cpufreq.c
@@ -238,12 +238,12 @@
 		return 0;
 
 	cpus_allowed = current->cpus_allowed;
-	set_cpus_allowed(current, cpumask_of_cpu(cpu));
+	set_cpus_allowed_ptr(current, cpumask_of(cpu));
 
 	clock_tick = sparc64_get_clock_tick(cpu) / 1000;
 	estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR);
 
-	set_cpus_allowed(current, cpus_allowed);
+	set_cpus_allowed_ptr(current, &cpus_allowed);
 
 	return clock_tick / estar_to_divisor(estar);
 }
@@ -259,7 +259,7 @@
 		return;
 
 	cpus_allowed = current->cpus_allowed;
-	set_cpus_allowed(current, cpumask_of_cpu(cpu));
+	set_cpus_allowed_ptr(current, cpumask_of(cpu));
 
 	new_freq = clock_tick = sparc64_get_clock_tick(cpu) / 1000;
 	new_bits = index_to_estar_mode(index);
@@ -281,7 +281,7 @@
 
 	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 
-	set_cpus_allowed(current, cpus_allowed);
+	set_cpus_allowed_ptr(current, &cpus_allowed);
 }
 
 static int us2e_freq_target(struct cpufreq_policy *policy,
diff --git a/arch/sparc/kernel/us3_cpufreq.c b/arch/sparc/kernel/us3_cpufreq.c
index 365b646..f35d1e7 100644
--- a/arch/sparc/kernel/us3_cpufreq.c
+++ b/arch/sparc/kernel/us3_cpufreq.c
@@ -86,12 +86,12 @@
 		return 0;
 
 	cpus_allowed = current->cpus_allowed;
-	set_cpus_allowed(current, cpumask_of_cpu(cpu));
+	set_cpus_allowed_ptr(current, cpumask_of(cpu));
 
 	reg = read_safari_cfg();
 	ret = get_current_freq(cpu, reg);
 
-	set_cpus_allowed(current, cpus_allowed);
+	set_cpus_allowed_ptr(current, &cpus_allowed);
 
 	return ret;
 }
@@ -106,7 +106,7 @@
 		return;
 
 	cpus_allowed = current->cpus_allowed;
-	set_cpus_allowed(current, cpumask_of_cpu(cpu));
+	set_cpus_allowed_ptr(current, cpumask_of(cpu));
 
 	new_freq = sparc64_get_clock_tick(cpu) / 1000;
 	switch (index) {
@@ -140,7 +140,7 @@
 
 	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 
-	set_cpus_allowed(current, cpus_allowed);
+	set_cpus_allowed_ptr(current, &cpus_allowed);
 }
 
 static int us3_freq_target(struct cpufreq_policy *policy,
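
The cpufreq and sysfs conversions above all follow the same save/pin/restore pattern: remember the caller's affinity, bind to the target CPU with set_cpus_allowed_ptr(current, cpumask_of(cpu)), do the per-CPU work, then restore the saved mask by pointer. A userspace analogue of the same pattern using sched_setaffinity() (run_on_cpu is an illustrative name, not a kernel API):

#define _GNU_SOURCE
#include <sched.h>

/* Pin the calling thread to one CPU, run fn there, then restore the mask. */
static int run_on_cpu(int cpu, int (*fn)(void *), void *arg)
{
	cpu_set_t old_mask, new_mask;
	int ret;

	if (sched_getaffinity(0, sizeof(old_mask), &old_mask))
		return -1;

	CPU_ZERO(&new_mask);
	CPU_SET(cpu, &new_mask);
	if (sched_setaffinity(0, sizeof(new_mask), &new_mask))
		return -1;

	ret = fn(arg);				/* runs on the target CPU */

	sched_setaffinity(0, sizeof(old_mask), &old_mask);
	return ret;
}
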
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index adedeef..b2e2460 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -7,6 +7,7 @@
 
 #include <linux/init.h>
 #include <linux/start_kernel.h>
+#include <linux/mm.h>
 
 #include <asm/setup.h>
 #include <asm/sections.h>
@@ -44,9 +45,10 @@
 #ifdef CONFIG_BLK_DEV_INITRD
 	/* Reserve INITRD */
 	if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
+		/* Assume only end is not page aligned */
 		u64 ramdisk_image = boot_params.hdr.ramdisk_image;
 		u64 ramdisk_size  = boot_params.hdr.ramdisk_size;
-		u64 ramdisk_end   = ramdisk_image + ramdisk_size;
+		u64 ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);
 		reserve_early(ramdisk_image, ramdisk_end, "RAMDISK");
 	}
 #endif
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index b5a9896..7147143 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -103,9 +103,10 @@
 #ifdef CONFIG_BLK_DEV_INITRD
 	/* Reserve INITRD */
 	if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
+		/* Assume only end is not page aligned */
 		unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
 		unsigned long ramdisk_size  = boot_params.hdr.ramdisk_size;
-		unsigned long ramdisk_end   = ramdisk_image + ramdisk_size;
+		unsigned long ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);
 		reserve_early(ramdisk_image, ramdisk_end, "RAMDISK");
 	}
 #endif
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 5d7ba1a..d76e185 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -314,16 +314,17 @@
 #define MAX_MAP_CHUNK	(NR_FIX_BTMAPS << PAGE_SHIFT)
 static void __init relocate_initrd(void)
 {
-
+	/* Assume only end is not page aligned */
 	u64 ramdisk_image = boot_params.hdr.ramdisk_image;
 	u64 ramdisk_size  = boot_params.hdr.ramdisk_size;
+	u64 area_size     = PAGE_ALIGN(ramdisk_size);
 	u64 end_of_lowmem = max_low_pfn_mapped << PAGE_SHIFT;
 	u64 ramdisk_here;
 	unsigned long slop, clen, mapaddr;
 	char *p, *q;
 
 	/* We need to move the initrd down into lowmem */
-	ramdisk_here = find_e820_area(0, end_of_lowmem, ramdisk_size,
+	ramdisk_here = find_e820_area(0, end_of_lowmem, area_size,
 					 PAGE_SIZE);
 
 	if (ramdisk_here == -1ULL)
@@ -332,7 +333,7 @@
 
 	/* Note: this includes all the lowmem currently occupied by
 	   the initrd, we rely on that fact to keep the data intact. */
-	reserve_early(ramdisk_here, ramdisk_here + ramdisk_size,
+	reserve_early(ramdisk_here, ramdisk_here + area_size,
 			 "NEW RAMDISK");
 	initrd_start = ramdisk_here + PAGE_OFFSET;
 	initrd_end   = initrd_start + ramdisk_size;
@@ -376,9 +377,10 @@
 
 static void __init reserve_initrd(void)
 {
+	/* Assume only end is not page aligned */
 	u64 ramdisk_image = boot_params.hdr.ramdisk_image;
 	u64 ramdisk_size  = boot_params.hdr.ramdisk_size;
-	u64 ramdisk_end   = ramdisk_image + ramdisk_size;
+	u64 ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);
 	u64 end_of_lowmem = max_low_pfn_mapped << PAGE_SHIFT;
 
 	if (!boot_params.hdr.type_of_loader ||
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 44879df..2cc2497 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -291,8 +291,8 @@
 	.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
 		__smp_locks = .;
 		*(.smp_locks)
-		__smp_locks_end = .;
 		. = ALIGN(PAGE_SIZE);
+		__smp_locks_end = .;
 	}
 
 #ifdef CONFIG_X86_64
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index e71c5cb..452ee5b 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -331,11 +331,23 @@
 
 void free_init_pages(char *what, unsigned long begin, unsigned long end)
 {
-	unsigned long addr = begin;
+	unsigned long addr;
+	unsigned long begin_aligned, end_aligned;
 
-	if (addr >= end)
+	/* Make sure boundaries are page aligned */
+	begin_aligned = PAGE_ALIGN(begin);
+	end_aligned   = end & PAGE_MASK;
+
+	if (WARN_ON(begin_aligned != begin || end_aligned != end)) {
+		begin = begin_aligned;
+		end   = end_aligned;
+	}
+
+	if (begin >= end)
 		return;
 
+	addr = begin;
+
 	/*
 	 * If debugging page accesses then do not free this memory but
 	 * mark them not present - any buggy init-section access will
@@ -343,7 +355,7 @@
 	 */
 #ifdef CONFIG_DEBUG_PAGEALLOC
 	printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
-		begin, PAGE_ALIGN(end));
+		begin, end);
 	set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
 #else
 	/*
@@ -358,8 +370,7 @@
 	for (; addr < end; addr += PAGE_SIZE) {
 		ClearPageReserved(virt_to_page(addr));
 		init_page_count(virt_to_page(addr));
-		memset((void *)(addr & ~(PAGE_SIZE-1)),
-			POISON_FREE_INITMEM, PAGE_SIZE);
+		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
 		free_page(addr);
 		totalram_pages++;
 	}
@@ -376,6 +387,15 @@
 #ifdef CONFIG_BLK_DEV_INITRD
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
-	free_init_pages("initrd memory", start, end);
+	/*
+	 * end may be unaligned, and we cannot align it here: the
+	 * decompressor could be confused by an aligned initrd_end.
+	 * The partial end page was already reserved in
+	 *   - i386_start_kernel()
+	 *   - x86_64_start_kernel()
+	 *   - relocate_initrd()
+	 * so it is safe to PAGE_ALIGN() here and free that partial page too.
+	 */
+	free_init_pages("initrd memory", start, PAGE_ALIGN(end));
 }
 #endif
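
The common thread in the x86 initrd changes is page-alignment arithmetic: the reservation rounds end up with PAGE_ALIGN() so the partial last page stays reserved, while free_init_pages() now warns on and trims misaligned boundaries (begin rounded up, end rounded down). A small standalone sketch of that arithmetic; the macros mirror the usual kernel definitions and the addresses are made up:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	unsigned long begin = 0x100234;	/* misaligned start */
	unsigned long end   = 0x104567;	/* misaligned end */

	/* Reservation: round the end up so the partial page stays reserved. */
	printf("reserve up to 0x%lx\n", PAGE_ALIGN(end));

	/* Freeing: shrink to whole pages only. */
	printf("free 0x%lx .. 0x%lx\n", PAGE_ALIGN(begin), end & PAGE_MASK);
	return 0;
}
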
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index 95d39c3..c59b4071 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -576,6 +576,10 @@
 			u8 rev = isa->revision;
 			pci_dev_put(isa);
 
+			if ((id->device == 0x0415 || id->device == 0x3164) &&
+			    (config->id != id->device))
+				continue;
+
 			if (rev >= config->rev_min && rev <= config->rev_max)
 				break;
 		}
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index a42c466..6da962c 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -1423,6 +1423,8 @@
 	list_del_init(&tty->tty_files);
 	file_list_unlock();
 
+	put_pid(tty->pgrp);
+	put_pid(tty->session);
 	free_tty_struct(tty);
 }
 
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index f2aaf39..51103aa 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -104,6 +104,7 @@
 	if (connector->status == connector_status_disconnected) {
 		DRM_DEBUG_KMS("%s is disconnected\n",
 			  drm_get_connector_name(connector));
+		drm_mode_connector_update_edid_property(connector, NULL);
 		goto prune;
 	}
 
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index f97e7c4..7e608f4 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -707,15 +707,6 @@
 	mode->vsync_end = mode->vsync_start + vsync_pulse_width;
 	mode->vtotal = mode->vdisplay + vblank;
 
-	/* perform the basic check for the detailed timing */
-	if (mode->hsync_end > mode->htotal ||
-		mode->vsync_end > mode->vtotal) {
-		drm_mode_destroy(dev, mode);
-		DRM_DEBUG_KMS("Incorrect detailed timing. "
-				"Sync is beyond the blank.\n");
-		return NULL;
-	}
-
 	/* Some EDIDs have bogus h/vtotal values */
 	if (mode->hsync_end > mode->htotal)
 		mode->htotal = mode->hsync_end + 1;
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 5054970..9948723 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -283,6 +283,8 @@
 	.help_msg = "force-fb(V)",
 	.action_msg = "Restore framebuffer console",
 };
+#else
+static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { };
 #endif
 
 static void drm_fb_helper_on(struct fb_info *info)
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 08d14df..4804872 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -140,14 +140,16 @@
 		spin_unlock(&dev->count_lock);
 	}
 out:
-	mutex_lock(&dev->struct_mutex);
-	if (minor->type == DRM_MINOR_LEGACY) {
-		BUG_ON((dev->dev_mapping != NULL) &&
-			(dev->dev_mapping != inode->i_mapping));
-		if (dev->dev_mapping == NULL)
-			dev->dev_mapping = inode->i_mapping;
+	if (!retcode) {
+		mutex_lock(&dev->struct_mutex);
+		if (minor->type == DRM_MINOR_LEGACY) {
+			if (dev->dev_mapping == NULL)
+				dev->dev_mapping = inode->i_mapping;
+			else if (dev->dev_mapping != inode->i_mapping)
+				retcode = -ENODEV;
+		}
+		mutex_unlock(&dev->struct_mutex);
 	}
-	mutex_unlock(&dev->struct_mutex);
 
 	return retcode;
 }
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 32db806..7f0d807 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -12,7 +12,7 @@
              nouveau_dp.o nouveau_grctx.o \
              nv04_timer.o \
              nv04_mc.o nv40_mc.o nv50_mc.o \
-             nv04_fb.o nv10_fb.o nv40_fb.o \
+             nv04_fb.o nv10_fb.o nv40_fb.o nv50_fb.o \
              nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \
              nv04_graph.o nv10_graph.o nv20_graph.o \
              nv40_graph.o nv50_graph.o \
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 75bceee7..b5a9336 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -5211,6 +5211,21 @@
 }
 
 static void
+apply_dcb_connector_quirks(struct nvbios *bios, int idx)
+{
+	struct dcb_connector_table_entry *cte = &bios->dcb.connector.entry[idx];
+	struct drm_device *dev = bios->dev;
+
+	/* Gigabyte NX85T */
+	if ((dev->pdev->device == 0x0421) &&
+	    (dev->pdev->subsystem_vendor == 0x1458) &&
+	    (dev->pdev->subsystem_device == 0x344c)) {
+		if (cte->type == DCB_CONNECTOR_HDMI_1)
+			cte->type = DCB_CONNECTOR_DVI_I;
+	}
+}
+
+static void
 parse_dcb_connector_table(struct nvbios *bios)
 {
 	struct drm_device *dev = bios->dev;
@@ -5238,13 +5253,14 @@
 	entry = conntab + conntab[1];
 	cte = &ct->entry[0];
 	for (i = 0; i < conntab[2]; i++, entry += conntab[3], cte++) {
+		cte->index = i;
 		if (conntab[3] == 2)
 			cte->entry = ROM16(entry[0]);
 		else
 			cte->entry = ROM32(entry[0]);
 
 		cte->type  = (cte->entry & 0x000000ff) >> 0;
-		cte->index = (cte->entry & 0x00000f00) >> 8;
+		cte->index2 = (cte->entry & 0x00000f00) >> 8;
 		switch (cte->entry & 0x00033000) {
 		case 0x00001000:
 			cte->gpio_tag = 0x07;
@@ -5266,6 +5282,8 @@
 		if (cte->type == 0xff)
 			continue;
 
+		apply_dcb_connector_quirks(bios, i);
+
 		NV_INFO(dev, "  %d: 0x%08x: type 0x%02x idx %d tag 0x%02x\n",
 			i, cte->entry, cte->type, cte->index, cte->gpio_tag);
 
@@ -5287,10 +5305,16 @@
 			break;
 		default:
 			cte->type = divine_connector_type(bios, cte->index);
-			NV_WARN(dev, "unknown type, using 0x%02x", cte->type);
+			NV_WARN(dev, "unknown type, using 0x%02x\n", cte->type);
 			break;
 		}
 
+		if (nouveau_override_conntype) {
+			int type = divine_connector_type(bios, cte->index);
+			if (type != cte->type)
+				NV_WARN(dev, " -> type 0x%02x\n", cte->type);
+		}
+
 	}
 }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 9f688aa..4f88e69 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -72,9 +72,10 @@
 };
 
 struct dcb_connector_table_entry {
+	uint8_t index;
 	uint32_t entry;
 	enum dcb_connector_type type;
-	uint8_t index;
+	uint8_t index2;
 	uint8_t gpio_tag;
 };
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 028719f..0266124 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -439,8 +439,7 @@
 
 	switch (bo->mem.mem_type) {
 	case TTM_PL_VRAM:
-		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT |
-					 TTM_PL_FLAG_SYSTEM);
+		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT);
 		break;
 	default:
 		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 24327f4..14afe1e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -302,7 +302,7 @@
 
 detect_analog:
 	nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG);
-	if (!nv_encoder)
+	if (!nv_encoder && !nouveau_tv_disable)
 		nv_encoder = find_encoder_by_type(connector, OUTPUT_TV);
 	if (nv_encoder) {
 		struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index c8482a1..65c441a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -190,6 +190,11 @@
 	nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8);
 
 	chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max;
+
+	DRM_MEMORYBARRIER();
+	/* Flush writes. */
+	nouveau_bo_rd32(pb, 0);
+
 	nvchan_wr32(chan, 0x8c, chan->dma.ib_put);
 	chan->dma.ib_free--;
 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 30cc09e..1de974a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -83,6 +83,14 @@
 int nouveau_nofbaccel = 0;
 module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
 
+MODULE_PARM_DESC(override_conntype, "Ignore DCB connector type");
+int nouveau_override_conntype = 0;
+module_param_named(override_conntype, nouveau_override_conntype, int, 0400);
+
+MODULE_PARM_DESC(tv_disable, "Disable TV-out detection\n");
+int nouveau_tv_disable = 0;
+module_param_named(tv_disable, nouveau_tv_disable, int, 0400);
+
 MODULE_PARM_DESC(tv_norm, "Default TV norm.\n"
 		 "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n"
 		 "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n"
@@ -154,9 +162,11 @@
 	if (pm_state.event == PM_EVENT_PRETHAW)
 		return 0;
 
+	NV_INFO(dev, "Disabling fbcon acceleration...\n");
 	fbdev_flags = dev_priv->fbdev_info->flags;
 	dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
 
+	NV_INFO(dev, "Unpinning framebuffer(s)...\n");
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		struct nouveau_framebuffer *nouveau_fb;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 4b9aaf2..d8b5590 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -681,6 +681,7 @@
 extern int nouveau_vram_pushbuf;
 extern int nouveau_vram_notify;
 extern int nouveau_fbpercrtc;
+extern int nouveau_tv_disable;
 extern char *nouveau_tv_norm;
 extern int nouveau_reg_debug;
 extern char *nouveau_vbios;
@@ -688,6 +689,7 @@
 extern int nouveau_ignorelid;
 extern int nouveau_nofbaccel;
 extern int nouveau_noaccel;
+extern int nouveau_override_conntype;
 
 extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state);
 extern int nouveau_pci_resume(struct pci_dev *pdev);
@@ -926,6 +928,10 @@
 extern void nv40_fb_set_region_tiling(struct drm_device *, int, uint32_t,
 				      uint32_t, uint32_t);
 
+/* nv50_fb.c */
+extern int  nv50_fb_init(struct drm_device *);
+extern void nv50_fb_takedown(struct drm_device *);
+
 /* nv04_fifo.c */
 extern int  nv04_fifo_init(struct drm_device *);
 extern void nv04_fifo_disable(struct drm_device *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 95220dd..2bd59a9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -311,6 +311,31 @@
 #define nouveau_print_bitfield_names(val, namelist) \
 	nouveau_print_bitfield_names_((val), (namelist), ARRAY_SIZE(namelist))
 
+struct nouveau_enum_names {
+	uint32_t value;
+	const char *name;
+};
+
+static void
+nouveau_print_enum_names_(uint32_t value,
+				const struct nouveau_enum_names *namelist,
+				const int namelist_len)
+{
+	/*
+	 * Caller must have already printed the KERN_* log level for us.
+	 * Also the caller is responsible for adding the newline.
+	 */
+	int i;
+	for (i = 0; i < namelist_len; ++i) {
+		if (value == namelist[i].value) {
+			printk("%s", namelist[i].name);
+			return;
+		}
+	}
+	printk("unknown value 0x%08x", value);
+}
+#define nouveau_print_enum_names(val, namelist) \
+	nouveau_print_enum_names_((val), (namelist), ARRAY_SIZE(namelist))
 
 static int
 nouveau_graph_chid_from_grctx(struct drm_device *dev)
@@ -427,14 +452,16 @@
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	uint32_t nsource = trap->nsource, nstatus = trap->nstatus;
 
-	NV_INFO(dev, "%s - nSource:", id);
-	nouveau_print_bitfield_names(nsource, nsource_names);
-	printk(", nStatus:");
-	if (dev_priv->card_type < NV_10)
-		nouveau_print_bitfield_names(nstatus, nstatus_names);
-	else
-		nouveau_print_bitfield_names(nstatus, nstatus_names_nv10);
-	printk("\n");
+	if (dev_priv->card_type < NV_50) {
+		NV_INFO(dev, "%s - nSource:", id);
+		nouveau_print_bitfield_names(nsource, nsource_names);
+		printk(", nStatus:");
+		if (dev_priv->card_type < NV_10)
+			nouveau_print_bitfield_names(nstatus, nstatus_names);
+		else
+			nouveau_print_bitfield_names(nstatus, nstatus_names_nv10);
+		printk("\n");
+	}
 
 	NV_INFO(dev, "%s - Ch %d/%d Class 0x%04x Mthd 0x%04x "
 					"Data 0x%08x:0x%08x\n",
@@ -578,27 +605,502 @@
 }
 
 static void
+nv50_pfb_vm_trap(struct drm_device *dev, int display, const char *name)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	uint32_t trap[6];
+	int i, ch;
+	uint32_t idx = nv_rd32(dev, 0x100c90);
+	if (idx & 0x80000000) {
+		idx &= 0xffffff;
+		if (display) {
+			for (i = 0; i < 6; i++) {
+				nv_wr32(dev, 0x100c90, idx | i << 24);
+				trap[i] = nv_rd32(dev, 0x100c94);
+			}
+			for (ch = 0; ch < dev_priv->engine.fifo.channels; ch++) {
+				struct nouveau_channel *chan = dev_priv->fifos[ch];
+
+				if (!chan || !chan->ramin)
+					continue;
+
+				if (trap[1] == chan->ramin->instance >> 12)
+					break;
+			}
+			NV_INFO(dev, "%s - VM: Trapped %s at %02x%04x%04x status %08x %08x channel %d\n",
+					name, (trap[5]&0x100?"read":"write"),
+					trap[5]&0xff, trap[4]&0xffff,
+					trap[3]&0xffff, trap[0], trap[2], ch);
+		}
+		nv_wr32(dev, 0x100c90, idx | 0x80000000);
+	} else if (display) {
+		NV_INFO(dev, "%s - no VM fault?\n", name);
+	}
+}
+
+static struct nouveau_enum_names nv50_mp_exec_error_names[] =
+{
+	{ 3, "STACK_UNDERFLOW" },
+	{ 4, "QUADON_ACTIVE" },
+	{ 8, "TIMEOUT" },
+	{ 0x10, "INVALID_OPCODE" },
+	{ 0x40, "BREAKPOINT" },
+};
+
+static void
+nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	uint32_t units = nv_rd32(dev, 0x1540);
+	uint32_t addr, mp10, status, pc, oplow, ophigh;
+	int i;
+	int mps = 0;
+	for (i = 0; i < 4; i++) {
+		if (!(units & 1 << (i+24)))
+			continue;
+		if (dev_priv->chipset < 0xa0)
+			addr = 0x408200 + (tpid << 12) + (i << 7);
+		else
+			addr = 0x408100 + (tpid << 11) + (i << 7);
+		mp10 = nv_rd32(dev, addr + 0x10);
+		status = nv_rd32(dev, addr + 0x14);
+		if (!status)
+			continue;
+		if (display) {
+			nv_rd32(dev, addr + 0x20);
+			pc = nv_rd32(dev, addr + 0x24);
+			oplow = nv_rd32(dev, addr + 0x70);
+			ophigh = nv_rd32(dev, addr + 0x74);
+			NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - "
+					"TP %d MP %d: ", tpid, i);
+			nouveau_print_enum_names(status,
+					nv50_mp_exec_error_names);
+			printk(" at %06x warp %d, opcode %08x %08x\n",
+					pc&0xffffff, pc >> 24,
+					oplow, ophigh);
+		}
+		nv_wr32(dev, addr + 0x10, mp10);
+		nv_wr32(dev, addr + 0x14, 0);
+		mps++;
+	}
+	if (!mps && display)
+		NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: "
+				"No MPs claiming errors?\n", tpid);
+}
+
+static void
+nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
+		uint32_t ustatus_new, int display, const char *name)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int tps = 0;
+	uint32_t units = nv_rd32(dev, 0x1540);
+	int i, r;
+	uint32_t ustatus_addr, ustatus;
+	for (i = 0; i < 16; i++) {
+		if (!(units & (1 << i)))
+			continue;
+		if (dev_priv->chipset < 0xa0)
+			ustatus_addr = ustatus_old + (i << 12);
+		else
+			ustatus_addr = ustatus_new + (i << 11);
+		ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff;
+		if (!ustatus)
+			continue;
+		tps++;
+		switch (type) {
+		case 6: /* texture error... unknown for now */
+			nv50_pfb_vm_trap(dev, display, name);
+			if (display) {
+				NV_ERROR(dev, "magic set %d:\n", i);
+				for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
+					NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
+						nv_rd32(dev, r));
+			}
+			break;
+		case 7: /* MP error */
+			if (ustatus & 0x00010000) {
+				nv50_pgraph_mp_trap(dev, i, display);
+				ustatus &= ~0x00010000;
+			}
+			break;
+		case 8: /* TPDMA error */
+			{
+			uint32_t e0c = nv_rd32(dev, ustatus_addr + 4);
+			uint32_t e10 = nv_rd32(dev, ustatus_addr + 8);
+			uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc);
+			uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10);
+			uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14);
+			uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18);
+			uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c);
+			nv50_pfb_vm_trap(dev, display, name);
+			/* 2d engine destination */
+			if (ustatus & 0x00000010) {
+				if (display) {
+					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n",
+							i, e14, e10);
+					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
+							i, e0c, e18, e1c, e20, e24);
+				}
+				ustatus &= ~0x00000010;
+			}
+			/* Render target */
+			if (ustatus & 0x00000040) {
+				if (display) {
+					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n",
+							i, e14, e10);
+					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
+							i, e0c, e18, e1c, e20, e24);
+				}
+				ustatus &= ~0x00000040;
+			}
+			/* CUDA memory: l[], g[] or stack. */
+			if (ustatus & 0x00000080) {
+				if (display) {
+					if (e18 & 0x80000000) {
+						/* g[] read fault? */
+						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n",
+								i, e14, e10 | ((e18 >> 24) & 0x1f));
+						e18 &= ~0x1f000000;
+					} else if (e18 & 0xc) {
+						/* g[] write fault? */
+						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n",
+								i, e14, e10 | ((e18 >> 7) & 0x1f));
+						e18 &= ~0x00000f80;
+					} else {
+						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n",
+								i, e14, e10);
+					}
+					NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
+							i, e0c, e18, e1c, e20, e24);
+				}
+				ustatus &= ~0x00000080;
+			}
+			}
+			break;
+		}
+		if (ustatus) {
+			if (display)
+				NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
+		}
+		nv_wr32(dev, ustatus_addr, 0xc0000000);
+	}
+
+	if (!tps && display)
+		NV_INFO(dev, "%s - No TPs claiming errors?\n", name);
+}
+
+static void
+nv50_pgraph_trap_handler(struct drm_device *dev)
+{
+	struct nouveau_pgraph_trap trap;
+	uint32_t status = nv_rd32(dev, 0x400108);
+	uint32_t ustatus;
+	int display = nouveau_ratelimit();
+
+
+	if (!status && display) {
+		nouveau_graph_trap_info(dev, &trap);
+		nouveau_graph_dump_trap_info(dev, "PGRAPH_TRAP", &trap);
+		NV_INFO(dev, "PGRAPH_TRAP - no units reporting traps?\n");
+	}
+
+	/* DISPATCH: Relays commands to other units and handles NOTIFY,
+	 * COND, QUERY. If you get a trap from it, the command is still stuck
+	 * in DISPATCH and you need to do something about it. */
+	if (status & 0x001) {
+		ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff;
+		if (!ustatus && display) {
+			NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n");
+		}
+
+		/* Known to be triggered by screwed up NOTIFY and COND... */
+		if (ustatus & 0x00000001) {
+			nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_FAULT");
+			nv_wr32(dev, 0x400500, 0);
+			if (nv_rd32(dev, 0x400808) & 0x80000000) {
+				if (display) {
+					if (nouveau_graph_trapped_channel(dev, &trap.channel))
+						trap.channel = -1;
+					trap.class = nv_rd32(dev, 0x400814);
+					trap.mthd = nv_rd32(dev, 0x400808) & 0x1ffc;
+					trap.subc = (nv_rd32(dev, 0x400808) >> 16) & 0x7;
+					trap.data = nv_rd32(dev, 0x40080c);
+					trap.data2 = nv_rd32(dev, 0x400810);
+					nouveau_graph_dump_trap_info(dev,
+							"PGRAPH_TRAP_DISPATCH_FAULT", &trap);
+					NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400808: %08x\n", nv_rd32(dev, 0x400808));
+					NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400848: %08x\n", nv_rd32(dev, 0x400848));
+				}
+				nv_wr32(dev, 0x400808, 0);
+			} else if (display) {
+				NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - No stuck command?\n");
+			}
+			nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3);
+			nv_wr32(dev, 0x400848, 0);
+			ustatus &= ~0x00000001;
+		}
+		if (ustatus & 0x00000002) {
+			nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_QUERY");
+			nv_wr32(dev, 0x400500, 0);
+			if (nv_rd32(dev, 0x40084c) & 0x80000000) {
+				if (display) {
+					if (nouveau_graph_trapped_channel(dev, &trap.channel))
+						trap.channel = -1;
+					trap.class = nv_rd32(dev, 0x400814);
+					trap.mthd = nv_rd32(dev, 0x40084c) & 0x1ffc;
+					trap.subc = (nv_rd32(dev, 0x40084c) >> 16) & 0x7;
+					trap.data = nv_rd32(dev, 0x40085c);
+					trap.data2 = 0;
+					nouveau_graph_dump_trap_info(dev,
+							"PGRAPH_TRAP_DISPATCH_QUERY", &trap);
+					NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - 40084c: %08x\n", nv_rd32(dev, 0x40084c));
+				}
+				nv_wr32(dev, 0x40084c, 0);
+			} else if (display) {
+				NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - No stuck command?\n");
+			}
+			ustatus &= ~0x00000002;
+		}
+		if (ustatus && display)
+			NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - Unhandled ustatus 0x%08x\n", ustatus);
+		nv_wr32(dev, 0x400804, 0xc0000000);
+		nv_wr32(dev, 0x400108, 0x001);
+		status &= ~0x001;
+	}
+
+	/* TRAPs other than dispatch use the "normal" trap regs. */
+	if (status && display) {
+		nouveau_graph_trap_info(dev, &trap);
+		nouveau_graph_dump_trap_info(dev,
+				"PGRAPH_TRAP", &trap);
+	}
+
+	/* M2MF: Memory to memory copy engine. */
+	if (status & 0x002) {
+		ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff;
+		if (!ustatus && display) {
+			NV_INFO(dev, "PGRAPH_TRAP_M2MF - no ustatus?\n");
+		}
+		if (ustatus & 0x00000001) {
+			nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_NOTIFY");
+			ustatus &= ~0x00000001;
+		}
+		if (ustatus & 0x00000002) {
+			nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_IN");
+			ustatus &= ~0x00000002;
+		}
+		if (ustatus & 0x00000004) {
+			nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_OUT");
+			ustatus &= ~0x00000004;
+		}
+		NV_INFO (dev, "PGRAPH_TRAP_M2MF - %08x %08x %08x %08x\n",
+				nv_rd32(dev, 0x406804),
+				nv_rd32(dev, 0x406808),
+				nv_rd32(dev, 0x40680c),
+				nv_rd32(dev, 0x406810));
+		if (ustatus && display)
+			NV_INFO(dev, "PGRAPH_TRAP_M2MF - Unhandled ustatus 0x%08x\n", ustatus);
+		/* No sane way found yet -- just reset the bugger. */
+		nv_wr32(dev, 0x400040, 2);
+		nv_wr32(dev, 0x400040, 0);
+		nv_wr32(dev, 0x406800, 0xc0000000);
+		nv_wr32(dev, 0x400108, 0x002);
+		status &= ~0x002;
+	}
+
+	/* VFETCH: Fetches data from vertex buffers. */
+	if (status & 0x004) {
+		ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff;
+		if (!ustatus && display) {
+			NV_INFO(dev, "PGRAPH_TRAP_VFETCH - no ustatus?\n");
+		}
+		if (ustatus & 0x00000001) {
+			nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_VFETCH_FAULT");
+			NV_INFO (dev, "PGRAPH_TRAP_VFETCH_FAULT - %08x %08x %08x %08x\n",
+					nv_rd32(dev, 0x400c00),
+					nv_rd32(dev, 0x400c08),
+					nv_rd32(dev, 0x400c0c),
+					nv_rd32(dev, 0x400c10));
+			ustatus &= ~0x00000001;
+		}
+		if (ustatus && display)
+			NV_INFO(dev, "PGRAPH_TRAP_VFETCH - Unhandled ustatus 0x%08x\n", ustatus);
+		nv_wr32(dev, 0x400c04, 0xc0000000);
+		nv_wr32(dev, 0x400108, 0x004);
+		status &= ~0x004;
+	}
+
+	/* STRMOUT: DirectX streamout / OpenGL transform feedback. */
+	if (status & 0x008) {
+		ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff;
+		if (!ustatus && display) {
+			NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - no ustatus?\n");
+		}
+		if (ustatus & 0x00000001) {
+			nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_STRMOUT_FAULT");
+			NV_INFO (dev, "PGRAPH_TRAP_STRMOUT_FAULT - %08x %08x %08x %08x\n",
+					nv_rd32(dev, 0x401804),
+					nv_rd32(dev, 0x401808),
+					nv_rd32(dev, 0x40180c),
+					nv_rd32(dev, 0x401810));
+			ustatus &= ~0x00000001;
+		}
+		if (ustatus && display)
+			NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - Unhandled ustatus 0x%08x\n", ustatus);
+		/* No sane way found yet -- just reset the bugger. */
+		nv_wr32(dev, 0x400040, 0x80);
+		nv_wr32(dev, 0x400040, 0);
+		nv_wr32(dev, 0x401800, 0xc0000000);
+		nv_wr32(dev, 0x400108, 0x008);
+		status &= ~0x008;
+	}
+
+	/* CCACHE: Handles code and c[] caches and fills them. */
+	if (status & 0x010) {
+		ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff;
+		if (!ustatus && display) {
+			NV_INFO(dev, "PGRAPH_TRAP_CCACHE - no ustatus?\n");
+		}
+		if (ustatus & 0x00000001) {
+			nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_CCACHE_FAULT");
+			NV_INFO (dev, "PGRAPH_TRAP_CCACHE_FAULT - %08x %08x %08x %08x %08x %08x %08x\n",
+					nv_rd32(dev, 0x405800),
+					nv_rd32(dev, 0x405804),
+					nv_rd32(dev, 0x405808),
+					nv_rd32(dev, 0x40580c),
+					nv_rd32(dev, 0x405810),
+					nv_rd32(dev, 0x405814),
+					nv_rd32(dev, 0x40581c));
+			ustatus &= ~0x00000001;
+		}
+		if (ustatus && display)
+			NV_INFO(dev, "PGRAPH_TRAP_CCACHE - Unhandled ustatus 0x%08x\n", ustatus);
+		nv_wr32(dev, 0x405018, 0xc0000000);
+		nv_wr32(dev, 0x400108, 0x010);
+		status &= ~0x010;
+	}
+
+	/* Unknown, not seen yet... 0x402000 is the only trap status reg
+	 * remaining, so try to handle it anyway. Perhaps related to that
+	 * unknown DMA slot on tesla? */
+	if (status & 0x20) {
+		nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_UNKC04");
+		ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff;
+		if (display)
+			NV_INFO(dev, "PGRAPH_TRAP_UNKC04 - Unhandled ustatus 0x%08x\n", ustatus);
+		nv_wr32(dev, 0x402000, 0xc0000000);
+		/* no status modification on purpose */
+	}
+
+	/* TEXTURE: CUDA texturing units */
+	if (status & 0x040) {
+		nv50_pgraph_tp_trap (dev, 6, 0x408900, 0x408600, display,
+				"PGRAPH_TRAP_TEXTURE");
+		nv_wr32(dev, 0x400108, 0x040);
+		status &= ~0x040;
+	}
+
+	/* MP: CUDA execution engines. */
+	if (status & 0x080) {
+		nv50_pgraph_tp_trap (dev, 7, 0x408314, 0x40831c, display,
+				"PGRAPH_TRAP_MP");
+		nv_wr32(dev, 0x400108, 0x080);
+		status &= ~0x080;
+	}
+
+	/* TPDMA:  Handles TP-initiated uncached memory accesses:
+	 * l[], g[], stack, 2d surfaces, render targets. */
+	if (status & 0x100) {
+		nv50_pgraph_tp_trap (dev, 8, 0x408e08, 0x408708, display,
+				"PGRAPH_TRAP_TPDMA");
+		nv_wr32(dev, 0x400108, 0x100);
+		status &= ~0x100;
+	}
+
+	if (status) {
+		if (display)
+			NV_INFO(dev, "PGRAPH_TRAP - Unknown trap 0x%08x\n",
+				status);
+		nv_wr32(dev, 0x400108, status);
+	}
+}
+
+/* There must be a *lot* of these. Will take some time to gather them up. */
+static struct nouveau_enum_names nv50_data_error_names[] =
+{
+	{ 4,	"INVALID_VALUE" },
+	{ 5,	"INVALID_ENUM" },
+	{ 8,	"INVALID_OBJECT" },
+	{ 0xc,	"INVALID_BITFIELD" },
+	{ 0x28,	"MP_NO_REG_SPACE" },
+	{ 0x2b,	"MP_BLOCK_SIZE_MISMATCH" },
+};
+
+static void
 nv50_pgraph_irq_handler(struct drm_device *dev)
 {
+	struct nouveau_pgraph_trap trap;
+	int unhandled = 0;
 	uint32_t status;
 
 	while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
-		uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
-
+		/* NOTIFY: You've set a NOTIFY on a command and it's done. */
 		if (status & 0x00000001) {
-			nouveau_pgraph_intr_notify(dev, nsource);
+			nouveau_graph_trap_info(dev, &trap);
+			if (nouveau_ratelimit())
+				nouveau_graph_dump_trap_info(dev,
+						"PGRAPH_NOTIFY", &trap);
 			status &= ~0x00000001;
 			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
 		}
 
-		if (status & 0x00000010) {
-			nouveau_pgraph_intr_error(dev, nsource |
-					NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD);
+		/* COMPUTE_QUERY: Purpose and exact cause unknown, happens
+		 * when you write 0x200 to 0x50c0 method 0x31c. */
+		if (status & 0x00000002) {
+			nouveau_graph_trap_info(dev, &trap);
+			if (nouveau_ratelimit())
+				nouveau_graph_dump_trap_info(dev,
+						"PGRAPH_COMPUTE_QUERY", &trap);
+			status &= ~0x00000002;
+			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000002);
+		}
 
+		/* Unknown, never seen: 0x4 */
+
+		/* ILLEGAL_MTHD: You used a wrong method for this class. */
+		if (status & 0x00000010) {
+			nouveau_graph_trap_info(dev, &trap);
+			if (nouveau_pgraph_intr_swmthd(dev, &trap))
+				unhandled = 1;
+			if (unhandled && nouveau_ratelimit())
+				nouveau_graph_dump_trap_info(dev,
+						"PGRAPH_ILLEGAL_MTHD", &trap);
 			status &= ~0x00000010;
 			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
 		}
 
+		/* ILLEGAL_CLASS: You used a wrong class. */
+		if (status & 0x00000020) {
+			nouveau_graph_trap_info(dev, &trap);
+			if (nouveau_ratelimit())
+				nouveau_graph_dump_trap_info(dev,
+						"PGRAPH_ILLEGAL_CLASS", &trap);
+			status &= ~0x00000020;
+			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000020);
+		}
+
+		/* DOUBLE_NOTIFY: You tried to set a NOTIFY on another NOTIFY. */
+		if (status & 0x00000040) {
+			nouveau_graph_trap_info(dev, &trap);
+			if (nouveau_ratelimit())
+				nouveau_graph_dump_trap_info(dev,
+						"PGRAPH_DOUBLE_NOTIFY", &trap);
+			status &= ~0x00000040;
+			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000040);
+		}
+
+		/* CONTEXT_SWITCH: PGRAPH needs us to load a new context */
 		if (status & 0x00001000) {
 			nv_wr32(dev, 0x400500, 0x00000000);
 			nv_wr32(dev, NV03_PGRAPH_INTR,
@@ -613,49 +1115,59 @@
 			status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
 		}
 
-		if (status & 0x00100000) {
-			nouveau_pgraph_intr_error(dev, nsource |
-					NV03_PGRAPH_NSOURCE_DATA_ERROR);
+		/* BUFFER_NOTIFY: Your m2mf transfer finished */
+		if (status & 0x00010000) {
+			nouveau_graph_trap_info(dev, &trap);
+			if (nouveau_ratelimit())
+				nouveau_graph_dump_trap_info(dev,
+						"PGRAPH_BUFFER_NOTIFY", &trap);
+			status &= ~0x00010000;
+			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00010000);
+		}
 
+		/* DATA_ERROR: Invalid value for this method, or invalid
+		 * state in current PGRAPH context for this operation */
+		if (status & 0x00100000) {
+			nouveau_graph_trap_info(dev, &trap);
+			if (nouveau_ratelimit()) {
+				nouveau_graph_dump_trap_info(dev,
+						"PGRAPH_DATA_ERROR", &trap);
+				NV_INFO (dev, "PGRAPH_DATA_ERROR - ");
+				nouveau_print_enum_names(nv_rd32(dev, 0x400110),
+						nv50_data_error_names);
+				printk("\n");
+			}
 			status &= ~0x00100000;
 			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
 		}
 
+		/* TRAP: Something bad happened in the middle of command
+		 * execution.  Has a billion types, subtypes, and even
+		 * subsubtypes. */
 		if (status & 0x00200000) {
-			int r;
-
-			nouveau_pgraph_intr_error(dev, nsource |
-					NV03_PGRAPH_NSOURCE_PROTECTION_ERROR);
-
-			NV_ERROR(dev, "magic set 1:\n");
-			for (r = 0x408900; r <= 0x408910; r += 4)
-				NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
-					nv_rd32(dev, r));
-			nv_wr32(dev, 0x408900,
-				nv_rd32(dev, 0x408904) | 0xc0000000);
-			for (r = 0x408e08; r <= 0x408e24; r += 4)
-				NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
-							nv_rd32(dev, r));
-			nv_wr32(dev, 0x408e08,
-				nv_rd32(dev, 0x408e08) | 0xc0000000);
-
-			NV_ERROR(dev, "magic set 2:\n");
-			for (r = 0x409900; r <= 0x409910; r += 4)
-				NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
-					nv_rd32(dev, r));
-			nv_wr32(dev, 0x409900,
-				nv_rd32(dev, 0x409904) | 0xc0000000);
-			for (r = 0x409e08; r <= 0x409e24; r += 4)
-				NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
-					nv_rd32(dev, r));
-			nv_wr32(dev, 0x409e08,
-				nv_rd32(dev, 0x409e08) | 0xc0000000);
-
+			nv50_pgraph_trap_handler(dev);
 			status &= ~0x00200000;
-			nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource);
 			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
 		}
 
+		/* Unknown, never seen: 0x00400000 */
+
+		/* SINGLE_STEP: Happens on every method if you turned on
+		 * single stepping in 40008c */
+		if (status & 0x01000000) {
+			nouveau_graph_trap_info(dev, &trap);
+			if (nouveau_ratelimit())
+				nouveau_graph_dump_trap_info(dev,
+						"PGRAPH_SINGLE_STEP", &trap);
+			status &= ~0x01000000;
+			nv_wr32(dev, NV03_PGRAPH_INTR, 0x01000000);
+		}
+
+		/* 0x02000000 happens when you pause a ctxprog...
+		 * but the only way I know this can happen is by
+		 * poking the relevant MMIO register, and we don't
+		 * do that. */
+
 		if (status) {
 			NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n",
 				status);
@@ -672,7 +1184,8 @@
 	}
 
 	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
-	nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
+	if (nv_rd32(dev, 0x400824) & (1 << 31))
+		nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
 }
 
 static void
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index eb8f084..58b4680 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -35,7 +35,6 @@
 #include "nouveau_drm.h"
 #include "nv50_display.h"
 
-static int nouveau_stub_init(struct drm_device *dev) { return 0; }
 static void nouveau_stub_takedown(struct drm_device *dev) {}
 
 static int nouveau_init_engine_ptrs(struct drm_device *dev)
@@ -277,8 +276,8 @@
 		engine->timer.init		= nv04_timer_init;
 		engine->timer.read		= nv04_timer_read;
 		engine->timer.takedown		= nv04_timer_takedown;
-		engine->fb.init			= nouveau_stub_init;
-		engine->fb.takedown		= nouveau_stub_takedown;
+		engine->fb.init			= nv50_fb_init;
+		engine->fb.takedown		= nv50_fb_takedown;
 		engine->graph.grclass		= nv50_graph_grclass;
 		engine->graph.init		= nv50_graph_init;
 		engine->graph.takedown		= nv50_graph_takedown;
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index a1d1ebb..eba687f1 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -230,9 +230,9 @@
 	struct drm_framebuffer *fb = crtc->fb;
 
 	/* Calculate our timings */
-	int horizDisplay	= (mode->crtc_hdisplay >> 3) 	- 1;
-	int horizStart		= (mode->crtc_hsync_start >> 3) 	- 1;
-	int horizEnd		= (mode->crtc_hsync_end >> 3) 	- 1;
+	int horizDisplay	= (mode->crtc_hdisplay >> 3)		- 1;
+	int horizStart		= (mode->crtc_hsync_start >> 3) 	+ 1;
+	int horizEnd		= (mode->crtc_hsync_end >> 3)		+ 1;
 	int horizTotal		= (mode->crtc_htotal >> 3)		- 5;
 	int horizBlankStart	= (mode->crtc_hdisplay >> 3)		- 1;
 	int horizBlankEnd	= (mode->crtc_htotal >> 3)		- 1;
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 3da90c2..813b25c 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -118,8 +118,8 @@
 		return;
 	}
 
-	width = ALIGN(image->width, 32);
-	dsize = (width * image->height) >> 5;
+	width = ALIGN(image->width, 8);
+	dsize = ALIGN(width * image->height, 32) >> 5;
 
 	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
 	    info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
@@ -136,8 +136,8 @@
 			 ((image->dx + image->width) & 0xffff));
 	OUT_RING(chan, bg);
 	OUT_RING(chan, fg);
-	OUT_RING(chan, (image->height << 16) | image->width);
 	OUT_RING(chan, (image->height << 16) | width);
+	OUT_RING(chan, (image->height << 16) | image->width);
 	OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));
 
 	while (dsize) {
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 61a89f2..fac6c88 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -522,8 +522,8 @@
 	}
 
 	for (i = 0 ; i < dcb->connector.entries; i++) {
-		if (i != 0 && dcb->connector.entry[i].index ==
-			      dcb->connector.entry[i - 1].index)
+		if (i != 0 && dcb->connector.entry[i].index2 ==
+			      dcb->connector.entry[i - 1].index2)
 			continue;
 		nouveau_connector_create(dev, &dcb->connector.entry[i]);
 	}
diff --git a/drivers/gpu/drm/nouveau/nv50_fb.c b/drivers/gpu/drm/nouveau/nv50_fb.c
new file mode 100644
index 0000000..a95e694
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_fb.c
@@ -0,0 +1,32 @@
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+
+int
+nv50_fb_init(struct drm_device *dev)
+{
+	/* This is needed to get meaningful information from 100c90
+	 * on traps. No idea what these values mean exactly. */
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	switch (dev_priv->chipset) {
+	case 0x50:
+		nv_wr32(dev, 0x100c90, 0x0707ff);
+		break;
+	case 0xa5:
+	case 0xa8:
+		nv_wr32(dev, 0x100c90, 0x0d0fff);
+		break;
+	default:
+		nv_wr32(dev, 0x100c90, 0x1d07ff);
+		break;
+	}
+
+	return 0;
+}
+
+void
+nv50_fb_takedown(struct drm_device *dev)
+{
+}
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 993c712..25a3cd8 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -233,7 +233,7 @@
 	BEGIN_RING(chan, NvSub2D, 0x0808, 3);
 	OUT_RING(chan, 0);
 	OUT_RING(chan, 0);
-	OUT_RING(chan, 0);
+	OUT_RING(chan, 1);
 	BEGIN_RING(chan, NvSub2D, 0x081c, 1);
 	OUT_RING(chan, 1);
 	BEGIN_RING(chan, NvSub2D, 0x0840, 4);
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 857a096..c62b33a 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -56,6 +56,10 @@
 static void
 nv50_graph_init_regs__nv(struct drm_device *dev)
 {
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	uint32_t units = nv_rd32(dev, 0x1540);
+	int i;
+
 	NV_DEBUG(dev, "\n");
 
 	nv_wr32(dev, 0x400804, 0xc0000000);
@@ -65,6 +69,20 @@
 	nv_wr32(dev, 0x405018, 0xc0000000);
 	nv_wr32(dev, 0x402000, 0xc0000000);
 
+	for (i = 0; i < 16; i++) {
+		if (units & 1 << i) {
+			if (dev_priv->chipset < 0xa0) {
+				nv_wr32(dev, 0x408900 + (i << 12), 0xc0000000);
+				nv_wr32(dev, 0x408e08 + (i << 12), 0xc0000000);
+				nv_wr32(dev, 0x408314 + (i << 12), 0xc0000000);
+			} else {
+				nv_wr32(dev, 0x408600 + (i << 11), 0xc0000000);
+				nv_wr32(dev, 0x408708 + (i << 11), 0xc0000000);
+				nv_wr32(dev, 0x40831c + (i << 11), 0xc0000000);
+			}
+		}
+	}
+
 	nv_wr32(dev, 0x400108, 0xffffffff);
 
 	nv_wr32(dev, 0x400824, 0x00004000);
@@ -229,10 +247,6 @@
 		nouveau_grctx_vals_load(dev, ctx);
 	}
 	nv_wo32(dev, ctx, 0x00000/4, chan->ramin->instance >> 12);
-	if ((dev_priv->chipset & 0xf0) == 0xa0)
-		nv_wo32(dev, ctx, 0x00004/4, 0x00000000);
-	else
-		nv_wo32(dev, ctx, 0x0011c/4, 0x00000000);
 	dev_priv->engine.instmem.finish_access(dev);
 
 	return 0;
diff --git a/drivers/gpu/drm/nouveau/nv50_grctx.c b/drivers/gpu/drm/nouveau/nv50_grctx.c
index d105fcd..546b319 100644
--- a/drivers/gpu/drm/nouveau/nv50_grctx.c
+++ b/drivers/gpu/drm/nouveau/nv50_grctx.c
@@ -64,6 +64,9 @@
 #define CP_FLAG_ALWAYS                ((2 * 32) + 13)
 #define CP_FLAG_ALWAYS_FALSE          0
 #define CP_FLAG_ALWAYS_TRUE           1
+#define CP_FLAG_INTR                  ((2 * 32) + 15)
+#define CP_FLAG_INTR_NOT_PENDING      0
+#define CP_FLAG_INTR_PENDING          1
 
 #define CP_CTX                   0x00100000
 #define CP_CTX_COUNT             0x000f0000
@@ -214,6 +217,8 @@
 	cp_name(ctx, cp_setup_save);
 	cp_set (ctx, UNK1D, SET);
 	cp_wait(ctx, STATUS, BUSY);
+	cp_wait(ctx, INTR, PENDING);
+	cp_bra (ctx, STATUS, BUSY, cp_setup_save);
 	cp_set (ctx, UNK01, SET);
 	cp_set (ctx, SWAP_DIRECTION, SAVE);
 
@@ -269,7 +274,7 @@
 	int offset, base;
 	uint32_t units = nv_rd32 (ctx->dev, 0x1540);
 
-	/* 0800 */
+	/* 0800: DISPATCH */
 	cp_ctx(ctx, 0x400808, 7);
 	gr_def(ctx, 0x400814, 0x00000030);
 	cp_ctx(ctx, 0x400834, 0x32);
@@ -300,7 +305,7 @@
 		gr_def(ctx, 0x400b20, 0x0001629d);
 	}
 
-	/* 0C00 */
+	/* 0C00: VFETCH */
 	cp_ctx(ctx, 0x400c08, 0x2);
 	gr_def(ctx, 0x400c08, 0x0000fe0c);
 
@@ -326,7 +331,7 @@
 	cp_ctx(ctx, 0x401540, 0x5);
 	gr_def(ctx, 0x401550, 0x00001018);
 
-	/* 1800 */
+	/* 1800: STREAMOUT */
 	cp_ctx(ctx, 0x401814, 0x1);
 	gr_def(ctx, 0x401814, 0x000000ff);
 	if (dev_priv->chipset == 0x50) {
@@ -641,7 +646,7 @@
 	if (dev_priv->chipset == 0x50)
 		cp_ctx(ctx, 0x4063e0, 0x1);
 
-	/* 6800 */
+	/* 6800: M2MF */
 	if (dev_priv->chipset < 0x90) {
 		cp_ctx(ctx, 0x406814, 0x2b);
 		gr_def(ctx, 0x406818, 0x00000f80);
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index ed38262..3c91312 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -50,7 +50,7 @@
 radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \
 	radeon_irq.o r300_cmdbuf.o r600_cp.o
 # add KMS driver
-radeon-y += radeon_device.o radeon_kms.o \
+radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
 	radeon_atombios.o radeon_agp.o atombios_crtc.o radeon_combios.o \
 	atom.o radeon_fence.o radeon_ttm.o radeon_object.o radeon_gart.o \
 	radeon_legacy_crtc.o radeon_legacy_encoders.o radeon_connectors.o \
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index d75788f..247f8ee 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -52,15 +52,17 @@
 
 typedef struct {
 	struct atom_context *ctx;
-
 	uint32_t *ps, *ws;
 	int ps_shift;
 	uint16_t start;
+	unsigned last_jump;
+	unsigned long last_jump_jiffies;
+	bool abort;
 } atom_exec_context;
 
 int atom_debug = 0;
-static void atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params);
-void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params);
+static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params);
+int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params);
 
 static uint32_t atom_arg_mask[8] =
     { 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000,
@@ -604,12 +606,17 @@
 static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
 {
 	int idx = U8((*ptr)++);
+	int r = 0;
+
 	if (idx < ATOM_TABLE_NAMES_CNT)
 		SDEBUG("   table: %d (%s)\n", idx, atom_table_names[idx]);
 	else
 		SDEBUG("   table: %d\n", idx);
 	if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
-		atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
+		r = atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
+	if (r) {
+		ctx->abort = true;
+	}
 }
 
 static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg)
@@ -673,6 +680,8 @@
 static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
 {
 	int execute = 0, target = U16(*ptr);
+	unsigned long cjiffies;
+
 	(*ptr) += 2;
 	switch (arg) {
 	case ATOM_COND_ABOVE:
@@ -700,8 +709,25 @@
 	if (arg != ATOM_COND_ALWAYS)
 		SDEBUG("   taken: %s\n", execute ? "yes" : "no");
 	SDEBUG("   target: 0x%04X\n", target);
-	if (execute)
+	if (execute) {
+		if (ctx->last_jump == (ctx->start + target)) {
+			cjiffies = jiffies;
+			if (time_after(cjiffies, ctx->last_jump_jiffies)) {
+				cjiffies -= ctx->last_jump_jiffies;
+				if ((jiffies_to_msecs(cjiffies) > 1000)) {
+					DRM_ERROR("atombios stuck in loop for more than 1 second, aborting\n");
+					ctx->abort = true;
+				}
+			} else {
+				/* jiffies wrapped around; just wait a little longer */
+				ctx->last_jump_jiffies = jiffies;
+			}
+		} else {
+			ctx->last_jump = ctx->start + target;
+			ctx->last_jump_jiffies = jiffies;
+		}
 		*ptr = ctx->start + target;
+	}
 }
 
 static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
@@ -1104,7 +1130,7 @@
 	atom_op_shr, ATOM_ARG_MC}, {
 atom_op_debug, 0},};
 
-static void atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params)
+static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params)
 {
 	int base = CU16(ctx->cmd_table + 4 + 2 * index);
 	int len, ws, ps, ptr;
@@ -1112,7 +1138,7 @@
 	atom_exec_context ectx;
 
 	if (!base)
-		return;
+		return -EINVAL;
 
 	len = CU16(base + ATOM_CT_SIZE_PTR);
 	ws = CU8(base + ATOM_CT_WS_PTR);
@@ -1125,6 +1151,8 @@
 	ectx.ps_shift = ps / 4;
 	ectx.start = base;
 	ectx.ps = params;
+	ectx.abort = false;
+	ectx.last_jump = 0;
 	if (ws)
 		ectx.ws = kzalloc(4 * ws, GFP_KERNEL);
 	else
@@ -1137,6 +1165,11 @@
 			SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1);
 		else
 			SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1);
+		if (ectx.abort) {
+			DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n",
+				base, len, ws, ps, ptr - 1);
+			return -EINVAL;
+		}
 
 		if (op < ATOM_OP_CNT && op > 0)
 			opcode_table[op].func(&ectx, &ptr,
@@ -1152,10 +1185,13 @@
 
 	if (ws)
 		kfree(ectx.ws);
+	return 0;
 }
 
-void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
+int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
 {
+	int r;
+
 	mutex_lock(&ctx->mutex);
 	/* reset reg block */
 	ctx->reg_block = 0;
@@ -1163,8 +1199,9 @@
 	ctx->fb_base = 0;
 	/* reset io mode */
 	ctx->io_mode = ATOM_IO_MM;
-	atom_execute_table_locked(ctx, index, params);
+	r = atom_execute_table_locked(ctx, index, params);
 	mutex_unlock(&ctx->mutex);
+	return r;
 }
 
 static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };
@@ -1248,9 +1285,7 @@
 
 	if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
 		return 1;
-	atom_execute_table(ctx, ATOM_CMD_INIT, ps);
-
-	return 0;
+	return atom_execute_table(ctx, ATOM_CMD_INIT, ps);
 }
 
 void atom_destroy(struct atom_context *ctx)
@@ -1260,12 +1295,16 @@
 	kfree(ctx);
 }
 
-void atom_parse_data_header(struct atom_context *ctx, int index,
+bool atom_parse_data_header(struct atom_context *ctx, int index,
 			    uint16_t * size, uint8_t * frev, uint8_t * crev,
 			    uint16_t * data_start)
 {
 	int offset = index * 2 + 4;
 	int idx = CU16(ctx->data_table + offset);
+	u16 *mdt = (u16 *)(ctx->bios + ctx->data_table + 4);
+
+	if (!mdt[index])
+		return false;
 
 	if (size)
 		*size = CU16(idx);
@@ -1274,38 +1313,42 @@
 	if (crev)
 		*crev = CU8(idx + 3);
 	*data_start = idx;
-	return;
+	return true;
 }
 
-void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev,
+bool atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev,
 			   uint8_t * crev)
 {
 	int offset = index * 2 + 4;
 	int idx = CU16(ctx->cmd_table + offset);
+	u16 *mct = (u16 *)(ctx->bios + ctx->cmd_table + 4);
+
+	if (!mct[index])
+		return false;
 
 	if (frev)
 		*frev = CU8(idx + 2);
 	if (crev)
 		*crev = CU8(idx + 3);
-	return;
+	return true;
 }
 
 int atom_allocate_fb_scratch(struct atom_context *ctx)
 {
 	int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware);
 	uint16_t data_offset;
-	int usage_bytes;
+	int usage_bytes = 0;
 	struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage;
 
-	atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset);
+	if (atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
+		firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
 
-	firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
+		DRM_DEBUG("atom firmware requested %08x %dkb\n",
+			  firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware,
+			  firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb);
 
-	DRM_DEBUG("atom firmware requested %08x %dkb\n",
-		  firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware,
-		  firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb);
-
-	usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024;
+		usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024;
+	}
 	if (usage_bytes == 0)
 		usage_bytes = 20 * 1024;
 	/* allocate some scratch memory */
diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h
index bc73781..cd1b64a 100644
--- a/drivers/gpu/drm/radeon/atom.h
+++ b/drivers/gpu/drm/radeon/atom.h
@@ -140,11 +140,13 @@
 extern int atom_debug;
 
 struct atom_context *atom_parse(struct card_info *, void *);
-void atom_execute_table(struct atom_context *, int, uint32_t *);
+int atom_execute_table(struct atom_context *, int, uint32_t *);
 int atom_asic_init(struct atom_context *);
 void atom_destroy(struct atom_context *);
-void atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size, uint8_t *frev, uint8_t *crev, uint16_t *data_start);
-void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t *frev, uint8_t *crev);
+bool atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size,
+			    uint8_t *frev, uint8_t *crev, uint16_t *data_start);
+bool atom_parse_cmd_header(struct atom_context *ctx, int index,
+			   uint8_t *frev, uint8_t *crev);
 int atom_allocate_fb_scratch(struct atom_context *ctx);
 #include "atom-types.h"
 #include "atombios.h"
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index dd9fdf5..fd4ef6d 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -353,12 +353,55 @@
 	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 }
 
+static void atombios_disable_ss(struct drm_crtc *crtc)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	u32 ss_cntl;
+
+	if (ASIC_IS_DCE4(rdev)) {
+		switch (radeon_crtc->pll_id) {
+		case ATOM_PPLL1:
+			ss_cntl = RREG32(EVERGREEN_P1PLL_SS_CNTL);
+			ss_cntl &= ~EVERGREEN_PxPLL_SS_EN;
+			WREG32(EVERGREEN_P1PLL_SS_CNTL, ss_cntl);
+			break;
+		case ATOM_PPLL2:
+			ss_cntl = RREG32(EVERGREEN_P2PLL_SS_CNTL);
+			ss_cntl &= ~EVERGREEN_PxPLL_SS_EN;
+			WREG32(EVERGREEN_P2PLL_SS_CNTL, ss_cntl);
+			break;
+		case ATOM_DCPLL:
+		case ATOM_PPLL_INVALID:
+			return;
+		}
+	} else if (ASIC_IS_AVIVO(rdev)) {
+		switch (radeon_crtc->pll_id) {
+		case ATOM_PPLL1:
+			ss_cntl = RREG32(AVIVO_P1PLL_INT_SS_CNTL);
+			ss_cntl &= ~1;
+			WREG32(AVIVO_P1PLL_INT_SS_CNTL, ss_cntl);
+			break;
+		case ATOM_PPLL2:
+			ss_cntl = RREG32(AVIVO_P2PLL_INT_SS_CNTL);
+			ss_cntl &= ~1;
+			WREG32(AVIVO_P2PLL_INT_SS_CNTL, ss_cntl);
+			break;
+		case ATOM_DCPLL:
+		case ATOM_PPLL_INVALID:
+			return;
+		}
+	}
+}
+
+
 union atom_enable_ss {
 	ENABLE_LVDS_SS_PARAMETERS legacy;
 	ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION v1;
 };
 
-static void atombios_set_ss(struct drm_crtc *crtc, int enable)
+static void atombios_enable_ss(struct drm_crtc *crtc)
 {
 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
@@ -387,9 +430,9 @@
 					step = dig->ss->step;
 					delay = dig->ss->delay;
 					range = dig->ss->range;
-				} else if (enable)
+				} else
 					return;
-			} else if (enable)
+			} else
 				return;
 			break;
 		}
@@ -406,13 +449,13 @@
 		args.v1.ucSpreadSpectrumDelay = delay;
 		args.v1.ucSpreadSpectrumRange = range;
 		args.v1.ucPpll = radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
-		args.v1.ucEnable = enable;
+		args.v1.ucEnable = ATOM_ENABLE;
 	} else {
 		args.legacy.usSpreadSpectrumPercentage = cpu_to_le16(percentage);
 		args.legacy.ucSpreadSpectrumType = type;
 		args.legacy.ucSpreadSpectrumStepSize_Delay = (step & 3) << 2;
 		args.legacy.ucSpreadSpectrumStepSize_Delay |= (delay & 7) << 4;
-		args.legacy.ucEnable = enable;
+		args.legacy.ucEnable = ATOM_ENABLE;
 	}
 	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 }
@@ -478,11 +521,6 @@
 				/* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
 				if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
 					adjusted_clock = mode->clock * 2;
-				/* LVDS PLL quirks */
-				if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) {
-					struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-					pll->algo = dig->pll_algo;
-				}
 			} else {
 				if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
 					pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
@@ -503,8 +541,9 @@
 		int index;
 
 		index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll);
-		atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
-				      &crev);
+		if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
+					   &crev))
+			return adjusted_clock;
 
 		memset(&args, 0, sizeof(args));
 
@@ -542,11 +581,16 @@
 					}
 				} else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
 					/* may want to enable SS on DP/eDP eventually */
-					args.v3.sInput.ucDispPllConfig |=
-						DISPPLL_CONFIG_SS_ENABLE;
-					if (mode->clock > 165000)
+					/*args.v3.sInput.ucDispPllConfig |=
+						DISPPLL_CONFIG_SS_ENABLE;*/
+					if (encoder_mode == ATOM_ENCODER_MODE_DP)
 						args.v3.sInput.ucDispPllConfig |=
-							DISPPLL_CONFIG_DUAL_LINK;
+							DISPPLL_CONFIG_COHERENT_MODE;
+					else {
+						if (mode->clock > 165000)
+							args.v3.sInput.ucDispPllConfig |=
+								DISPPLL_CONFIG_DUAL_LINK;
+					}
 				}
 				atom_execute_table(rdev->mode_info.atom_context,
 						   index, (uint32_t *)&args);
@@ -592,8 +636,9 @@
 	memset(&args, 0, sizeof(args));
 
 	index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
-	atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
-			      &crev);
+	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
+				   &crev))
+		return;
 
 	switch (frev) {
 	case 1:
@@ -667,8 +712,9 @@
 			   &ref_div, &post_div);
 
 	index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
-	atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
-			      &crev);
+	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
+				   &crev))
+		return;
 
 	switch (frev) {
 	case 1:
@@ -1083,15 +1129,12 @@
 
 	/* TODO color tiling */
 
-	/* pick pll */
-	radeon_crtc->pll_id = radeon_atom_pick_pll(crtc);
-
-	atombios_set_ss(crtc, 0);
+	atombios_disable_ss(crtc);
 	/* always set DCPLL */
 	if (ASIC_IS_DCE4(rdev))
 		atombios_crtc_set_dcpll(crtc);
 	atombios_crtc_set_pll(crtc, adjusted_mode);
-	atombios_set_ss(crtc, 1);
+	atombios_enable_ss(crtc);
 
 	if (ASIC_IS_DCE4(rdev))
 		atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
@@ -1120,6 +1163,11 @@
 
 static void atombios_crtc_prepare(struct drm_crtc *crtc)
 {
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
+	/* pick pll */
+	radeon_crtc->pll_id = radeon_atom_pick_pll(crtc);
+
 	atombios_lock_crtc(crtc, ATOM_ENABLE);
 	atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
 }
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 8a133bd..28b31c6 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -745,14 +745,14 @@
 			  >> DP_TRAIN_PRE_EMPHASIS_SHIFT);
 
 	/* disable the training pattern on the sink */
+	dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE);
+
+	/* disable the training pattern on the source */
 	if (ASIC_IS_DCE4(rdev))
 		atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE);
 	else
 		radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
 					  dig_connector->dp_clock, enc_id, 0);
-
-	radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
-				  dig_connector->dp_clock, enc_id, 0);
 }
 
 int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index bd2e7aa..647a0ef 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -25,6 +25,7 @@
 #include <linux/platform_device.h>
 #include "drmP.h"
 #include "radeon.h"
+#include "radeon_asic.h"
 #include "radeon_drm.h"
 #include "rv770d.h"
 #include "atom.h"
@@ -436,7 +437,6 @@
 
 int evergreen_mc_init(struct radeon_device *rdev)
 {
-	fixed20_12 a;
 	u32 tmp;
 	int chansize, numchan;
 
@@ -481,12 +481,8 @@
 		rdev->mc.real_vram_size = rdev->mc.aper_size;
 	}
 	r600_vram_gtt_location(rdev, &rdev->mc);
-	/* FIXME: we should enforce default clock in case GPU is not in
-	 * default setup
-	 */
-	a.full = rfixed_const(100);
-	rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
-	rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
+	radeon_update_bandwidth_info(rdev);
+
 	return 0;
 }
 
@@ -746,6 +742,7 @@
 
 void evergreen_fini(struct radeon_device *rdev)
 {
+	radeon_pm_fini(rdev);
 	evergreen_suspend(rdev);
 #if 0
 	r600_blit_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 91eb762..3ae51ad 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -31,6 +31,7 @@
 #include "radeon_drm.h"
 #include "radeon_reg.h"
 #include "radeon.h"
+#include "radeon_asic.h"
 #include "r100d.h"
 #include "rs100d.h"
 #include "rv200d.h"
@@ -235,9 +236,9 @@
 
 void r100_pci_gart_fini(struct radeon_device *rdev)
 {
+	radeon_gart_fini(rdev);
 	r100_pci_gart_disable(rdev);
 	radeon_gart_table_ram_free(rdev);
-	radeon_gart_fini(rdev);
 }
 
 int r100_irq_set(struct radeon_device *rdev)
@@ -312,10 +313,12 @@
 		/* Vertical blank interrupts */
 		if (status & RADEON_CRTC_VBLANK_STAT) {
 			drm_handle_vblank(rdev->ddev, 0);
+			rdev->pm.vblank_sync = true;
 			wake_up(&rdev->irq.vblank_queue);
 		}
 		if (status & RADEON_CRTC2_VBLANK_STAT) {
 			drm_handle_vblank(rdev->ddev, 1);
+			rdev->pm.vblank_sync = true;
 			wake_up(&rdev->irq.vblank_queue);
 		}
 		if (status & RADEON_FP_DETECT_STAT) {
@@ -741,6 +744,8 @@
 	udelay(10);
 	rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
 	rdev->cp.wptr = RREG32(RADEON_CP_RB_WPTR);
+	/* protect against crazy HW on resume */
+	rdev->cp.wptr &= rdev->cp.ptr_mask;
 	/* Set cp mode to bus mastering & enable cp*/
 	WREG32(RADEON_CP_CSQ_MODE,
 	       REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
@@ -1804,6 +1809,7 @@
 {
 	struct drm_device *dev = rdev->ddev;
 	bool force_dac2 = false;
+	u32 tmp;
 
 	/* set these so they don't interfere with anything */
 	WREG32(RADEON_OV0_SCALE_CNTL, 0);
@@ -1875,6 +1881,12 @@
 		WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
 		WREG32(RADEON_DAC_CNTL2, dac2_cntl);
 	}
+
+	/* switch PM block to ACPI mode */
+	tmp = RREG32_PLL(RADEON_PLL_PWRMGT_CNTL);
+	tmp &= ~RADEON_PM_MODE_SEL;
+	WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp);
+
 }
 
 /*
@@ -2022,6 +2034,7 @@
 	radeon_vram_location(rdev, &rdev->mc, base);
 	if (!(rdev->flags & RADEON_IS_AGP))
 		radeon_gtt_location(rdev, &rdev->mc);
+	radeon_update_bandwidth_info(rdev);
 }
 
 
@@ -2385,6 +2398,8 @@
 	uint32_t pixel_bytes1 = 0;
 	uint32_t pixel_bytes2 = 0;
 
+	radeon_update_display_priority(rdev);
+
 	if (rdev->mode_info.crtcs[0]->base.enabled) {
 		mode1 = &rdev->mode_info.crtcs[0]->base.mode;
 		pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8;
@@ -2413,11 +2428,8 @@
 	/*
 	 * determine is there is enough bw for current mode
 	 */
-	mclk_ff.full = rfixed_const(rdev->clock.default_mclk);
-	temp_ff.full = rfixed_const(100);
-	mclk_ff.full = rfixed_div(mclk_ff, temp_ff);
-	sclk_ff.full = rfixed_const(rdev->clock.default_sclk);
-	sclk_ff.full = rfixed_div(sclk_ff, temp_ff);
+	sclk_ff = rdev->pm.sclk;
+	mclk_ff = rdev->pm.mclk;
 
 	temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
 	temp_ff.full = rfixed_const(temp);
@@ -3440,6 +3452,7 @@
 
 void r100_fini(struct radeon_device *rdev)
 {
+	radeon_pm_fini(rdev);
 	r100_cp_fini(rdev);
 	r100_wb_fini(rdev);
 	r100_ib_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index 1146c99..85617c3 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -30,6 +30,7 @@
 #include "radeon_drm.h"
 #include "radeon_reg.h"
 #include "radeon.h"
+#include "radeon_asic.h"
 
 #include "r100d.h"
 #include "r200_reg_safe.h"
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 4cef90c..1023eeb 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -30,6 +30,7 @@
 #include "drm.h"
 #include "radeon_reg.h"
 #include "radeon.h"
+#include "radeon_asic.h"
 #include "radeon_drm.h"
 #include "r100_track.h"
 #include "r300d.h"
@@ -164,9 +165,9 @@
 
 void rv370_pcie_gart_fini(struct radeon_device *rdev)
 {
+	radeon_gart_fini(rdev);
 	rv370_pcie_gart_disable(rdev);
 	radeon_gart_table_vram_free(rdev);
-	radeon_gart_fini(rdev);
 }
 
 void r300_fence_ring_emit(struct radeon_device *rdev,
@@ -481,6 +482,7 @@
 	radeon_vram_location(rdev, &rdev->mc, base);
 	if (!(rdev->flags & RADEON_IS_AGP))
 		radeon_gtt_location(rdev, &rdev->mc);
+	radeon_update_bandwidth_info(rdev);
 }
 
 void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
@@ -1334,6 +1336,7 @@
 
 void r300_fini(struct radeon_device *rdev)
 {
+	radeon_pm_fini(rdev);
 	r100_cp_fini(rdev);
 	r100_wb_fini(rdev);
 	r100_ib_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index c7593b8..0b8603c 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -29,6 +29,7 @@
 #include "drmP.h"
 #include "radeon_reg.h"
 #include "radeon.h"
+#include "radeon_asic.h"
 #include "atom.h"
 #include "r100d.h"
 #include "r420d.h"
@@ -266,6 +267,7 @@
 
 void r420_fini(struct radeon_device *rdev)
 {
+	radeon_pm_fini(rdev);
 	r100_cp_fini(rdev);
 	r100_wb_fini(rdev);
 	r100_ib_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 2b8a5dd..3c44b8d 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -27,6 +27,7 @@
  */
 #include "drmP.h"
 #include "radeon.h"
+#include "radeon_asic.h"
 #include "atom.h"
 #include "r520d.h"
 
@@ -121,19 +122,13 @@
 
 void r520_mc_init(struct radeon_device *rdev)
 {
-	fixed20_12 a;
 
 	r520_vram_get_type(rdev);
 	r100_vram_init_sizes(rdev);
 	radeon_vram_location(rdev, &rdev->mc, 0);
 	if (!(rdev->flags & RADEON_IS_AGP))
 		radeon_gtt_location(rdev, &rdev->mc);
-	/* FIXME: we should enforce default clock in case GPU is not in
-	 * default setup
-	 */
-	a.full = rfixed_const(100);
-	rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
-	rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
+	radeon_update_bandwidth_info(rdev);
 }
 
 void r520_mc_program(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index c522901..5509354 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -31,6 +31,7 @@
 #include "drmP.h"
 #include "radeon_drm.h"
 #include "radeon.h"
+#include "radeon_asic.h"
 #include "radeon_mode.h"
 #include "r600d.h"
 #include "atom.h"
@@ -491,9 +492,9 @@
 
 void r600_pcie_gart_fini(struct radeon_device *rdev)
 {
+	radeon_gart_fini(rdev);
 	r600_pcie_gart_disable(rdev);
 	radeon_gart_table_vram_free(rdev);
-	radeon_gart_fini(rdev);
 }
 
 void r600_agp_enable(struct radeon_device *rdev)
@@ -675,7 +676,6 @@
 
 int r600_mc_init(struct radeon_device *rdev)
 {
-	fixed20_12 a;
 	u32 tmp;
 	int chansize, numchan;
 
@@ -719,14 +719,10 @@
 		rdev->mc.real_vram_size = rdev->mc.aper_size;
 	}
 	r600_vram_gtt_location(rdev, &rdev->mc);
-	/* FIXME: we should enforce default clock in case GPU is not in
-	 * default setup
-	 */
-	a.full = rfixed_const(100);
-	rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
-	rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
+
 	if (rdev->flags & RADEON_IS_IGP)
 		rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
+	radeon_update_bandwidth_info(rdev);
 	return 0;
 }
 
@@ -1132,6 +1128,7 @@
 	/* Setup pipes */
 	WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
 	WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
+	WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
 
 	tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
 	WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
@@ -2119,6 +2116,7 @@
 
 void r600_fini(struct radeon_device *rdev)
 {
+	radeon_pm_fini(rdev);
 	r600_audio_fini(rdev);
 	r600_blit_fini(rdev);
 	r600_cp_fini(rdev);
@@ -2398,19 +2396,19 @@
 		WREG32(DC_HPD4_INT_CONTROL, tmp);
 		if (ASIC_IS_DCE32(rdev)) {
 			tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
-			WREG32(DC_HPD5_INT_CONTROL, 0);
+			WREG32(DC_HPD5_INT_CONTROL, tmp);
 			tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
-			WREG32(DC_HPD6_INT_CONTROL, 0);
+			WREG32(DC_HPD6_INT_CONTROL, tmp);
 		}
 	} else {
 		WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
 		WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
 		tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
-		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, 0);
+		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
 		tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
-		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, 0);
+		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
 		tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
-		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, 0);
+		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
 	}
 }
 
@@ -2765,6 +2763,7 @@
 			case 0: /* D1 vblank */
 				if (disp_int & LB_D1_VBLANK_INTERRUPT) {
 					drm_handle_vblank(rdev->ddev, 0);
+					rdev->pm.vblank_sync = true;
 					wake_up(&rdev->irq.vblank_queue);
 					disp_int &= ~LB_D1_VBLANK_INTERRUPT;
 					DRM_DEBUG("IH: D1 vblank\n");
@@ -2786,6 +2785,7 @@
 			case 0: /* D2 vblank */
 				if (disp_int & LB_D2_VBLANK_INTERRUPT) {
 					drm_handle_vblank(rdev->ddev, 1);
+					rdev->pm.vblank_sync = true;
 					wake_up(&rdev->irq.vblank_queue);
 					disp_int &= ~LB_D2_VBLANK_INTERRUPT;
 					DRM_DEBUG("IH: D2 vblank\n");
@@ -2834,14 +2834,14 @@
 				break;
 			case 10:
 				if (disp_int_cont2 & DC_HPD5_INTERRUPT) {
-					disp_int_cont &= ~DC_HPD5_INTERRUPT;
+					disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
 					queue_hotplug = true;
 					DRM_DEBUG("IH: HPD5\n");
 				}
 				break;
 			case 12:
 				if (disp_int_cont2 & DC_HPD6_INTERRUPT) {
-					disp_int_cont &= ~DC_HPD6_INTERRUPT;
+					disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
 					queue_hotplug = true;
 					DRM_DEBUG("IH: HPD6\n");
 				}
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index db92801..dac7042 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -182,41 +182,6 @@
 }
 
 /*
- * determin how the encoders and audio interface is wired together
- */
-int r600_audio_tmds_index(struct drm_encoder *encoder)
-{
-	struct drm_device *dev = encoder->dev;
-	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-	struct drm_encoder *other;
-
-	switch (radeon_encoder->encoder_id) {
-	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
-	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
-	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
-		return 0;
-
-	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
-		/* special case check if an TMDS1 is present */
-		list_for_each_entry(other, &dev->mode_config.encoder_list, head) {
-			if (to_radeon_encoder(other)->encoder_id ==
-				ENCODER_OBJECT_ID_INTERNAL_TMDS1)
-				return 1;
-		}
-		return 0;
-
-	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
-	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
-		return 1;
-
-	default:
-		DRM_ERROR("Unsupported encoder type 0x%02X\n",
-			  radeon_encoder->encoder_id);
-		return -1;
-	}
-}
-
-/*
  * atach the audio codec to the clock source of the encoder
  */
 void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
@@ -224,6 +189,7 @@
 	struct drm_device *dev = encoder->dev;
 	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
 	int base_rate = 48000;
 
 	switch (radeon_encoder->encoder_id) {
@@ -231,32 +197,34 @@
 	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
 		WREG32_P(R600_AUDIO_TIMING, 0, ~0x301);
 		break;
-
 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
 		WREG32_P(R600_AUDIO_TIMING, 0x100, ~0x301);
 		break;
-
 	default:
 		DRM_ERROR("Unsupported encoder type 0x%02X\n",
 			  radeon_encoder->encoder_id);
 		return;
 	}
 
-	switch (r600_audio_tmds_index(encoder)) {
+	switch (dig->dig_encoder) {
 	case 0:
-		WREG32(R600_AUDIO_PLL1_MUL, base_rate*50);
-		WREG32(R600_AUDIO_PLL1_DIV, clock*100);
+		WREG32(R600_AUDIO_PLL1_MUL, base_rate * 50);
+		WREG32(R600_AUDIO_PLL1_DIV, clock * 100);
 		WREG32(R600_AUDIO_CLK_SRCSEL, 0);
 		break;
 
 	case 1:
-		WREG32(R600_AUDIO_PLL2_MUL, base_rate*50);
-		WREG32(R600_AUDIO_PLL2_DIV, clock*100);
+		WREG32(R600_AUDIO_PLL2_MUL, base_rate * 50);
+		WREG32(R600_AUDIO_PLL2_DIV, clock * 100);
 		WREG32(R600_AUDIO_CLK_SRCSEL, 1);
 		break;
+	default:
+		dev_err(rdev->dev, "Unsupported DIG on encoder 0x%02X\n",
+			  radeon_encoder->encoder_id);
+		return;
 	}
 }
 
diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.c b/drivers/gpu/drm/radeon/r600_blit_shaders.c
index a112c59..0271b53 100644
--- a/drivers/gpu/drm/radeon/r600_blit_shaders.c
+++ b/drivers/gpu/drm/radeon/r600_blit_shaders.c
@@ -1,7 +1,42 @@
+/*
+ * Copyright 2009 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     Alex Deucher <alexander.deucher@amd.com>
+ */
 
 #include <linux/types.h>
 #include <linux/kernel.h>
 
+/*
+ * R6xx+ cards need to use the 3D engine to blit data which requires
+ * quite a bit of hw state setup.  Rather than pull the whole 3D driver
+ * (which normally generates the 3D state) into the DRM, we opt to use
+ * statically generated state tables.  The register state and shaders
+ * were hand-generated to support blitting functionality.  See the 3D
+ * driver or documentation for descriptions of the registers and
+ * shader instructions.
+ */
+
 const u32 r6xx_default_state[] =
 {
 	0xc0002400,
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 40416c0..68e6f43 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -1548,10 +1548,13 @@
 
 	RADEON_WRITE(R600_CC_RB_BACKEND_DISABLE,      cc_rb_backend_disable);
 	RADEON_WRITE(R600_CC_GC_SHADER_PIPE_CONFIG,   cc_gc_shader_pipe_config);
+	RADEON_WRITE(R600_GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
 
 	RADEON_WRITE(R700_CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
 	RADEON_WRITE(R700_CGTS_SYS_TCC_DISABLE, 0);
 	RADEON_WRITE(R700_CGTS_TCC_DISABLE, 0);
+	RADEON_WRITE(R700_CGTS_USER_SYS_TCC_DISABLE, 0);
+	RADEON_WRITE(R700_CGTS_USER_TCC_DISABLE, 0);
 
 	num_qd_pipes =
 		R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & R600_INACTIVE_QD_PIPES_MASK) >> 8);
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index cd2c63b..c39c1bc 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -45,6 +45,7 @@
 	u32			nbanks;
 	u32			npipes;
 	/* value we track */
+	u32			sq_config;
 	u32			nsamples;
 	u32			cb_color_base_last[8];
 	struct radeon_bo	*cb_color_bo[8];
@@ -141,6 +142,8 @@
 {
 	int i;
 
+	/* assume DX9 mode */
+	track->sq_config = DX9_CONSTS;
 	for (i = 0; i < 8; i++) {
 		track->cb_color_base_last[i] = 0;
 		track->cb_color_size[i] = 0;
@@ -715,6 +718,9 @@
 		tmp =radeon_get_ib_value(p, idx);
 		ib[idx] = 0;
 		break;
+	case SQ_CONFIG:
+		track->sq_config = radeon_get_ib_value(p, idx);
+		break;
 	case R_028800_DB_DEPTH_CONTROL:
 		track->db_depth_control = radeon_get_ib_value(p, idx);
 		break;
@@ -869,6 +875,54 @@
 	case SQ_PGM_START_VS:
 	case SQ_PGM_START_GS:
 	case SQ_PGM_START_PS:
+	case SQ_ALU_CONST_CACHE_GS_0:
+	case SQ_ALU_CONST_CACHE_GS_1:
+	case SQ_ALU_CONST_CACHE_GS_2:
+	case SQ_ALU_CONST_CACHE_GS_3:
+	case SQ_ALU_CONST_CACHE_GS_4:
+	case SQ_ALU_CONST_CACHE_GS_5:
+	case SQ_ALU_CONST_CACHE_GS_6:
+	case SQ_ALU_CONST_CACHE_GS_7:
+	case SQ_ALU_CONST_CACHE_GS_8:
+	case SQ_ALU_CONST_CACHE_GS_9:
+	case SQ_ALU_CONST_CACHE_GS_10:
+	case SQ_ALU_CONST_CACHE_GS_11:
+	case SQ_ALU_CONST_CACHE_GS_12:
+	case SQ_ALU_CONST_CACHE_GS_13:
+	case SQ_ALU_CONST_CACHE_GS_14:
+	case SQ_ALU_CONST_CACHE_GS_15:
+	case SQ_ALU_CONST_CACHE_PS_0:
+	case SQ_ALU_CONST_CACHE_PS_1:
+	case SQ_ALU_CONST_CACHE_PS_2:
+	case SQ_ALU_CONST_CACHE_PS_3:
+	case SQ_ALU_CONST_CACHE_PS_4:
+	case SQ_ALU_CONST_CACHE_PS_5:
+	case SQ_ALU_CONST_CACHE_PS_6:
+	case SQ_ALU_CONST_CACHE_PS_7:
+	case SQ_ALU_CONST_CACHE_PS_8:
+	case SQ_ALU_CONST_CACHE_PS_9:
+	case SQ_ALU_CONST_CACHE_PS_10:
+	case SQ_ALU_CONST_CACHE_PS_11:
+	case SQ_ALU_CONST_CACHE_PS_12:
+	case SQ_ALU_CONST_CACHE_PS_13:
+	case SQ_ALU_CONST_CACHE_PS_14:
+	case SQ_ALU_CONST_CACHE_PS_15:
+	case SQ_ALU_CONST_CACHE_VS_0:
+	case SQ_ALU_CONST_CACHE_VS_1:
+	case SQ_ALU_CONST_CACHE_VS_2:
+	case SQ_ALU_CONST_CACHE_VS_3:
+	case SQ_ALU_CONST_CACHE_VS_4:
+	case SQ_ALU_CONST_CACHE_VS_5:
+	case SQ_ALU_CONST_CACHE_VS_6:
+	case SQ_ALU_CONST_CACHE_VS_7:
+	case SQ_ALU_CONST_CACHE_VS_8:
+	case SQ_ALU_CONST_CACHE_VS_9:
+	case SQ_ALU_CONST_CACHE_VS_10:
+	case SQ_ALU_CONST_CACHE_VS_11:
+	case SQ_ALU_CONST_CACHE_VS_12:
+	case SQ_ALU_CONST_CACHE_VS_13:
+	case SQ_ALU_CONST_CACHE_VS_14:
+	case SQ_ALU_CONST_CACHE_VS_15:
 		r = r600_cs_packet_next_reloc(p, &reloc);
 		if (r) {
 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
@@ -1226,13 +1280,15 @@
 		}
 		break;
 	case PACKET3_SET_ALU_CONST:
-		start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET;
-		end_reg = 4 * pkt->count + start_reg - 4;
-		if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) ||
-		    (start_reg >= PACKET3_SET_ALU_CONST_END) ||
-		    (end_reg >= PACKET3_SET_ALU_CONST_END)) {
-			DRM_ERROR("bad SET_ALU_CONST\n");
-			return -EINVAL;
+		if (track->sq_config & DX9_CONSTS) {
+			start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET;
+			end_reg = 4 * pkt->count + start_reg - 4;
+			if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) ||
+			    (start_reg >= PACKET3_SET_ALU_CONST_END) ||
+			    (end_reg >= PACKET3_SET_ALU_CONST_END)) {
+				DRM_ERROR("bad SET_ALU_CONST\n");
+				return -EINVAL;
+			}
 		}
 		break;
 	case PACKET3_SET_BOOL_CONST:
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index fcc949d..029fa14 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -42,13 +42,13 @@
  */
 enum r600_hdmi_iec_status_bits {
 	AUDIO_STATUS_DIG_ENABLE   = 0x01,
-	AUDIO_STATUS_V	    = 0x02,
-	AUDIO_STATUS_VCFG	 = 0x04,
+	AUDIO_STATUS_V            = 0x02,
+	AUDIO_STATUS_VCFG         = 0x04,
 	AUDIO_STATUS_EMPHASIS     = 0x08,
 	AUDIO_STATUS_COPYRIGHT    = 0x10,
 	AUDIO_STATUS_NONAUDIO     = 0x20,
 	AUDIO_STATUS_PROFESSIONAL = 0x40,
-	AUDIO_STATUS_LEVEL	= 0x80
+	AUDIO_STATUS_LEVEL        = 0x80
 };
 
 struct {
@@ -85,7 +85,7 @@
 static void r600_hdmi_calc_CTS(uint32_t clock, int *CTS, int N, int freq)
 {
 	if (*CTS == 0)
-		*CTS = clock*N/(128*freq)*1000;
+		*CTS = clock * N / (128 * freq) * 1000;
 	DRM_DEBUG("Using ACR timing N=%d CTS=%d for frequency %d\n",
 		  N, *CTS, freq);
 }
@@ -131,11 +131,11 @@
 					 uint8_t length,
 					 uint8_t *frame)
 {
-    int i;
-    frame[0] = packetType + versionNumber + length;
-    for (i = 1; i <= length; i++)
-	frame[0] += frame[i];
-    frame[0] = 0x100 - frame[0];
+	int i;
+	frame[0] = packetType + versionNumber + length;
+	for (i = 1; i <= length; i++)
+		frame[0] += frame[i];
+	frame[0] = 0x100 - frame[0];
 }
 
 /*
@@ -417,90 +417,141 @@
 	WREG32_P(offset+R600_HDMI_CNTL, 0x04000000, ~0x04000000);
 }
 
-/*
- * enable/disable the HDMI engine
- */
-void r600_hdmi_enable(struct drm_encoder *encoder, int enable)
+static int r600_hdmi_find_free_block(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_encoder *encoder;
+	struct radeon_encoder *radeon_encoder;
+	bool free_blocks[3] = { true, true, true };
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		radeon_encoder = to_radeon_encoder(encoder);
+		switch (radeon_encoder->hdmi_offset) {
+		case R600_HDMI_BLOCK1:
+			free_blocks[0] = false;
+			break;
+		case R600_HDMI_BLOCK2:
+			free_blocks[1] = false;
+			break;
+		case R600_HDMI_BLOCK3:
+			free_blocks[2] = false;
+			break;
+		}
+	}
+
+	if (rdev->family == CHIP_RS600 || rdev->family == CHIP_RS690) {
+		return free_blocks[0] ? R600_HDMI_BLOCK1 : 0;
+	} else if (rdev->family >= CHIP_R600) {
+		if (free_blocks[0])
+			return R600_HDMI_BLOCK1;
+		else if (free_blocks[1])
+			return R600_HDMI_BLOCK2;
+	}
+	return 0;
+}
+
+static void r600_hdmi_assign_block(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
 	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-	uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
 
-	if (!offset)
+	if (!dig) {
+		dev_err(rdev->dev, "Enabling HDMI on non-dig encoder\n");
 		return;
+	}
 
-	DRM_DEBUG("%s HDMI interface @ 0x%04X\n", enable ? "Enabling" : "Disabling", offset);
-
-	/* some version of atombios ignore the enable HDMI flag
-	 * so enabling/disabling HDMI was moved here for TMDS1+2 */
-	switch (radeon_encoder->encoder_id) {
-	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
-		WREG32_P(AVIVO_TMDSA_CNTL, enable ? 0x4 : 0x0, ~0x4);
-		WREG32(offset+R600_HDMI_ENABLE, enable ? 0x101 : 0x0);
-		break;
-
-	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
-		WREG32_P(AVIVO_LVTMA_CNTL, enable ? 0x4 : 0x0, ~0x4);
-		WREG32(offset+R600_HDMI_ENABLE, enable ? 0x105 : 0x0);
-		break;
-
-	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
-	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
-	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
-	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
-		/* This part is doubtfull in my opinion */
-		WREG32(offset+R600_HDMI_ENABLE, enable ? 0x110 : 0x0);
-		break;
-
-	default:
-		DRM_ERROR("unknown HDMI output type\n");
-		break;
+	if (ASIC_IS_DCE4(rdev)) {
+		/* TODO */
+	} else if (ASIC_IS_DCE3(rdev)) {
+		radeon_encoder->hdmi_offset = dig->dig_encoder ?
+			R600_HDMI_BLOCK3 : R600_HDMI_BLOCK1;
+		if (ASIC_IS_DCE32(rdev))
+			radeon_encoder->hdmi_config_offset = dig->dig_encoder ?
+				R600_HDMI_CONFIG2 : R600_HDMI_CONFIG1;
+	} else if (rdev->family >= CHIP_R600) {
+		radeon_encoder->hdmi_offset = r600_hdmi_find_free_block(dev);
 	}
 }
 
 /*
- * determin at which register offset the HDMI encoder is
+ * enable the HDMI engine
  */
-void r600_hdmi_init(struct drm_encoder *encoder)
+void r600_hdmi_enable(struct drm_encoder *encoder)
 {
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 
-	switch (radeon_encoder->encoder_id) {
-	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
-	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
-	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
-		radeon_encoder->hdmi_offset = R600_HDMI_TMDS1;
-		break;
-
-	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
-		switch (r600_audio_tmds_index(encoder)) {
-		case 0:
-			radeon_encoder->hdmi_offset = R600_HDMI_TMDS1;
-			break;
-		case 1:
-			radeon_encoder->hdmi_offset = R600_HDMI_TMDS2;
-			break;
-		default:
-			radeon_encoder->hdmi_offset = 0;
-			break;
+	if (!radeon_encoder->hdmi_offset) {
+		r600_hdmi_assign_block(encoder);
+		if (!radeon_encoder->hdmi_offset) {
+			dev_warn(rdev->dev, "Could not find HDMI block for "
+				"0x%x encoder\n", radeon_encoder->encoder_id);
+			return;
 		}
-	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
-		radeon_encoder->hdmi_offset = R600_HDMI_TMDS2;
-		break;
-
-	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
-		radeon_encoder->hdmi_offset = R600_HDMI_DIG;
-		break;
-
-	default:
-		radeon_encoder->hdmi_offset = 0;
-		break;
 	}
 
-	DRM_DEBUG("using HDMI engine at offset 0x%04X for encoder 0x%x\n",
-		  radeon_encoder->hdmi_offset, radeon_encoder->encoder_id);
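+	/* DCE 3.2 has a dedicated HDMI config register; pre-DCE3 chips
+	 * enable HDMI through the TMDS/LVTMA control registers and the
+	 * HDMI block itself */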
+	if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) {
+		WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0x1, ~0x1);
+	} else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
+		int offset = radeon_encoder->hdmi_offset;
+		switch (radeon_encoder->encoder_id) {
+		case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+			WREG32_P(AVIVO_TMDSA_CNTL, 0x4, ~0x4);
+			WREG32(offset + R600_HDMI_ENABLE, 0x101);
+			break;
+		case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+			WREG32_P(AVIVO_LVTMA_CNTL, 0x4, ~0x4);
+			WREG32(offset + R600_HDMI_ENABLE, 0x105);
+			break;
+		default:
+			dev_err(rdev->dev, "Unknown HDMI output type\n");
+			break;
+		}
+	}
 
-	/* TODO: make this configureable */
-	radeon_encoder->hdmi_audio_workaround = 0;
+	DRM_DEBUG("Enabling HDMI interface @ 0x%04X for encoder 0x%x\n",
+		radeon_encoder->hdmi_offset, radeon_encoder->encoder_id);
+}
+
+/*
+ * disable the HDMI engine
+ */
+void r600_hdmi_disable(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+
+	if (!radeon_encoder->hdmi_offset) {
+		dev_err(rdev->dev, "Disabling HDMI that is not enabled\n");
+		return;
+	}
+
+	DRM_DEBUG("Disabling HDMI interface @ 0x%04X for encoder 0x%x\n",
+		radeon_encoder->hdmi_offset, radeon_encoder->encoder_id);
+
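+	/* mirror of r600_hdmi_enable(): clear the same enable bits */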
+	if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) {
+		WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0, ~0x1);
+	} else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
+		int offset = radeon_encoder->hdmi_offset;
+		switch (radeon_encoder->encoder_id) {
+		case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+			WREG32_P(AVIVO_TMDSA_CNTL, 0, ~0x4);
+			WREG32(offset + R600_HDMI_ENABLE, 0);
+			break;
+		case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+			WREG32_P(AVIVO_LVTMA_CNTL, 0, ~0x4);
+			WREG32(offset + R600_HDMI_ENABLE, 0);
+			break;
+		default:
+			dev_err(rdev->dev, "Unknown HDMI output type\n");
+			break;
+		}
+	}
+
+	radeon_encoder->hdmi_offset = 0;
+	radeon_encoder->hdmi_config_offset = 0;
 }
diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h
index d0e28ff..7b1d223 100644
--- a/drivers/gpu/drm/radeon/r600_reg.h
+++ b/drivers/gpu/drm/radeon/r600_reg.h
@@ -152,9 +152,9 @@
 #define R600_AUDIO_STATUS_BITS            0x73d8
 
 /* HDMI base register addresses */
-#define R600_HDMI_TMDS1                   0x7400
-#define R600_HDMI_TMDS2                   0x7700
-#define R600_HDMI_DIG                     0x7800
+#define R600_HDMI_BLOCK1                  0x7400
+#define R600_HDMI_BLOCK2                  0x7700
+#define R600_HDMI_BLOCK3                  0x7800
 
 /* HDMI registers */
 #define R600_HDMI_ENABLE           0x00
@@ -185,4 +185,8 @@
 #define R600_HDMI_AUDIO_DEBUG_2    0xe8
 #define R600_HDMI_AUDIO_DEBUG_3    0xec
 
+/* HDMI additional config base register addresses */
+#define R600_HDMI_CONFIG1                 0x7600
+#define R600_HDMI_CONFIG2                 0x7a00
+
 #endif
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 5b2e4d4..59c1f87 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -77,6 +77,55 @@
 #define CB_COLOR0_FRAG                                  0x280e0
 #define CB_COLOR0_MASK                                  0x28100
 
+#define SQ_ALU_CONST_CACHE_PS_0				0x28940
+#define SQ_ALU_CONST_CACHE_PS_1				0x28944
+#define SQ_ALU_CONST_CACHE_PS_2				0x28948
+#define SQ_ALU_CONST_CACHE_PS_3				0x2894c
+#define SQ_ALU_CONST_CACHE_PS_4				0x28950
+#define SQ_ALU_CONST_CACHE_PS_5				0x28954
+#define SQ_ALU_CONST_CACHE_PS_6				0x28958
+#define SQ_ALU_CONST_CACHE_PS_7				0x2895c
+#define SQ_ALU_CONST_CACHE_PS_8				0x28960
+#define SQ_ALU_CONST_CACHE_PS_9				0x28964
+#define SQ_ALU_CONST_CACHE_PS_10			0x28968
+#define SQ_ALU_CONST_CACHE_PS_11			0x2896c
+#define SQ_ALU_CONST_CACHE_PS_12			0x28970
+#define SQ_ALU_CONST_CACHE_PS_13			0x28974
+#define SQ_ALU_CONST_CACHE_PS_14			0x28978
+#define SQ_ALU_CONST_CACHE_PS_15			0x2897c
+#define SQ_ALU_CONST_CACHE_VS_0				0x28980
+#define SQ_ALU_CONST_CACHE_VS_1				0x28984
+#define SQ_ALU_CONST_CACHE_VS_2				0x28988
+#define SQ_ALU_CONST_CACHE_VS_3				0x2898c
+#define SQ_ALU_CONST_CACHE_VS_4				0x28990
+#define SQ_ALU_CONST_CACHE_VS_5				0x28994
+#define SQ_ALU_CONST_CACHE_VS_6				0x28998
+#define SQ_ALU_CONST_CACHE_VS_7				0x2899c
+#define SQ_ALU_CONST_CACHE_VS_8				0x289a0
+#define SQ_ALU_CONST_CACHE_VS_9				0x289a4
+#define SQ_ALU_CONST_CACHE_VS_10			0x289a8
+#define SQ_ALU_CONST_CACHE_VS_11			0x289ac
+#define SQ_ALU_CONST_CACHE_VS_12			0x289b0
+#define SQ_ALU_CONST_CACHE_VS_13			0x289b4
+#define SQ_ALU_CONST_CACHE_VS_14			0x289b8
+#define SQ_ALU_CONST_CACHE_VS_15			0x289bc
+#define SQ_ALU_CONST_CACHE_GS_0				0x289c0
+#define SQ_ALU_CONST_CACHE_GS_1				0x289c4
+#define SQ_ALU_CONST_CACHE_GS_2				0x289c8
+#define SQ_ALU_CONST_CACHE_GS_3				0x289cc
+#define SQ_ALU_CONST_CACHE_GS_4				0x289d0
+#define SQ_ALU_CONST_CACHE_GS_5				0x289d4
+#define SQ_ALU_CONST_CACHE_GS_6				0x289d8
+#define SQ_ALU_CONST_CACHE_GS_7				0x289dc
+#define SQ_ALU_CONST_CACHE_GS_8				0x289e0
+#define SQ_ALU_CONST_CACHE_GS_9				0x289e4
+#define SQ_ALU_CONST_CACHE_GS_10			0x289e8
+#define SQ_ALU_CONST_CACHE_GS_11			0x289ec
+#define SQ_ALU_CONST_CACHE_GS_12			0x289f0
+#define SQ_ALU_CONST_CACHE_GS_13			0x289f4
+#define SQ_ALU_CONST_CACHE_GS_14			0x289f8
+#define SQ_ALU_CONST_CACHE_GS_15			0x289fc
+
 #define	CONFIG_MEMSIZE					0x5428
 #define CONFIG_CNTL					0x5424
 #define	CP_STAT						0x8680
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 829e26e..034218c 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -91,6 +91,8 @@
 extern int radeon_new_pll;
 extern int radeon_dynpm;
 extern int radeon_audio;
+extern int radeon_disp_priority;
+extern int radeon_hw_i2c;
 
 /*
  * Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -168,6 +170,7 @@
  * Power management
  */
 int radeon_pm_init(struct radeon_device *rdev);
+void radeon_pm_fini(struct radeon_device *rdev);
 void radeon_pm_compute_clocks(struct radeon_device *rdev);
 void radeon_combios_get_power_modes(struct radeon_device *rdev);
 void radeon_atombios_get_power_modes(struct radeon_device *rdev);
@@ -687,6 +690,7 @@
 	bool 			downclocked;
 	int			active_crtcs;
 	int			req_vblank;
+	bool			vblank_sync;
 	fixed20_12		max_bandwidth;
 	fixed20_12		igp_sideport_mclk;
 	fixed20_12		igp_system_mclk;
@@ -697,6 +701,7 @@
 	fixed20_12		ht_bandwidth;
 	fixed20_12		core_bandwidth;
 	fixed20_12		sclk;
+	fixed20_12		mclk;
 	fixed20_12		needed_bandwidth;
 	/* XXX: use a define for num power modes */
 	struct radeon_power_state power_state[8];
@@ -707,6 +712,7 @@
 	struct radeon_power_state *requested_power_state;
 	struct radeon_pm_clock_info *requested_clock_mode;
 	struct radeon_power_state *default_power_state;
+	struct radeon_i2c_chan *i2c_bus;
 };
 
 
@@ -729,8 +735,6 @@
 			     struct drm_info_list *files,
 			     unsigned nfiles);
 int radeon_debugfs_fence_init(struct radeon_device *rdev);
-int r100_debugfs_rbbm_init(struct radeon_device *rdev);
-int r100_debugfs_cp_init(struct radeon_device *rdev);
 
 
 /*
@@ -782,7 +786,7 @@
 	int (*set_surface_reg)(struct radeon_device *rdev, int reg,
 			       uint32_t tiling_flags, uint32_t pitch,
 			       uint32_t offset, uint32_t obj_size);
-	int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
+	void (*clear_surface_reg)(struct radeon_device *rdev, int reg);
 	void (*bandwidth_update)(struct radeon_device *rdev);
 	void (*hpd_init)(struct radeon_device *rdev);
 	void (*hpd_fini)(struct radeon_device *rdev);
@@ -862,6 +866,12 @@
 	struct rv770_asic	rv770;
 };
 
+/*
+ * ASIC initialization from radeon_asic.c
+ */
+void radeon_agp_disable(struct radeon_device *rdev);
+int radeon_asic_init(struct radeon_device *rdev);
+
 
 /*
  * IOCTL.
@@ -1172,6 +1182,8 @@
 extern int radeon_modeset_init(struct radeon_device *rdev);
 extern void radeon_modeset_fini(struct radeon_device *rdev);
 extern bool radeon_card_posted(struct radeon_device *rdev);
+extern void radeon_update_bandwidth_info(struct radeon_device *rdev);
+extern void radeon_update_display_priority(struct radeon_device *rdev);
 extern bool radeon_boot_test_post_card(struct radeon_device *rdev);
 extern int radeon_clocks_init(struct radeon_device *rdev);
 extern void radeon_clocks_fini(struct radeon_device *rdev);
@@ -1188,51 +1200,6 @@
 extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
 
 /* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */
-struct r100_mc_save {
-	u32	GENMO_WT;
-	u32	CRTC_EXT_CNTL;
-	u32	CRTC_GEN_CNTL;
-	u32	CRTC2_GEN_CNTL;
-	u32	CUR_OFFSET;
-	u32	CUR2_OFFSET;
-};
-extern void r100_cp_disable(struct radeon_device *rdev);
-extern int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
-extern void r100_cp_fini(struct radeon_device *rdev);
-extern void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
-extern int r100_pci_gart_init(struct radeon_device *rdev);
-extern void r100_pci_gart_fini(struct radeon_device *rdev);
-extern int r100_pci_gart_enable(struct radeon_device *rdev);
-extern void r100_pci_gart_disable(struct radeon_device *rdev);
-extern int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
-extern int r100_debugfs_mc_info_init(struct radeon_device *rdev);
-extern int r100_gui_wait_for_idle(struct radeon_device *rdev);
-extern void r100_ib_fini(struct radeon_device *rdev);
-extern int r100_ib_init(struct radeon_device *rdev);
-extern void r100_irq_disable(struct radeon_device *rdev);
-extern int r100_irq_set(struct radeon_device *rdev);
-extern void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save);
-extern void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save);
-extern void r100_vram_init_sizes(struct radeon_device *rdev);
-extern void r100_wb_disable(struct radeon_device *rdev);
-extern void r100_wb_fini(struct radeon_device *rdev);
-extern int r100_wb_init(struct radeon_device *rdev);
-extern void r100_hdp_reset(struct radeon_device *rdev);
-extern int r100_rb2d_reset(struct radeon_device *rdev);
-extern int r100_cp_reset(struct radeon_device *rdev);
-extern void r100_vga_render_disable(struct radeon_device *rdev);
-extern int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
-						struct radeon_cs_packet *pkt,
-						struct radeon_bo *robj);
-extern int r100_cs_parse_packet0(struct radeon_cs_parser *p,
-				struct radeon_cs_packet *pkt,
-				const unsigned *auth, unsigned n,
-				radeon_packet0_check_t check);
-extern int r100_cs_packet_parse(struct radeon_cs_parser *p,
-				struct radeon_cs_packet *pkt,
-				unsigned idx);
-extern void r100_enable_bm(struct radeon_device *rdev);
-extern void r100_set_common_regs(struct radeon_device *rdev);
 
 /* rv200,rv250,rv280 */
 extern void r200_set_safe_registers(struct radeon_device *rdev);
@@ -1322,7 +1289,8 @@
 extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
 extern void r600_audio_fini(struct radeon_device *rdev);
 extern void r600_hdmi_init(struct drm_encoder *encoder);
-extern void r600_hdmi_enable(struct drm_encoder *encoder, int enable);
+extern void r600_hdmi_enable(struct drm_encoder *encoder);
+extern void r600_hdmi_disable(struct drm_encoder *encoder);
 extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
 extern int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
 extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
new file mode 100644
index 0000000..a4b4bc9
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -0,0 +1,772 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+
+#include <linux/console.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/radeon_drm.h>
+#include <linux/vgaarb.h>
+#include <linux/vga_switcheroo.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "atom.h"
+
+/*
+ * Register accessor functions.
+ */
+static uint32_t radeon_invalid_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
+	BUG_ON(1);
+	return 0;
+}
+
+static void radeon_invalid_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
+		  reg, v);
+	BUG_ON(1);
+}
+
+static void radeon_register_accessor_init(struct radeon_device *rdev)
+{
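+	/* default to the invalid stubs; the family checks below install
+	 * the real accessors */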
+	rdev->mc_rreg = &radeon_invalid_rreg;
+	rdev->mc_wreg = &radeon_invalid_wreg;
+	rdev->pll_rreg = &radeon_invalid_rreg;
+	rdev->pll_wreg = &radeon_invalid_wreg;
+	rdev->pciep_rreg = &radeon_invalid_rreg;
+	rdev->pciep_wreg = &radeon_invalid_wreg;
+
+	/* Don't change the order: later assignments override earlier accessors. */
+	if (rdev->family < CHIP_RV515) {
+		rdev->pcie_reg_mask = 0xff;
+	} else {
+		rdev->pcie_reg_mask = 0x7ff;
+	}
+	/* FIXME: not sure here */
+	if (rdev->family <= CHIP_R580) {
+		rdev->pll_rreg = &r100_pll_rreg;
+		rdev->pll_wreg = &r100_pll_wreg;
+	}
+	if (rdev->family >= CHIP_R420) {
+		rdev->mc_rreg = &r420_mc_rreg;
+		rdev->mc_wreg = &r420_mc_wreg;
+	}
+	if (rdev->family >= CHIP_RV515) {
+		rdev->mc_rreg = &rv515_mc_rreg;
+		rdev->mc_wreg = &rv515_mc_wreg;
+	}
+	if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) {
+		rdev->mc_rreg = &rs400_mc_rreg;
+		rdev->mc_wreg = &rs400_mc_wreg;
+	}
+	if (rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
+		rdev->mc_rreg = &rs690_mc_rreg;
+		rdev->mc_wreg = &rs690_mc_wreg;
+	}
+	if (rdev->family == CHIP_RS600) {
+		rdev->mc_rreg = &rs600_mc_rreg;
+		rdev->mc_wreg = &rs600_mc_wreg;
+	}
+	if ((rdev->family >= CHIP_R600) && (rdev->family <= CHIP_RV740)) {
+		rdev->pciep_rreg = &r600_pciep_rreg;
+		rdev->pciep_wreg = &r600_pciep_wreg;
+	}
+}
+
+
+/* helper to disable agp */
+void radeon_agp_disable(struct radeon_device *rdev)
+{
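+	/* drop the AGP flag, fall back to PCI or PCIE, and (on pre-R600
+	 * parts) swap in the matching GART callbacks */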
+	rdev->flags &= ~RADEON_IS_AGP;
+	if (rdev->family >= CHIP_R600) {
+		DRM_INFO("Forcing AGP to PCIE mode\n");
+		rdev->flags |= RADEON_IS_PCIE;
+	} else if (rdev->family >= CHIP_RV515 ||
+			rdev->family == CHIP_RV380 ||
+			rdev->family == CHIP_RV410 ||
+			rdev->family == CHIP_R423) {
+		DRM_INFO("Forcing AGP to PCIE mode\n");
+		rdev->flags |= RADEON_IS_PCIE;
+		rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
+		rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
+	} else {
+		DRM_INFO("Forcing AGP to PCI mode\n");
+		rdev->flags |= RADEON_IS_PCI;
+		rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
+		rdev->asic->gart_set_page = &r100_pci_gart_set_page;
+	}
+	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
+}
+
+/*
+ * ASIC
+ */
+static struct radeon_asic r100_asic = {
+	.init = &r100_init,
+	.fini = &r100_fini,
+	.suspend = &r100_suspend,
+	.resume = &r100_resume,
+	.vga_set_state = &r100_vga_set_state,
+	.gpu_reset = &r100_gpu_reset,
+	.gart_tlb_flush = &r100_pci_gart_tlb_flush,
+	.gart_set_page = &r100_pci_gart_set_page,
+	.cp_commit = &r100_cp_commit,
+	.ring_start = &r100_ring_start,
+	.ring_test = &r100_ring_test,
+	.ring_ib_execute = &r100_ring_ib_execute,
+	.irq_set = &r100_irq_set,
+	.irq_process = &r100_irq_process,
+	.get_vblank_counter = &r100_get_vblank_counter,
+	.fence_ring_emit = &r100_fence_ring_emit,
+	.cs_parse = &r100_cs_parse,
+	.copy_blit = &r100_copy_blit,
+	.copy_dma = NULL,
+	.copy = &r100_copy_blit,
+	.get_engine_clock = &radeon_legacy_get_engine_clock,
+	.set_engine_clock = &radeon_legacy_set_engine_clock,
+	.get_memory_clock = &radeon_legacy_get_memory_clock,
+	.set_memory_clock = NULL,
+	.get_pcie_lanes = NULL,
+	.set_pcie_lanes = NULL,
+	.set_clock_gating = &radeon_legacy_set_clock_gating,
+	.set_surface_reg = r100_set_surface_reg,
+	.clear_surface_reg = r100_clear_surface_reg,
+	.bandwidth_update = &r100_bandwidth_update,
+	.hpd_init = &r100_hpd_init,
+	.hpd_fini = &r100_hpd_fini,
+	.hpd_sense = &r100_hpd_sense,
+	.hpd_set_polarity = &r100_hpd_set_polarity,
+	.ioctl_wait_idle = NULL,
+};
+
+static struct radeon_asic r200_asic = {
+	.init = &r100_init,
+	.fini = &r100_fini,
+	.suspend = &r100_suspend,
+	.resume = &r100_resume,
+	.vga_set_state = &r100_vga_set_state,
+	.gpu_reset = &r100_gpu_reset,
+	.gart_tlb_flush = &r100_pci_gart_tlb_flush,
+	.gart_set_page = &r100_pci_gart_set_page,
+	.cp_commit = &r100_cp_commit,
+	.ring_start = &r100_ring_start,
+	.ring_test = &r100_ring_test,
+	.ring_ib_execute = &r100_ring_ib_execute,
+	.irq_set = &r100_irq_set,
+	.irq_process = &r100_irq_process,
+	.get_vblank_counter = &r100_get_vblank_counter,
+	.fence_ring_emit = &r100_fence_ring_emit,
+	.cs_parse = &r100_cs_parse,
+	.copy_blit = &r100_copy_blit,
+	.copy_dma = &r200_copy_dma,
+	.copy = &r100_copy_blit,
+	.get_engine_clock = &radeon_legacy_get_engine_clock,
+	.set_engine_clock = &radeon_legacy_set_engine_clock,
+	.get_memory_clock = &radeon_legacy_get_memory_clock,
+	.set_memory_clock = NULL,
+	.set_pcie_lanes = NULL,
+	.set_clock_gating = &radeon_legacy_set_clock_gating,
+	.set_surface_reg = r100_set_surface_reg,
+	.clear_surface_reg = r100_clear_surface_reg,
+	.bandwidth_update = &r100_bandwidth_update,
+	.hpd_init = &r100_hpd_init,
+	.hpd_fini = &r100_hpd_fini,
+	.hpd_sense = &r100_hpd_sense,
+	.hpd_set_polarity = &r100_hpd_set_polarity,
+	.ioctl_wait_idle = NULL,
+};
+
+static struct radeon_asic r300_asic = {
+	.init = &r300_init,
+	.fini = &r300_fini,
+	.suspend = &r300_suspend,
+	.resume = &r300_resume,
+	.vga_set_state = &r100_vga_set_state,
+	.gpu_reset = &r300_gpu_reset,
+	.gart_tlb_flush = &r100_pci_gart_tlb_flush,
+	.gart_set_page = &r100_pci_gart_set_page,
+	.cp_commit = &r100_cp_commit,
+	.ring_start = &r300_ring_start,
+	.ring_test = &r100_ring_test,
+	.ring_ib_execute = &r100_ring_ib_execute,
+	.irq_set = &r100_irq_set,
+	.irq_process = &r100_irq_process,
+	.get_vblank_counter = &r100_get_vblank_counter,
+	.fence_ring_emit = &r300_fence_ring_emit,
+	.cs_parse = &r300_cs_parse,
+	.copy_blit = &r100_copy_blit,
+	.copy_dma = &r200_copy_dma,
+	.copy = &r100_copy_blit,
+	.get_engine_clock = &radeon_legacy_get_engine_clock,
+	.set_engine_clock = &radeon_legacy_set_engine_clock,
+	.get_memory_clock = &radeon_legacy_get_memory_clock,
+	.set_memory_clock = NULL,
+	.get_pcie_lanes = &rv370_get_pcie_lanes,
+	.set_pcie_lanes = &rv370_set_pcie_lanes,
+	.set_clock_gating = &radeon_legacy_set_clock_gating,
+	.set_surface_reg = r100_set_surface_reg,
+	.clear_surface_reg = r100_clear_surface_reg,
+	.bandwidth_update = &r100_bandwidth_update,
+	.hpd_init = &r100_hpd_init,
+	.hpd_fini = &r100_hpd_fini,
+	.hpd_sense = &r100_hpd_sense,
+	.hpd_set_polarity = &r100_hpd_set_polarity,
+	.ioctl_wait_idle = NULL,
+};
+
+static struct radeon_asic r300_asic_pcie = {
+	.init = &r300_init,
+	.fini = &r300_fini,
+	.suspend = &r300_suspend,
+	.resume = &r300_resume,
+	.vga_set_state = &r100_vga_set_state,
+	.gpu_reset = &r300_gpu_reset,
+	.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
+	.gart_set_page = &rv370_pcie_gart_set_page,
+	.cp_commit = &r100_cp_commit,
+	.ring_start = &r300_ring_start,
+	.ring_test = &r100_ring_test,
+	.ring_ib_execute = &r100_ring_ib_execute,
+	.irq_set = &r100_irq_set,
+	.irq_process = &r100_irq_process,
+	.get_vblank_counter = &r100_get_vblank_counter,
+	.fence_ring_emit = &r300_fence_ring_emit,
+	.cs_parse = &r300_cs_parse,
+	.copy_blit = &r100_copy_blit,
+	.copy_dma = &r200_copy_dma,
+	.copy = &r100_copy_blit,
+	.get_engine_clock = &radeon_legacy_get_engine_clock,
+	.set_engine_clock = &radeon_legacy_set_engine_clock,
+	.get_memory_clock = &radeon_legacy_get_memory_clock,
+	.set_memory_clock = NULL,
+	.set_pcie_lanes = &rv370_set_pcie_lanes,
+	.set_clock_gating = &radeon_legacy_set_clock_gating,
+	.set_surface_reg = r100_set_surface_reg,
+	.clear_surface_reg = r100_clear_surface_reg,
+	.bandwidth_update = &r100_bandwidth_update,
+	.hpd_init = &r100_hpd_init,
+	.hpd_fini = &r100_hpd_fini,
+	.hpd_sense = &r100_hpd_sense,
+	.hpd_set_polarity = &r100_hpd_set_polarity,
+	.ioctl_wait_idle = NULL,
+};
+
+static struct radeon_asic r420_asic = {
+	.init = &r420_init,
+	.fini = &r420_fini,
+	.suspend = &r420_suspend,
+	.resume = &r420_resume,
+	.vga_set_state = &r100_vga_set_state,
+	.gpu_reset = &r300_gpu_reset,
+	.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
+	.gart_set_page = &rv370_pcie_gart_set_page,
+	.cp_commit = &r100_cp_commit,
+	.ring_start = &r300_ring_start,
+	.ring_test = &r100_ring_test,
+	.ring_ib_execute = &r100_ring_ib_execute,
+	.irq_set = &r100_irq_set,
+	.irq_process = &r100_irq_process,
+	.get_vblank_counter = &r100_get_vblank_counter,
+	.fence_ring_emit = &r300_fence_ring_emit,
+	.cs_parse = &r300_cs_parse,
+	.copy_blit = &r100_copy_blit,
+	.copy_dma = &r200_copy_dma,
+	.copy = &r100_copy_blit,
+	.get_engine_clock = &radeon_atom_get_engine_clock,
+	.set_engine_clock = &radeon_atom_set_engine_clock,
+	.get_memory_clock = &radeon_atom_get_memory_clock,
+	.set_memory_clock = &radeon_atom_set_memory_clock,
+	.get_pcie_lanes = &rv370_get_pcie_lanes,
+	.set_pcie_lanes = &rv370_set_pcie_lanes,
+	.set_clock_gating = &radeon_atom_set_clock_gating,
+	.set_surface_reg = r100_set_surface_reg,
+	.clear_surface_reg = r100_clear_surface_reg,
+	.bandwidth_update = &r100_bandwidth_update,
+	.hpd_init = &r100_hpd_init,
+	.hpd_fini = &r100_hpd_fini,
+	.hpd_sense = &r100_hpd_sense,
+	.hpd_set_polarity = &r100_hpd_set_polarity,
+	.ioctl_wait_idle = NULL,
+};
+
+static struct radeon_asic rs400_asic = {
+	.init = &rs400_init,
+	.fini = &rs400_fini,
+	.suspend = &rs400_suspend,
+	.resume = &rs400_resume,
+	.vga_set_state = &r100_vga_set_state,
+	.gpu_reset = &r300_gpu_reset,
+	.gart_tlb_flush = &rs400_gart_tlb_flush,
+	.gart_set_page = &rs400_gart_set_page,
+	.cp_commit = &r100_cp_commit,
+	.ring_start = &r300_ring_start,
+	.ring_test = &r100_ring_test,
+	.ring_ib_execute = &r100_ring_ib_execute,
+	.irq_set = &r100_irq_set,
+	.irq_process = &r100_irq_process,
+	.get_vblank_counter = &r100_get_vblank_counter,
+	.fence_ring_emit = &r300_fence_ring_emit,
+	.cs_parse = &r300_cs_parse,
+	.copy_blit = &r100_copy_blit,
+	.copy_dma = &r200_copy_dma,
+	.copy = &r100_copy_blit,
+	.get_engine_clock = &radeon_legacy_get_engine_clock,
+	.set_engine_clock = &radeon_legacy_set_engine_clock,
+	.get_memory_clock = &radeon_legacy_get_memory_clock,
+	.set_memory_clock = NULL,
+	.get_pcie_lanes = NULL,
+	.set_pcie_lanes = NULL,
+	.set_clock_gating = &radeon_legacy_set_clock_gating,
+	.set_surface_reg = r100_set_surface_reg,
+	.clear_surface_reg = r100_clear_surface_reg,
+	.bandwidth_update = &r100_bandwidth_update,
+	.hpd_init = &r100_hpd_init,
+	.hpd_fini = &r100_hpd_fini,
+	.hpd_sense = &r100_hpd_sense,
+	.hpd_set_polarity = &r100_hpd_set_polarity,
+	.ioctl_wait_idle = NULL,
+};
+
+static struct radeon_asic rs600_asic = {
+	.init = &rs600_init,
+	.fini = &rs600_fini,
+	.suspend = &rs600_suspend,
+	.resume = &rs600_resume,
+	.vga_set_state = &r100_vga_set_state,
+	.gpu_reset = &r300_gpu_reset,
+	.gart_tlb_flush = &rs600_gart_tlb_flush,
+	.gart_set_page = &rs600_gart_set_page,
+	.cp_commit = &r100_cp_commit,
+	.ring_start = &r300_ring_start,
+	.ring_test = &r100_ring_test,
+	.ring_ib_execute = &r100_ring_ib_execute,
+	.irq_set = &rs600_irq_set,
+	.irq_process = &rs600_irq_process,
+	.get_vblank_counter = &rs600_get_vblank_counter,
+	.fence_ring_emit = &r300_fence_ring_emit,
+	.cs_parse = &r300_cs_parse,
+	.copy_blit = &r100_copy_blit,
+	.copy_dma = &r200_copy_dma,
+	.copy = &r100_copy_blit,
+	.get_engine_clock = &radeon_atom_get_engine_clock,
+	.set_engine_clock = &radeon_atom_set_engine_clock,
+	.get_memory_clock = &radeon_atom_get_memory_clock,
+	.set_memory_clock = &radeon_atom_set_memory_clock,
+	.get_pcie_lanes = NULL,
+	.set_pcie_lanes = NULL,
+	.set_clock_gating = &radeon_atom_set_clock_gating,
+	.set_surface_reg = r100_set_surface_reg,
+	.clear_surface_reg = r100_clear_surface_reg,
+	.bandwidth_update = &rs600_bandwidth_update,
+	.hpd_init = &rs600_hpd_init,
+	.hpd_fini = &rs600_hpd_fini,
+	.hpd_sense = &rs600_hpd_sense,
+	.hpd_set_polarity = &rs600_hpd_set_polarity,
+	.ioctl_wait_idle = NULL,
+};
+
+static struct radeon_asic rs690_asic = {
+	.init = &rs690_init,
+	.fini = &rs690_fini,
+	.suspend = &rs690_suspend,
+	.resume = &rs690_resume,
+	.vga_set_state = &r100_vga_set_state,
+	.gpu_reset = &r300_gpu_reset,
+	.gart_tlb_flush = &rs400_gart_tlb_flush,
+	.gart_set_page = &rs400_gart_set_page,
+	.cp_commit = &r100_cp_commit,
+	.ring_start = &r300_ring_start,
+	.ring_test = &r100_ring_test,
+	.ring_ib_execute = &r100_ring_ib_execute,
+	.irq_set = &rs600_irq_set,
+	.irq_process = &rs600_irq_process,
+	.get_vblank_counter = &rs600_get_vblank_counter,
+	.fence_ring_emit = &r300_fence_ring_emit,
+	.cs_parse = &r300_cs_parse,
+	.copy_blit = &r100_copy_blit,
+	.copy_dma = &r200_copy_dma,
+	.copy = &r200_copy_dma,
+	.get_engine_clock = &radeon_atom_get_engine_clock,
+	.set_engine_clock = &radeon_atom_set_engine_clock,
+	.get_memory_clock = &radeon_atom_get_memory_clock,
+	.set_memory_clock = &radeon_atom_set_memory_clock,
+	.get_pcie_lanes = NULL,
+	.set_pcie_lanes = NULL,
+	.set_clock_gating = &radeon_atom_set_clock_gating,
+	.set_surface_reg = r100_set_surface_reg,
+	.clear_surface_reg = r100_clear_surface_reg,
+	.bandwidth_update = &rs690_bandwidth_update,
+	.hpd_init = &rs600_hpd_init,
+	.hpd_fini = &rs600_hpd_fini,
+	.hpd_sense = &rs600_hpd_sense,
+	.hpd_set_polarity = &rs600_hpd_set_polarity,
+	.ioctl_wait_idle = NULL,
+};
+
+static struct radeon_asic rv515_asic = {
+	.init = &rv515_init,
+	.fini = &rv515_fini,
+	.suspend = &rv515_suspend,
+	.resume = &rv515_resume,
+	.vga_set_state = &r100_vga_set_state,
+	.gpu_reset = &rv515_gpu_reset,
+	.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
+	.gart_set_page = &rv370_pcie_gart_set_page,
+	.cp_commit = &r100_cp_commit,
+	.ring_start = &rv515_ring_start,
+	.ring_test = &r100_ring_test,
+	.ring_ib_execute = &r100_ring_ib_execute,
+	.irq_set = &rs600_irq_set,
+	.irq_process = &rs600_irq_process,
+	.get_vblank_counter = &rs600_get_vblank_counter,
+	.fence_ring_emit = &r300_fence_ring_emit,
+	.cs_parse = &r300_cs_parse,
+	.copy_blit = &r100_copy_blit,
+	.copy_dma = &r200_copy_dma,
+	.copy = &r100_copy_blit,
+	.get_engine_clock = &radeon_atom_get_engine_clock,
+	.set_engine_clock = &radeon_atom_set_engine_clock,
+	.get_memory_clock = &radeon_atom_get_memory_clock,
+	.set_memory_clock = &radeon_atom_set_memory_clock,
+	.get_pcie_lanes = &rv370_get_pcie_lanes,
+	.set_pcie_lanes = &rv370_set_pcie_lanes,
+	.set_clock_gating = &radeon_atom_set_clock_gating,
+	.set_surface_reg = r100_set_surface_reg,
+	.clear_surface_reg = r100_clear_surface_reg,
+	.bandwidth_update = &rv515_bandwidth_update,
+	.hpd_init = &rs600_hpd_init,
+	.hpd_fini = &rs600_hpd_fini,
+	.hpd_sense = &rs600_hpd_sense,
+	.hpd_set_polarity = &rs600_hpd_set_polarity,
+	.ioctl_wait_idle = NULL,
+};
+
+static struct radeon_asic r520_asic = {
+	.init = &r520_init,
+	.fini = &rv515_fini,
+	.suspend = &rv515_suspend,
+	.resume = &r520_resume,
+	.vga_set_state = &r100_vga_set_state,
+	.gpu_reset = &rv515_gpu_reset,
+	.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
+	.gart_set_page = &rv370_pcie_gart_set_page,
+	.cp_commit = &r100_cp_commit,
+	.ring_start = &rv515_ring_start,
+	.ring_test = &r100_ring_test,
+	.ring_ib_execute = &r100_ring_ib_execute,
+	.irq_set = &rs600_irq_set,
+	.irq_process = &rs600_irq_process,
+	.get_vblank_counter = &rs600_get_vblank_counter,
+	.fence_ring_emit = &r300_fence_ring_emit,
+	.cs_parse = &r300_cs_parse,
+	.copy_blit = &r100_copy_blit,
+	.copy_dma = &r200_copy_dma,
+	.copy = &r100_copy_blit,
+	.get_engine_clock = &radeon_atom_get_engine_clock,
+	.set_engine_clock = &radeon_atom_set_engine_clock,
+	.get_memory_clock = &radeon_atom_get_memory_clock,
+	.set_memory_clock = &radeon_atom_set_memory_clock,
+	.get_pcie_lanes = &rv370_get_pcie_lanes,
+	.set_pcie_lanes = &rv370_set_pcie_lanes,
+	.set_clock_gating = &radeon_atom_set_clock_gating,
+	.set_surface_reg = r100_set_surface_reg,
+	.clear_surface_reg = r100_clear_surface_reg,
+	.bandwidth_update = &rv515_bandwidth_update,
+	.hpd_init = &rs600_hpd_init,
+	.hpd_fini = &rs600_hpd_fini,
+	.hpd_sense = &rs600_hpd_sense,
+	.hpd_set_polarity = &rs600_hpd_set_polarity,
+	.ioctl_wait_idle = NULL,
+};
+
+static struct radeon_asic r600_asic = {
+	.init = &r600_init,
+	.fini = &r600_fini,
+	.suspend = &r600_suspend,
+	.resume = &r600_resume,
+	.cp_commit = &r600_cp_commit,
+	.vga_set_state = &r600_vga_set_state,
+	.gpu_reset = &r600_gpu_reset,
+	.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
+	.gart_set_page = &rs600_gart_set_page,
+	.ring_test = &r600_ring_test,
+	.ring_ib_execute = &r600_ring_ib_execute,
+	.irq_set = &r600_irq_set,
+	.irq_process = &r600_irq_process,
+	.get_vblank_counter = &rs600_get_vblank_counter,
+	.fence_ring_emit = &r600_fence_ring_emit,
+	.cs_parse = &r600_cs_parse,
+	.copy_blit = &r600_copy_blit,
+	.copy_dma = &r600_copy_blit,
+	.copy = &r600_copy_blit,
+	.get_engine_clock = &radeon_atom_get_engine_clock,
+	.set_engine_clock = &radeon_atom_set_engine_clock,
+	.get_memory_clock = &radeon_atom_get_memory_clock,
+	.set_memory_clock = &radeon_atom_set_memory_clock,
+	.get_pcie_lanes = &rv370_get_pcie_lanes,
+	.set_pcie_lanes = NULL,
+	.set_clock_gating = NULL,
+	.set_surface_reg = r600_set_surface_reg,
+	.clear_surface_reg = r600_clear_surface_reg,
+	.bandwidth_update = &rv515_bandwidth_update,
+	.hpd_init = &r600_hpd_init,
+	.hpd_fini = &r600_hpd_fini,
+	.hpd_sense = &r600_hpd_sense,
+	.hpd_set_polarity = &r600_hpd_set_polarity,
+	.ioctl_wait_idle = r600_ioctl_wait_idle,
+};
+
+static struct radeon_asic rs780_asic = {
+	.init = &r600_init,
+	.fini = &r600_fini,
+	.suspend = &r600_suspend,
+	.resume = &r600_resume,
+	.cp_commit = &r600_cp_commit,
+	.vga_set_state = &r600_vga_set_state,
+	.gpu_reset = &r600_gpu_reset,
+	.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
+	.gart_set_page = &rs600_gart_set_page,
+	.ring_test = &r600_ring_test,
+	.ring_ib_execute = &r600_ring_ib_execute,
+	.irq_set = &r600_irq_set,
+	.irq_process = &r600_irq_process,
+	.get_vblank_counter = &rs600_get_vblank_counter,
+	.fence_ring_emit = &r600_fence_ring_emit,
+	.cs_parse = &r600_cs_parse,
+	.copy_blit = &r600_copy_blit,
+	.copy_dma = &r600_copy_blit,
+	.copy = &r600_copy_blit,
+	.get_engine_clock = &radeon_atom_get_engine_clock,
+	.set_engine_clock = &radeon_atom_set_engine_clock,
+	.get_memory_clock = NULL,
+	.set_memory_clock = NULL,
+	.get_pcie_lanes = NULL,
+	.set_pcie_lanes = NULL,
+	.set_clock_gating = NULL,
+	.set_surface_reg = r600_set_surface_reg,
+	.clear_surface_reg = r600_clear_surface_reg,
+	.bandwidth_update = &rs690_bandwidth_update,
+	.hpd_init = &r600_hpd_init,
+	.hpd_fini = &r600_hpd_fini,
+	.hpd_sense = &r600_hpd_sense,
+	.hpd_set_polarity = &r600_hpd_set_polarity,
+	.ioctl_wait_idle = r600_ioctl_wait_idle,
+};
+
+static struct radeon_asic rv770_asic = {
+	.init = &rv770_init,
+	.fini = &rv770_fini,
+	.suspend = &rv770_suspend,
+	.resume = &rv770_resume,
+	.cp_commit = &r600_cp_commit,
+	.gpu_reset = &rv770_gpu_reset,
+	.vga_set_state = &r600_vga_set_state,
+	.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
+	.gart_set_page = &rs600_gart_set_page,
+	.ring_test = &r600_ring_test,
+	.ring_ib_execute = &r600_ring_ib_execute,
+	.irq_set = &r600_irq_set,
+	.irq_process = &r600_irq_process,
+	.get_vblank_counter = &rs600_get_vblank_counter,
+	.fence_ring_emit = &r600_fence_ring_emit,
+	.cs_parse = &r600_cs_parse,
+	.copy_blit = &r600_copy_blit,
+	.copy_dma = &r600_copy_blit,
+	.copy = &r600_copy_blit,
+	.get_engine_clock = &radeon_atom_get_engine_clock,
+	.set_engine_clock = &radeon_atom_set_engine_clock,
+	.get_memory_clock = &radeon_atom_get_memory_clock,
+	.set_memory_clock = &radeon_atom_set_memory_clock,
+	.get_pcie_lanes = &rv370_get_pcie_lanes,
+	.set_pcie_lanes = NULL,
+	.set_clock_gating = &radeon_atom_set_clock_gating,
+	.set_surface_reg = r600_set_surface_reg,
+	.clear_surface_reg = r600_clear_surface_reg,
+	.bandwidth_update = &rv515_bandwidth_update,
+	.hpd_init = &r600_hpd_init,
+	.hpd_fini = &r600_hpd_fini,
+	.hpd_sense = &r600_hpd_sense,
+	.hpd_set_polarity = &r600_hpd_set_polarity,
+	.ioctl_wait_idle = r600_ioctl_wait_idle,
+};
+
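+/* Evergreen bring-up: acceleration and interrupt callbacks are not
+ * wired up yet */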
+static struct radeon_asic evergreen_asic = {
+	.init = &evergreen_init,
+	.fini = &evergreen_fini,
+	.suspend = &evergreen_suspend,
+	.resume = &evergreen_resume,
+	.cp_commit = NULL,
+	.gpu_reset = &evergreen_gpu_reset,
+	.vga_set_state = &r600_vga_set_state,
+	.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
+	.gart_set_page = &rs600_gart_set_page,
+	.ring_test = NULL,
+	.ring_ib_execute = NULL,
+	.irq_set = NULL,
+	.irq_process = NULL,
+	.get_vblank_counter = NULL,
+	.fence_ring_emit = NULL,
+	.cs_parse = NULL,
+	.copy_blit = NULL,
+	.copy_dma = NULL,
+	.copy = NULL,
+	.get_engine_clock = &radeon_atom_get_engine_clock,
+	.set_engine_clock = &radeon_atom_set_engine_clock,
+	.get_memory_clock = &radeon_atom_get_memory_clock,
+	.set_memory_clock = &radeon_atom_set_memory_clock,
+	.set_pcie_lanes = NULL,
+	.set_clock_gating = NULL,
+	.set_surface_reg = r600_set_surface_reg,
+	.clear_surface_reg = r600_clear_surface_reg,
+	.bandwidth_update = &evergreen_bandwidth_update,
+	.hpd_init = &evergreen_hpd_init,
+	.hpd_fini = &evergreen_hpd_fini,
+	.hpd_sense = &evergreen_hpd_sense,
+	.hpd_set_polarity = &evergreen_hpd_set_polarity,
+};
+
+int radeon_asic_init(struct radeon_device *rdev)
+{
+	radeon_register_accessor_init(rdev);
+	switch (rdev->family) {
+	case CHIP_R100:
+	case CHIP_RV100:
+	case CHIP_RS100:
+	case CHIP_RV200:
+	case CHIP_RS200:
+		rdev->asic = &r100_asic;
+		break;
+	case CHIP_R200:
+	case CHIP_RV250:
+	case CHIP_RS300:
+	case CHIP_RV280:
+		rdev->asic = &r200_asic;
+		break;
+	case CHIP_R300:
+	case CHIP_R350:
+	case CHIP_RV350:
+	case CHIP_RV380:
+		if (rdev->flags & RADEON_IS_PCIE)
+			rdev->asic = &r300_asic_pcie;
+		else
+			rdev->asic = &r300_asic;
+		break;
+	case CHIP_R420:
+	case CHIP_R423:
+	case CHIP_RV410:
+		rdev->asic = &r420_asic;
+		break;
+	case CHIP_RS400:
+	case CHIP_RS480:
+		rdev->asic = &rs400_asic;
+		break;
+	case CHIP_RS600:
+		rdev->asic = &rs600_asic;
+		break;
+	case CHIP_RS690:
+	case CHIP_RS740:
+		rdev->asic = &rs690_asic;
+		break;
+	case CHIP_RV515:
+		rdev->asic = &rv515_asic;
+		break;
+	case CHIP_R520:
+	case CHIP_RV530:
+	case CHIP_RV560:
+	case CHIP_RV570:
+	case CHIP_R580:
+		rdev->asic = &r520_asic;
+		break;
+	case CHIP_R600:
+	case CHIP_RV610:
+	case CHIP_RV630:
+	case CHIP_RV620:
+	case CHIP_RV635:
+	case CHIP_RV670:
+		rdev->asic = &r600_asic;
+		break;
+	case CHIP_RS780:
+	case CHIP_RS880:
+		rdev->asic = &rs780_asic;
+		break;
+	case CHIP_RV770:
+	case CHIP_RV730:
+	case CHIP_RV710:
+	case CHIP_RV740:
+		rdev->asic = &rv770_asic;
+		break;
+	case CHIP_CEDAR:
+	case CHIP_REDWOOD:
+	case CHIP_JUNIPER:
+	case CHIP_CYPRESS:
+	case CHIP_HEMLOCK:
+		rdev->asic = &evergreen_asic;
+		break;
+	default:
+		/* FIXME: not supported yet */
+		return -EINVAL;
+	}
+
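+	/* memory clock control is not used on IGPs */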
+	if (rdev->flags & RADEON_IS_IGP) {
+		rdev->asic->get_memory_clock = NULL;
+		rdev->asic->set_memory_clock = NULL;
+	}
+
+	/* set the number of crtcs */
+	if (rdev->flags & RADEON_SINGLE_CRTC)
+		rdev->num_crtc = 1;
+	else {
+		if (ASIC_IS_DCE4(rdev))
+			rdev->num_crtc = 6;
+		else
+			rdev->num_crtc = 2;
+	}
+
+	return 0;
+}
+
+/*
+ * Wrapper around modesetting bits. Move to radeon_clocks.c?
+ */
+int radeon_clocks_init(struct radeon_device *rdev)
+{
+	int r;
+
+	r = radeon_static_clocks_init(rdev->ddev);
+	if (r) {
+		return r;
+	}
+	DRM_INFO("Clocks initialized!\n");
+	return 0;
+}
+
+void radeon_clocks_fini(struct radeon_device *rdev)
+{
+}
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index d3a157b..a0b8280 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -45,10 +45,18 @@
 /*
  * r100,rv100,rs100,rv200,rs200
  */
-extern int r100_init(struct radeon_device *rdev);
-extern void r100_fini(struct radeon_device *rdev);
-extern int r100_suspend(struct radeon_device *rdev);
-extern int r100_resume(struct radeon_device *rdev);
+struct r100_mc_save {
+	u32	GENMO_WT;
+	u32	CRTC_EXT_CNTL;
+	u32	CRTC_GEN_CNTL;
+	u32	CRTC2_GEN_CNTL;
+	u32	CUR_OFFSET;
+	u32	CUR2_OFFSET;
+};
+int r100_init(struct radeon_device *rdev);
+void r100_fini(struct radeon_device *rdev);
+int r100_suspend(struct radeon_device *rdev);
+int r100_resume(struct radeon_device *rdev);
 uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg);
 void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 void r100_vga_set_state(struct radeon_device *rdev, bool state);
@@ -73,7 +81,7 @@
 int r100_set_surface_reg(struct radeon_device *rdev, int reg,
 			 uint32_t tiling_flags, uint32_t pitch,
 			 uint32_t offset, uint32_t obj_size);
-int r100_clear_surface_reg(struct radeon_device *rdev, int reg);
+void r100_clear_surface_reg(struct radeon_device *rdev, int reg);
 void r100_bandwidth_update(struct radeon_device *rdev);
 void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
 int r100_ring_test(struct radeon_device *rdev);
@@ -82,44 +90,42 @@
 bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
 void r100_hpd_set_polarity(struct radeon_device *rdev,
 			   enum radeon_hpd_id hpd);
-
-static struct radeon_asic r100_asic = {
-	.init = &r100_init,
-	.fini = &r100_fini,
-	.suspend = &r100_suspend,
-	.resume = &r100_resume,
-	.vga_set_state = &r100_vga_set_state,
-	.gpu_reset = &r100_gpu_reset,
-	.gart_tlb_flush = &r100_pci_gart_tlb_flush,
-	.gart_set_page = &r100_pci_gart_set_page,
-	.cp_commit = &r100_cp_commit,
-	.ring_start = &r100_ring_start,
-	.ring_test = &r100_ring_test,
-	.ring_ib_execute = &r100_ring_ib_execute,
-	.irq_set = &r100_irq_set,
-	.irq_process = &r100_irq_process,
-	.get_vblank_counter = &r100_get_vblank_counter,
-	.fence_ring_emit = &r100_fence_ring_emit,
-	.cs_parse = &r100_cs_parse,
-	.copy_blit = &r100_copy_blit,
-	.copy_dma = NULL,
-	.copy = &r100_copy_blit,
-	.get_engine_clock = &radeon_legacy_get_engine_clock,
-	.set_engine_clock = &radeon_legacy_set_engine_clock,
-	.get_memory_clock = &radeon_legacy_get_memory_clock,
-	.set_memory_clock = NULL,
-	.get_pcie_lanes = NULL,
-	.set_pcie_lanes = NULL,
-	.set_clock_gating = &radeon_legacy_set_clock_gating,
-	.set_surface_reg = r100_set_surface_reg,
-	.clear_surface_reg = r100_clear_surface_reg,
-	.bandwidth_update = &r100_bandwidth_update,
-	.hpd_init = &r100_hpd_init,
-	.hpd_fini = &r100_hpd_fini,
-	.hpd_sense = &r100_hpd_sense,
-	.hpd_set_polarity = &r100_hpd_set_polarity,
-	.ioctl_wait_idle = NULL,
-};
+int r100_debugfs_rbbm_init(struct radeon_device *rdev);
+int r100_debugfs_cp_init(struct radeon_device *rdev);
+void r100_cp_disable(struct radeon_device *rdev);
+int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
+void r100_cp_fini(struct radeon_device *rdev);
+int r100_pci_gart_init(struct radeon_device *rdev);
+void r100_pci_gart_fini(struct radeon_device *rdev);
+int r100_pci_gart_enable(struct radeon_device *rdev);
+void r100_pci_gart_disable(struct radeon_device *rdev);
+int r100_debugfs_mc_info_init(struct radeon_device *rdev);
+int r100_gui_wait_for_idle(struct radeon_device *rdev);
+void r100_ib_fini(struct radeon_device *rdev);
+int r100_ib_init(struct radeon_device *rdev);
+void r100_irq_disable(struct radeon_device *rdev);
+void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save);
+void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save);
+void r100_vram_init_sizes(struct radeon_device *rdev);
+void r100_wb_disable(struct radeon_device *rdev);
+void r100_wb_fini(struct radeon_device *rdev);
+int r100_wb_init(struct radeon_device *rdev);
+void r100_hdp_reset(struct radeon_device *rdev);
+int r100_rb2d_reset(struct radeon_device *rdev);
+int r100_cp_reset(struct radeon_device *rdev);
+void r100_vga_render_disable(struct radeon_device *rdev);
+int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
+					 struct radeon_cs_packet *pkt,
+					 struct radeon_bo *robj);
+int r100_cs_parse_packet0(struct radeon_cs_parser *p,
+			  struct radeon_cs_packet *pkt,
+			  const unsigned *auth, unsigned n,
+			  radeon_packet0_check_t check);
+int r100_cs_packet_parse(struct radeon_cs_parser *p,
+			 struct radeon_cs_packet *pkt,
+			 unsigned idx);
+void r100_enable_bm(struct radeon_device *rdev);
+void r100_set_common_regs(struct radeon_device *rdev);
 
 /*
  * r200,rv250,rs300,rv280
@@ -129,43 +135,6 @@
 			uint64_t dst_offset,
 			unsigned num_pages,
 			struct radeon_fence *fence);
-static struct radeon_asic r200_asic = {
-	.init = &r100_init,
-	.fini = &r100_fini,
-	.suspend = &r100_suspend,
-	.resume = &r100_resume,
-	.vga_set_state = &r100_vga_set_state,
-	.gpu_reset = &r100_gpu_reset,
-	.gart_tlb_flush = &r100_pci_gart_tlb_flush,
-	.gart_set_page = &r100_pci_gart_set_page,
-	.cp_commit = &r100_cp_commit,
-	.ring_start = &r100_ring_start,
-	.ring_test = &r100_ring_test,
-	.ring_ib_execute = &r100_ring_ib_execute,
-	.irq_set = &r100_irq_set,
-	.irq_process = &r100_irq_process,
-	.get_vblank_counter = &r100_get_vblank_counter,
-	.fence_ring_emit = &r100_fence_ring_emit,
-	.cs_parse = &r100_cs_parse,
-	.copy_blit = &r100_copy_blit,
-	.copy_dma = &r200_copy_dma,
-	.copy = &r100_copy_blit,
-	.get_engine_clock = &radeon_legacy_get_engine_clock,
-	.set_engine_clock = &radeon_legacy_set_engine_clock,
-	.get_memory_clock = &radeon_legacy_get_memory_clock,
-	.set_memory_clock = NULL,
-	.set_pcie_lanes = NULL,
-	.set_clock_gating = &radeon_legacy_set_clock_gating,
-	.set_surface_reg = r100_set_surface_reg,
-	.clear_surface_reg = r100_clear_surface_reg,
-	.bandwidth_update = &r100_bandwidth_update,
-	.hpd_init = &r100_hpd_init,
-	.hpd_fini = &r100_hpd_fini,
-	.hpd_sense = &r100_hpd_sense,
-	.hpd_set_polarity = &r100_hpd_set_polarity,
-	.ioctl_wait_idle = NULL,
-};
-
 
 /*
  * r300,r350,rv350,rv380
@@ -186,82 +155,6 @@
 extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
 extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
 
-static struct radeon_asic r300_asic = {
-	.init = &r300_init,
-	.fini = &r300_fini,
-	.suspend = &r300_suspend,
-	.resume = &r300_resume,
-	.vga_set_state = &r100_vga_set_state,
-	.gpu_reset = &r300_gpu_reset,
-	.gart_tlb_flush = &r100_pci_gart_tlb_flush,
-	.gart_set_page = &r100_pci_gart_set_page,
-	.cp_commit = &r100_cp_commit,
-	.ring_start = &r300_ring_start,
-	.ring_test = &r100_ring_test,
-	.ring_ib_execute = &r100_ring_ib_execute,
-	.irq_set = &r100_irq_set,
-	.irq_process = &r100_irq_process,
-	.get_vblank_counter = &r100_get_vblank_counter,
-	.fence_ring_emit = &r300_fence_ring_emit,
-	.cs_parse = &r300_cs_parse,
-	.copy_blit = &r100_copy_blit,
-	.copy_dma = &r200_copy_dma,
-	.copy = &r100_copy_blit,
-	.get_engine_clock = &radeon_legacy_get_engine_clock,
-	.set_engine_clock = &radeon_legacy_set_engine_clock,
-	.get_memory_clock = &radeon_legacy_get_memory_clock,
-	.set_memory_clock = NULL,
-	.get_pcie_lanes = &rv370_get_pcie_lanes,
-	.set_pcie_lanes = &rv370_set_pcie_lanes,
-	.set_clock_gating = &radeon_legacy_set_clock_gating,
-	.set_surface_reg = r100_set_surface_reg,
-	.clear_surface_reg = r100_clear_surface_reg,
-	.bandwidth_update = &r100_bandwidth_update,
-	.hpd_init = &r100_hpd_init,
-	.hpd_fini = &r100_hpd_fini,
-	.hpd_sense = &r100_hpd_sense,
-	.hpd_set_polarity = &r100_hpd_set_polarity,
-	.ioctl_wait_idle = NULL,
-};
-
-
-static struct radeon_asic r300_asic_pcie = {
-	.init = &r300_init,
-	.fini = &r300_fini,
-	.suspend = &r300_suspend,
-	.resume = &r300_resume,
-	.vga_set_state = &r100_vga_set_state,
-	.gpu_reset = &r300_gpu_reset,
-	.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
-	.gart_set_page = &rv370_pcie_gart_set_page,
-	.cp_commit = &r100_cp_commit,
-	.ring_start = &r300_ring_start,
-	.ring_test = &r100_ring_test,
-	.ring_ib_execute = &r100_ring_ib_execute,
-	.irq_set = &r100_irq_set,
-	.irq_process = &r100_irq_process,
-	.get_vblank_counter = &r100_get_vblank_counter,
-	.fence_ring_emit = &r300_fence_ring_emit,
-	.cs_parse = &r300_cs_parse,
-	.copy_blit = &r100_copy_blit,
-	.copy_dma = &r200_copy_dma,
-	.copy = &r100_copy_blit,
-	.get_engine_clock = &radeon_legacy_get_engine_clock,
-	.set_engine_clock = &radeon_legacy_set_engine_clock,
-	.get_memory_clock = &radeon_legacy_get_memory_clock,
-	.set_memory_clock = NULL,
-	.set_pcie_lanes = &rv370_set_pcie_lanes,
-	.set_clock_gating = &radeon_legacy_set_clock_gating,
-	.set_surface_reg = r100_set_surface_reg,
-	.clear_surface_reg = r100_clear_surface_reg,
-	.bandwidth_update = &r100_bandwidth_update,
-	.hpd_init = &r100_hpd_init,
-	.hpd_fini = &r100_hpd_fini,
-	.hpd_sense = &r100_hpd_sense,
-	.hpd_set_polarity = &r100_hpd_set_polarity,
-	.ioctl_wait_idle = NULL,
-};
-
 /*
  * r420,r423,rv410
  */
@@ -269,44 +162,6 @@
 extern void r420_fini(struct radeon_device *rdev);
 extern int r420_suspend(struct radeon_device *rdev);
 extern int r420_resume(struct radeon_device *rdev);
-static struct radeon_asic r420_asic = {
-	.init = &r420_init,
-	.fini = &r420_fini,
-	.suspend = &r420_suspend,
-	.resume = &r420_resume,
-	.vga_set_state = &r100_vga_set_state,
-	.gpu_reset = &r300_gpu_reset,
-	.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
-	.gart_set_page = &rv370_pcie_gart_set_page,
-	.cp_commit = &r100_cp_commit,
-	.ring_start = &r300_ring_start,
-	.ring_test = &r100_ring_test,
-	.ring_ib_execute = &r100_ring_ib_execute,
-	.irq_set = &r100_irq_set,
-	.irq_process = &r100_irq_process,
-	.get_vblank_counter = &r100_get_vblank_counter,
-	.fence_ring_emit = &r300_fence_ring_emit,
-	.cs_parse = &r300_cs_parse,
-	.copy_blit = &r100_copy_blit,
-	.copy_dma = &r200_copy_dma,
-	.copy = &r100_copy_blit,
-	.get_engine_clock = &radeon_atom_get_engine_clock,
-	.set_engine_clock = &radeon_atom_set_engine_clock,
-	.get_memory_clock = &radeon_atom_get_memory_clock,
-	.set_memory_clock = &radeon_atom_set_memory_clock,
-	.get_pcie_lanes = &rv370_get_pcie_lanes,
-	.set_pcie_lanes = &rv370_set_pcie_lanes,
-	.set_clock_gating = &radeon_atom_set_clock_gating,
-	.set_surface_reg = r100_set_surface_reg,
-	.clear_surface_reg = r100_clear_surface_reg,
-	.bandwidth_update = &r100_bandwidth_update,
-	.hpd_init = &r100_hpd_init,
-	.hpd_fini = &r100_hpd_fini,
-	.hpd_sense = &r100_hpd_sense,
-	.hpd_set_polarity = &r100_hpd_set_polarity,
-	.ioctl_wait_idle = NULL,
-};
-
 
 /*
  * rs400,rs480
@@ -319,44 +174,6 @@
 int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
 uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
 void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
-static struct radeon_asic rs400_asic = {
-	.init = &rs400_init,
-	.fini = &rs400_fini,
-	.suspend = &rs400_suspend,
-	.resume = &rs400_resume,
-	.vga_set_state = &r100_vga_set_state,
-	.gpu_reset = &r300_gpu_reset,
-	.gart_tlb_flush = &rs400_gart_tlb_flush,
-	.gart_set_page = &rs400_gart_set_page,
-	.cp_commit = &r100_cp_commit,
-	.ring_start = &r300_ring_start,
-	.ring_test = &r100_ring_test,
-	.ring_ib_execute = &r100_ring_ib_execute,
-	.irq_set = &r100_irq_set,
-	.irq_process = &r100_irq_process,
-	.get_vblank_counter = &r100_get_vblank_counter,
-	.fence_ring_emit = &r300_fence_ring_emit,
-	.cs_parse = &r300_cs_parse,
-	.copy_blit = &r100_copy_blit,
-	.copy_dma = &r200_copy_dma,
-	.copy = &r100_copy_blit,
-	.get_engine_clock = &radeon_legacy_get_engine_clock,
-	.set_engine_clock = &radeon_legacy_set_engine_clock,
-	.get_memory_clock = &radeon_legacy_get_memory_clock,
-	.set_memory_clock = NULL,
-	.get_pcie_lanes = NULL,
-	.set_pcie_lanes = NULL,
-	.set_clock_gating = &radeon_legacy_set_clock_gating,
-	.set_surface_reg = r100_set_surface_reg,
-	.clear_surface_reg = r100_clear_surface_reg,
-	.bandwidth_update = &r100_bandwidth_update,
-	.hpd_init = &r100_hpd_init,
-	.hpd_fini = &r100_hpd_fini,
-	.hpd_sense = &r100_hpd_sense,
-	.hpd_set_polarity = &r100_hpd_set_polarity,
-	.ioctl_wait_idle = NULL,
-};
-
 
 /*
  * rs600.
@@ -379,45 +196,6 @@
 void rs600_hpd_set_polarity(struct radeon_device *rdev,
 			    enum radeon_hpd_id hpd);
 
-static struct radeon_asic rs600_asic = {
-	.init = &rs600_init,
-	.fini = &rs600_fini,
-	.suspend = &rs600_suspend,
-	.resume = &rs600_resume,
-	.vga_set_state = &r100_vga_set_state,
-	.gpu_reset = &r300_gpu_reset,
-	.gart_tlb_flush = &rs600_gart_tlb_flush,
-	.gart_set_page = &rs600_gart_set_page,
-	.cp_commit = &r100_cp_commit,
-	.ring_start = &r300_ring_start,
-	.ring_test = &r100_ring_test,
-	.ring_ib_execute = &r100_ring_ib_execute,
-	.irq_set = &rs600_irq_set,
-	.irq_process = &rs600_irq_process,
-	.get_vblank_counter = &rs600_get_vblank_counter,
-	.fence_ring_emit = &r300_fence_ring_emit,
-	.cs_parse = &r300_cs_parse,
-	.copy_blit = &r100_copy_blit,
-	.copy_dma = &r200_copy_dma,
-	.copy = &r100_copy_blit,
-	.get_engine_clock = &radeon_atom_get_engine_clock,
-	.set_engine_clock = &radeon_atom_set_engine_clock,
-	.get_memory_clock = &radeon_atom_get_memory_clock,
-	.set_memory_clock = &radeon_atom_set_memory_clock,
-	.get_pcie_lanes = NULL,
-	.set_pcie_lanes = NULL,
-	.set_clock_gating = &radeon_atom_set_clock_gating,
-	.set_surface_reg = r100_set_surface_reg,
-	.clear_surface_reg = r100_clear_surface_reg,
-	.bandwidth_update = &rs600_bandwidth_update,
-	.hpd_init = &rs600_hpd_init,
-	.hpd_fini = &rs600_hpd_fini,
-	.hpd_sense = &rs600_hpd_sense,
-	.hpd_set_polarity = &rs600_hpd_set_polarity,
-	.ioctl_wait_idle = NULL,
-};
-
-
 /*
  * rs690,rs740
  */
@@ -428,44 +206,6 @@
 uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg);
 void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 void rs690_bandwidth_update(struct radeon_device *rdev);
-static struct radeon_asic rs690_asic = {
-	.init = &rs690_init,
-	.fini = &rs690_fini,
-	.suspend = &rs690_suspend,
-	.resume = &rs690_resume,
-	.vga_set_state = &r100_vga_set_state,
-	.gpu_reset = &r300_gpu_reset,
-	.gart_tlb_flush = &rs400_gart_tlb_flush,
-	.gart_set_page = &rs400_gart_set_page,
-	.cp_commit = &r100_cp_commit,
-	.ring_start = &r300_ring_start,
-	.ring_test = &r100_ring_test,
-	.ring_ib_execute = &r100_ring_ib_execute,
-	.irq_set = &rs600_irq_set,
-	.irq_process = &rs600_irq_process,
-	.get_vblank_counter = &rs600_get_vblank_counter,
-	.fence_ring_emit = &r300_fence_ring_emit,
-	.cs_parse = &r300_cs_parse,
-	.copy_blit = &r100_copy_blit,
-	.copy_dma = &r200_copy_dma,
-	.copy = &r200_copy_dma,
-	.get_engine_clock = &radeon_atom_get_engine_clock,
-	.set_engine_clock = &radeon_atom_set_engine_clock,
-	.get_memory_clock = &radeon_atom_get_memory_clock,
-	.set_memory_clock = &radeon_atom_set_memory_clock,
-	.get_pcie_lanes = NULL,
-	.set_pcie_lanes = NULL,
-	.set_clock_gating = &radeon_atom_set_clock_gating,
-	.set_surface_reg = r100_set_surface_reg,
-	.clear_surface_reg = r100_clear_surface_reg,
-	.bandwidth_update = &rs690_bandwidth_update,
-	.hpd_init = &rs600_hpd_init,
-	.hpd_fini = &rs600_hpd_fini,
-	.hpd_sense = &rs600_hpd_sense,
-	.hpd_set_polarity = &rs600_hpd_set_polarity,
-	.ioctl_wait_idle = NULL,
-};
-
 
 /*
  * rv515
@@ -481,87 +221,12 @@
 void rv515_bandwidth_update(struct radeon_device *rdev);
 int rv515_resume(struct radeon_device *rdev);
 int rv515_suspend(struct radeon_device *rdev);
-static struct radeon_asic rv515_asic = {
-	.init = &rv515_init,
-	.fini = &rv515_fini,
-	.suspend = &rv515_suspend,
-	.resume = &rv515_resume,
-	.vga_set_state = &r100_vga_set_state,
-	.gpu_reset = &rv515_gpu_reset,
-	.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
-	.gart_set_page = &rv370_pcie_gart_set_page,
-	.cp_commit = &r100_cp_commit,
-	.ring_start = &rv515_ring_start,
-	.ring_test = &r100_ring_test,
-	.ring_ib_execute = &r100_ring_ib_execute,
-	.irq_set = &rs600_irq_set,
-	.irq_process = &rs600_irq_process,
-	.get_vblank_counter = &rs600_get_vblank_counter,
-	.fence_ring_emit = &r300_fence_ring_emit,
-	.cs_parse = &r300_cs_parse,
-	.copy_blit = &r100_copy_blit,
-	.copy_dma = &r200_copy_dma,
-	.copy = &r100_copy_blit,
-	.get_engine_clock = &radeon_atom_get_engine_clock,
-	.set_engine_clock = &radeon_atom_set_engine_clock,
-	.get_memory_clock = &radeon_atom_get_memory_clock,
-	.set_memory_clock = &radeon_atom_set_memory_clock,
-	.get_pcie_lanes = &rv370_get_pcie_lanes,
-	.set_pcie_lanes = &rv370_set_pcie_lanes,
-	.set_clock_gating = &radeon_atom_set_clock_gating,
-	.set_surface_reg = r100_set_surface_reg,
-	.clear_surface_reg = r100_clear_surface_reg,
-	.bandwidth_update = &rv515_bandwidth_update,
-	.hpd_init = &rs600_hpd_init,
-	.hpd_fini = &rs600_hpd_fini,
-	.hpd_sense = &rs600_hpd_sense,
-	.hpd_set_polarity = &rs600_hpd_set_polarity,
-	.ioctl_wait_idle = NULL,
-};
-
 
 /*
  * r520,rv530,rv560,rv570,r580
  */
 int r520_init(struct radeon_device *rdev);
 int r520_resume(struct radeon_device *rdev);
-static struct radeon_asic r520_asic = {
-	.init = &r520_init,
-	.fini = &rv515_fini,
-	.suspend = &rv515_suspend,
-	.resume = &r520_resume,
-	.vga_set_state = &r100_vga_set_state,
-	.gpu_reset = &rv515_gpu_reset,
-	.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
-	.gart_set_page = &rv370_pcie_gart_set_page,
-	.cp_commit = &r100_cp_commit,
-	.ring_start = &rv515_ring_start,
-	.ring_test = &r100_ring_test,
-	.ring_ib_execute = &r100_ring_ib_execute,
-	.irq_set = &rs600_irq_set,
-	.irq_process = &rs600_irq_process,
-	.get_vblank_counter = &rs600_get_vblank_counter,
-	.fence_ring_emit = &r300_fence_ring_emit,
-	.cs_parse = &r300_cs_parse,
-	.copy_blit = &r100_copy_blit,
-	.copy_dma = &r200_copy_dma,
-	.copy = &r100_copy_blit,
-	.get_engine_clock = &radeon_atom_get_engine_clock,
-	.set_engine_clock = &radeon_atom_set_engine_clock,
-	.get_memory_clock = &radeon_atom_get_memory_clock,
-	.set_memory_clock = &radeon_atom_set_memory_clock,
-	.get_pcie_lanes = &rv370_get_pcie_lanes,
-	.set_pcie_lanes = &rv370_set_pcie_lanes,
-	.set_clock_gating = &radeon_atom_set_clock_gating,
-	.set_surface_reg = r100_set_surface_reg,
-	.clear_surface_reg = r100_clear_surface_reg,
-	.bandwidth_update = &rv515_bandwidth_update,
-	.hpd_init = &rs600_hpd_init,
-	.hpd_fini = &rs600_hpd_fini,
-	.hpd_sense = &rs600_hpd_sense,
-	.hpd_set_polarity = &rs600_hpd_set_polarity,
-	.ioctl_wait_idle = NULL,
-};
 
 /*
  * r600,rv610,rv630,rv620,rv635,rv670,rs780,rs880
@@ -591,7 +256,7 @@
 int r600_set_surface_reg(struct radeon_device *rdev, int reg,
 			 uint32_t tiling_flags, uint32_t pitch,
 			 uint32_t offset, uint32_t obj_size);
-int r600_clear_surface_reg(struct radeon_device *rdev, int reg);
+void r600_clear_surface_reg(struct radeon_device *rdev, int reg);
 void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
 int r600_ring_test(struct radeon_device *rdev);
 int r600_copy_blit(struct radeon_device *rdev,
@@ -604,43 +269,6 @@
 			   enum radeon_hpd_id hpd);
 extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo);
 
-static struct radeon_asic r600_asic = {
-	.init = &r600_init,
-	.fini = &r600_fini,
-	.suspend = &r600_suspend,
-	.resume = &r600_resume,
-	.cp_commit = &r600_cp_commit,
-	.vga_set_state = &r600_vga_set_state,
-	.gpu_reset = &r600_gpu_reset,
-	.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
-	.gart_set_page = &rs600_gart_set_page,
-	.ring_test = &r600_ring_test,
-	.ring_ib_execute = &r600_ring_ib_execute,
-	.irq_set = &r600_irq_set,
-	.irq_process = &r600_irq_process,
-	.get_vblank_counter = &rs600_get_vblank_counter,
-	.fence_ring_emit = &r600_fence_ring_emit,
-	.cs_parse = &r600_cs_parse,
-	.copy_blit = &r600_copy_blit,
-	.copy_dma = &r600_copy_blit,
-	.copy = &r600_copy_blit,
-	.get_engine_clock = &radeon_atom_get_engine_clock,
-	.set_engine_clock = &radeon_atom_set_engine_clock,
-	.get_memory_clock = &radeon_atom_get_memory_clock,
-	.set_memory_clock = &radeon_atom_set_memory_clock,
-	.get_pcie_lanes = &rv370_get_pcie_lanes,
-	.set_pcie_lanes = NULL,
-	.set_clock_gating = NULL,
-	.set_surface_reg = r600_set_surface_reg,
-	.clear_surface_reg = r600_clear_surface_reg,
-	.bandwidth_update = &rv515_bandwidth_update,
-	.hpd_init = &r600_hpd_init,
-	.hpd_fini = &r600_hpd_fini,
-	.hpd_sense = &r600_hpd_sense,
-	.hpd_set_polarity = &r600_hpd_set_polarity,
-	.ioctl_wait_idle = r600_ioctl_wait_idle,
-};
-
 /*
  * rv770,rv730,rv710,rv740
  */
@@ -650,43 +278,6 @@
 int rv770_resume(struct radeon_device *rdev);
 int rv770_gpu_reset(struct radeon_device *rdev);
 
-static struct radeon_asic rv770_asic = {
-	.init = &rv770_init,
-	.fini = &rv770_fini,
-	.suspend = &rv770_suspend,
-	.resume = &rv770_resume,
-	.cp_commit = &r600_cp_commit,
-	.gpu_reset = &rv770_gpu_reset,
-	.vga_set_state = &r600_vga_set_state,
-	.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
-	.gart_set_page = &rs600_gart_set_page,
-	.ring_test = &r600_ring_test,
-	.ring_ib_execute = &r600_ring_ib_execute,
-	.irq_set = &r600_irq_set,
-	.irq_process = &r600_irq_process,
-	.get_vblank_counter = &rs600_get_vblank_counter,
-	.fence_ring_emit = &r600_fence_ring_emit,
-	.cs_parse = &r600_cs_parse,
-	.copy_blit = &r600_copy_blit,
-	.copy_dma = &r600_copy_blit,
-	.copy = &r600_copy_blit,
-	.get_engine_clock = &radeon_atom_get_engine_clock,
-	.set_engine_clock = &radeon_atom_set_engine_clock,
-	.get_memory_clock = &radeon_atom_get_memory_clock,
-	.set_memory_clock = &radeon_atom_set_memory_clock,
-	.get_pcie_lanes = &rv370_get_pcie_lanes,
-	.set_pcie_lanes = NULL,
-	.set_clock_gating = &radeon_atom_set_clock_gating,
-	.set_surface_reg = r600_set_surface_reg,
-	.clear_surface_reg = r600_clear_surface_reg,
-	.bandwidth_update = &rv515_bandwidth_update,
-	.hpd_init = &r600_hpd_init,
-	.hpd_fini = &r600_hpd_fini,
-	.hpd_sense = &r600_hpd_sense,
-	.hpd_set_polarity = &r600_hpd_set_polarity,
-	.ioctl_wait_idle = r600_ioctl_wait_idle,
-};
-
 /*
  * evergreen
  */
@@ -701,40 +292,4 @@
 bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
 void evergreen_hpd_set_polarity(struct radeon_device *rdev,
 				enum radeon_hpd_id hpd);
-
-static struct radeon_asic evergreen_asic = {
-	.init = &evergreen_init,
-	.fini = &evergreen_fini,
-	.suspend = &evergreen_suspend,
-	.resume = &evergreen_resume,
-	.cp_commit = NULL,
-	.gpu_reset = &evergreen_gpu_reset,
-	.vga_set_state = &r600_vga_set_state,
-	.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
-	.gart_set_page = &rs600_gart_set_page,
-	.ring_test = NULL,
-	.ring_ib_execute = NULL,
-	.irq_set = NULL,
-	.irq_process = NULL,
-	.get_vblank_counter = NULL,
-	.fence_ring_emit = NULL,
-	.cs_parse = NULL,
-	.copy_blit = NULL,
-	.copy_dma = NULL,
-	.copy = NULL,
-	.get_engine_clock = &radeon_atom_get_engine_clock,
-	.set_engine_clock = &radeon_atom_set_engine_clock,
-	.get_memory_clock = &radeon_atom_get_memory_clock,
-	.set_memory_clock = &radeon_atom_set_memory_clock,
-	.set_pcie_lanes = NULL,
-	.set_clock_gating = NULL,
-	.set_surface_reg = r600_set_surface_reg,
-	.clear_surface_reg = r600_clear_surface_reg,
-	.bandwidth_update = &evergreen_bandwidth_update,
-	.hpd_init = &evergreen_hpd_init,
-	.hpd_fini = &evergreen_hpd_fini,
-	.hpd_sense = &evergreen_hpd_sense,
-	.hpd_set_polarity = &evergreen_hpd_set_polarity,
-};
-
 #endif
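
The header hunks above strip the per-family radeon_asic tables (r300_asic, r520_asic, r600_asic, rv770_asic, evergreen_asic, ...) out of this header, leaving only the extern function declarations; the tables themselves move next to the code that selects them. The underlying pattern is a plain ops table: one struct of function pointers per hardware generation, chosen once at init and called through thereafter. A minimal standalone sketch of that idea, with hypothetical names rather than the kernel structs:

#include <stdio.h>

/* One struct of entry points per hardware family; names are invented. */
struct gpu_ops {
	int  (*init)(void);
	void (*ring_start)(void);
};

static int  r100_init(void)       { puts("r100 init"); return 0; }
static void r100_ring_start(void) { puts("r100 ring start"); }
static int  r600_init(void)       { puts("r600 init"); return 0; }
static void r600_ring_start(void) { puts("r600 ring start"); }

static const struct gpu_ops r100_ops = { r100_init, r100_ring_start };
static const struct gpu_ops r600_ops = { r600_init, r600_ring_start };

enum family { FAM_R100, FAM_R600 };

/* Selected once, at device init time. */
static const struct gpu_ops *pick_ops(enum family f)
{
	return (f == FAM_R600) ? &r600_ops : &r100_ops;
}

int main(void)
{
	const struct gpu_ops *ops = pick_ops(FAM_R600);

	if (ops->init() == 0)
		ops->ring_start();
	return 0;
}
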
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 93783b1..1fff955 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -75,46 +75,45 @@
 	memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
 	i2c.valid = false;
 
-	atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset);
+	if (atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
+		i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
 
-	i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
+		for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
+			gpio = &i2c_info->asGPIO_Info[i];
 
+			if (gpio->sucI2cId.ucAccess == id) {
+				i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
+				i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
+				i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
+				i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
+				i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
+				i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
+				i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
+				i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
+				i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
+				i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
+				i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
+				i2c.en_data_mask = (1 << gpio->ucDataEnShift);
+				i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
+				i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
+				i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
+				i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
 
-	for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
-		gpio = &i2c_info->asGPIO_Info[i];
+				if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
+					i2c.hw_capable = true;
+				else
+					i2c.hw_capable = false;
 
-		if (gpio->sucI2cId.ucAccess == id) {
-			i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
-			i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
-			i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
-			i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
-			i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
-			i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
-			i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
-			i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
-			i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
-			i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
-			i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
-			i2c.en_data_mask = (1 << gpio->ucDataEnShift);
-			i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
-			i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
-			i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
-			i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
+				if (gpio->sucI2cId.ucAccess == 0xa0)
+					i2c.mm_i2c = true;
+				else
+					i2c.mm_i2c = false;
 
-			if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
-				i2c.hw_capable = true;
-			else
-				i2c.hw_capable = false;
+				i2c.i2c_id = gpio->sucI2cId.ucAccess;
 
-			if (gpio->sucI2cId.ucAccess == 0xa0)
-				i2c.mm_i2c = true;
-			else
-				i2c.mm_i2c = false;
-
-			i2c.i2c_id = gpio->sucI2cId.ucAccess;
-
-			i2c.valid = true;
-			break;
+				i2c.valid = true;
+				break;
+			}
 		}
 	}
 
@@ -135,20 +134,21 @@
 	memset(&gpio, 0, sizeof(struct radeon_gpio_rec));
 	gpio.valid = false;
 
-	atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset);
+	if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
+		gpio_info = (struct _ATOM_GPIO_PIN_LUT *)(ctx->bios + data_offset);
 
-	gpio_info = (struct _ATOM_GPIO_PIN_LUT *)(ctx->bios + data_offset);
+		num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+			sizeof(ATOM_GPIO_PIN_ASSIGNMENT);
 
-	num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / sizeof(ATOM_GPIO_PIN_ASSIGNMENT);
-
-	for (i = 0; i < num_indices; i++) {
-		pin = &gpio_info->asGPIO_Pin[i];
-		if (id == pin->ucGPIO_ID) {
-			gpio.id = pin->ucGPIO_ID;
-			gpio.reg = pin->usGpioPin_AIndex * 4;
-			gpio.mask = (1 << pin->ucGpioPinBitShift);
-			gpio.valid = true;
-			break;
+		for (i = 0; i < num_indices; i++) {
+			pin = &gpio_info->asGPIO_Pin[i];
+			if (id == pin->ucGPIO_ID) {
+				gpio.id = pin->ucGPIO_ID;
+				gpio.reg = pin->usGpioPin_AIndex * 4;
+				gpio.mask = (1 << pin->ucGpioPinBitShift);
+				gpio.valid = true;
+				break;
+			}
 		}
 	}
 
@@ -264,6 +264,8 @@
 		if ((supported_device == ATOM_DEVICE_CRT1_SUPPORT) ||
 		    (supported_device == ATOM_DEVICE_DFP2_SUPPORT))
 			return false;
+		if (supported_device == ATOM_DEVICE_CRT2_SUPPORT)
+			*line_mux = 0x90;
 	}
 
 	/* ASUS HD 3600 XT board lists the DVI port as HDMI */
@@ -395,9 +397,7 @@
 	struct radeon_gpio_rec gpio;
 	struct radeon_hpd hpd;
 
-	atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
-
-	if (data_offset == 0)
+	if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
 		return false;
 
 	if (crev < 2)
@@ -449,37 +449,43 @@
 				    GetIndexIntoMasterTable(DATA,
 							    IntegratedSystemInfo);
 
-				atom_parse_data_header(ctx, index, &size, &frev,
-						       &crev, &igp_offset);
+				if (atom_parse_data_header(ctx, index, &size, &frev,
+							   &crev, &igp_offset)) {
 
-				if (crev >= 2) {
-					igp_obj =
-					    (ATOM_INTEGRATED_SYSTEM_INFO_V2
-					     *) (ctx->bios + igp_offset);
+					if (crev >= 2) {
+						igp_obj =
+							(ATOM_INTEGRATED_SYSTEM_INFO_V2
+							 *) (ctx->bios + igp_offset);
 
-					if (igp_obj) {
-						uint32_t slot_config, ct;
+						if (igp_obj) {
+							uint32_t slot_config, ct;
 
-						if (con_obj_num == 1)
-							slot_config =
-							    igp_obj->
-							    ulDDISlot1Config;
-						else
-							slot_config =
-							    igp_obj->
-							    ulDDISlot2Config;
+							if (con_obj_num == 1)
+								slot_config =
+									igp_obj->
+									ulDDISlot1Config;
+							else
+								slot_config =
+									igp_obj->
+									ulDDISlot2Config;
 
-						ct = (slot_config >> 16) & 0xff;
-						connector_type =
-						    object_connector_convert
-						    [ct];
-						connector_object_id = ct;
-						igp_lane_info =
-						    slot_config & 0xffff;
+							ct = (slot_config >> 16) & 0xff;
+							connector_type =
+								object_connector_convert
+								[ct];
+							connector_object_id = ct;
+							igp_lane_info =
+								slot_config & 0xffff;
+						} else
+							continue;
 					} else
 						continue;
-				} else
-					continue;
+				} else {
+					igp_lane_info = 0;
+					connector_type =
+						object_connector_convert[con_obj_id];
+					connector_object_id = con_obj_id;
+				}
 			} else {
 				igp_lane_info = 0;
 				connector_type =
@@ -627,20 +633,23 @@
 		uint8_t frev, crev;
 		ATOM_XTMDS_INFO *xtmds;
 
-		atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
-		xtmds = (ATOM_XTMDS_INFO *)(ctx->bios + data_offset);
+		if (atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset)) {
+			xtmds = (ATOM_XTMDS_INFO *)(ctx->bios + data_offset);
 
-		if (xtmds->ucSupportedLink & ATOM_XTMDS_SUPPORTED_DUALLINK) {
-			if (connector_type == DRM_MODE_CONNECTOR_DVII)
-				return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I;
-			else
-				return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D;
-		} else {
-			if (connector_type == DRM_MODE_CONNECTOR_DVII)
-				return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I;
-			else
-				return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D;
-		}
+			if (xtmds->ucSupportedLink & ATOM_XTMDS_SUPPORTED_DUALLINK) {
+				if (connector_type == DRM_MODE_CONNECTOR_DVII)
+					return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I;
+				else
+					return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D;
+			} else {
+				if (connector_type == DRM_MODE_CONNECTOR_DVII)
+					return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I;
+				else
+					return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D;
+			}
+		} else
+			return supported_devices_connector_object_id_convert
+				[connector_type];
 	} else {
 		return supported_devices_connector_object_id_convert
 			[connector_type];
@@ -672,7 +681,8 @@
 	int i, j, max_device;
 	struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
 
-	atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
+	if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
+		return false;
 
 	supported_devices =
 	    (union atom_supported_devices *)(ctx->bios + data_offset);
@@ -865,14 +875,11 @@
 	struct radeon_pll *mpll = &rdev->clock.mpll;
 	uint16_t data_offset;
 
-	atom_parse_data_header(mode_info->atom_context, index, NULL, &frev,
-			       &crev, &data_offset);
-
-	firmware_info =
-	    (union firmware_info *)(mode_info->atom_context->bios +
-				    data_offset);
-
-	if (firmware_info) {
+	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset)) {
+		firmware_info =
+			(union firmware_info *)(mode_info->atom_context->bios +
+						data_offset);
 		/* pixel clocks */
 		p1pll->reference_freq =
 		    le16_to_cpu(firmware_info->info.usReferenceClock);
@@ -887,6 +894,20 @@
 		p1pll->pll_out_max =
 		    le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output);
 
+		if (crev >= 4) {
+			p1pll->lcd_pll_out_min =
+				le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100;
+			if (p1pll->lcd_pll_out_min == 0)
+				p1pll->lcd_pll_out_min = p1pll->pll_out_min;
+			p1pll->lcd_pll_out_max =
+				le16_to_cpu(firmware_info->info_14.usLcdMaxPixelClockPLL_Output) * 100;
+			if (p1pll->lcd_pll_out_max == 0)
+				p1pll->lcd_pll_out_max = p1pll->pll_out_max;
+		} else {
+			p1pll->lcd_pll_out_min = p1pll->pll_out_min;
+			p1pll->lcd_pll_out_max = p1pll->pll_out_max;
+		}
+
 		if (p1pll->pll_out_min == 0) {
 			if (ASIC_IS_AVIVO(rdev))
 				p1pll->pll_out_min = 64800;
@@ -992,13 +1013,10 @@
 	u8 frev, crev;
 	u16 data_offset;
 
-	atom_parse_data_header(mode_info->atom_context, index, NULL, &frev,
-			       &crev, &data_offset);
-
-	igp_info = (union igp_info *)(mode_info->atom_context->bios +
+	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset)) {
+		igp_info = (union igp_info *)(mode_info->atom_context->bios +
 				      data_offset);
-
-	if (igp_info) {
 		switch (crev) {
 		case 1:
 			if (igp_info->info.ucMemoryType & 0xf0)
@@ -1029,14 +1047,12 @@
 	uint16_t maxfreq;
 	int i;
 
-	atom_parse_data_header(mode_info->atom_context, index, NULL, &frev,
-			       &crev, &data_offset);
+	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset)) {
+		tmds_info =
+			(struct _ATOM_TMDS_INFO *)(mode_info->atom_context->bios +
+						   data_offset);
 
-	tmds_info =
-	    (struct _ATOM_TMDS_INFO *)(mode_info->atom_context->bios +
-				       data_offset);
-
-	if (tmds_info) {
 		maxfreq = le16_to_cpu(tmds_info->usMaxFrequency);
 		for (i = 0; i < 4; i++) {
 			tmds->tmds_pll[i].freq =
@@ -1085,13 +1101,11 @@
 	if (id > ATOM_MAX_SS_ENTRY)
 		return NULL;
 
-	atom_parse_data_header(mode_info->atom_context, index, NULL, &frev,
-			       &crev, &data_offset);
+	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset)) {
+		ss_info =
+			(struct _ATOM_SPREAD_SPECTRUM_INFO *)(mode_info->atom_context->bios + data_offset);
 
-	ss_info =
-	    (struct _ATOM_SPREAD_SPECTRUM_INFO *)(mode_info->atom_context->bios + data_offset);
-
-	if (ss_info) {
 		ss =
 		    kzalloc(sizeof(struct radeon_atom_ss), GFP_KERNEL);
 
@@ -1114,30 +1128,6 @@
 	return ss;
 }
 
-static void radeon_atom_apply_lvds_quirks(struct drm_device *dev,
-					  struct radeon_encoder_atom_dig *lvds)
-{
-
-	/* Toshiba A300-1BU laptop panel doesn't like new pll divider algo */
-	if ((dev->pdev->device == 0x95c4) &&
-	    (dev->pdev->subsystem_vendor == 0x1179) &&
-	    (dev->pdev->subsystem_device == 0xff50)) {
-		if ((lvds->native_mode.hdisplay == 1280) &&
-		    (lvds->native_mode.vdisplay == 800))
-			lvds->pll_algo = PLL_ALGO_LEGACY;
-	}
-
-	/* Dell Studio 15 laptop panel doesn't like new pll divider algo */
-	if ((dev->pdev->device == 0x95c4) &&
-	    (dev->pdev->subsystem_vendor == 0x1028) &&
-	    (dev->pdev->subsystem_device == 0x029f)) {
-		if ((lvds->native_mode.hdisplay == 1280) &&
-		    (lvds->native_mode.vdisplay == 800))
-			lvds->pll_algo = PLL_ALGO_LEGACY;
-	}
-
-}
-
 union lvds_info {
 	struct _ATOM_LVDS_INFO info;
 	struct _ATOM_LVDS_INFO_V12 info_12;
@@ -1156,13 +1146,10 @@
 	uint8_t frev, crev;
 	struct radeon_encoder_atom_dig *lvds = NULL;
 
-	atom_parse_data_header(mode_info->atom_context, index, NULL, &frev,
-			       &crev, &data_offset);
-
-	lvds_info =
-	    (union lvds_info *)(mode_info->atom_context->bios + data_offset);
-
-	if (lvds_info) {
+	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset)) {
+		lvds_info =
+			(union lvds_info *)(mode_info->atom_context->bios + data_offset);
 		lvds =
 		    kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL);
 
@@ -1220,9 +1207,6 @@
 				lvds->pll_algo = PLL_ALGO_LEGACY;
 		}
 
-		/* LVDS quirks */
-		radeon_atom_apply_lvds_quirks(dev, lvds);
-
 		encoder->native_mode = lvds->native_mode;
 	}
 	return lvds;
@@ -1241,11 +1225,11 @@
 	uint8_t bg, dac;
 	struct radeon_encoder_primary_dac *p_dac = NULL;
 
-	atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset);
+	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset)) {
+		dac_info = (struct _COMPASSIONATE_DATA *)
+			(mode_info->atom_context->bios + data_offset);
 
-	dac_info = (struct _COMPASSIONATE_DATA *)(mode_info->atom_context->bios + data_offset);
-
-	if (dac_info) {
 		p_dac = kzalloc(sizeof(struct radeon_encoder_primary_dac), GFP_KERNEL);
 
 		if (!p_dac)
@@ -1270,7 +1254,9 @@
 	u8 frev, crev;
 	u16 data_offset, misc;
 
-	atom_parse_data_header(mode_info->atom_context, data_index, NULL, &frev, &crev, &data_offset);
+	if (!atom_parse_data_header(mode_info->atom_context, data_index, NULL,
+				    &frev, &crev, &data_offset))
+		return false;
 
 	switch (crev) {
 	case 1:
@@ -1362,47 +1348,50 @@
 	struct _ATOM_ANALOG_TV_INFO *tv_info;
 	enum radeon_tv_std tv_std = TV_STD_NTSC;
 
-	atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset);
+	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset)) {
 
-	tv_info = (struct _ATOM_ANALOG_TV_INFO *)(mode_info->atom_context->bios + data_offset);
+		tv_info = (struct _ATOM_ANALOG_TV_INFO *)
+			(mode_info->atom_context->bios + data_offset);
 
-	switch (tv_info->ucTV_BootUpDefaultStandard) {
-	case ATOM_TV_NTSC:
-		tv_std = TV_STD_NTSC;
-		DRM_INFO("Default TV standard: NTSC\n");
-		break;
-	case ATOM_TV_NTSCJ:
-		tv_std = TV_STD_NTSC_J;
-		DRM_INFO("Default TV standard: NTSC-J\n");
-		break;
-	case ATOM_TV_PAL:
-		tv_std = TV_STD_PAL;
-		DRM_INFO("Default TV standard: PAL\n");
-		break;
-	case ATOM_TV_PALM:
-		tv_std = TV_STD_PAL_M;
-		DRM_INFO("Default TV standard: PAL-M\n");
-		break;
-	case ATOM_TV_PALN:
-		tv_std = TV_STD_PAL_N;
-		DRM_INFO("Default TV standard: PAL-N\n");
-		break;
-	case ATOM_TV_PALCN:
-		tv_std = TV_STD_PAL_CN;
-		DRM_INFO("Default TV standard: PAL-CN\n");
-		break;
-	case ATOM_TV_PAL60:
-		tv_std = TV_STD_PAL_60;
-		DRM_INFO("Default TV standard: PAL-60\n");
-		break;
-	case ATOM_TV_SECAM:
-		tv_std = TV_STD_SECAM;
-		DRM_INFO("Default TV standard: SECAM\n");
-		break;
-	default:
-		tv_std = TV_STD_NTSC;
-		DRM_INFO("Unknown TV standard; defaulting to NTSC\n");
-		break;
+		switch (tv_info->ucTV_BootUpDefaultStandard) {
+		case ATOM_TV_NTSC:
+			tv_std = TV_STD_NTSC;
+			DRM_INFO("Default TV standard: NTSC\n");
+			break;
+		case ATOM_TV_NTSCJ:
+			tv_std = TV_STD_NTSC_J;
+			DRM_INFO("Default TV standard: NTSC-J\n");
+			break;
+		case ATOM_TV_PAL:
+			tv_std = TV_STD_PAL;
+			DRM_INFO("Default TV standard: PAL\n");
+			break;
+		case ATOM_TV_PALM:
+			tv_std = TV_STD_PAL_M;
+			DRM_INFO("Default TV standard: PAL-M\n");
+			break;
+		case ATOM_TV_PALN:
+			tv_std = TV_STD_PAL_N;
+			DRM_INFO("Default TV standard: PAL-N\n");
+			break;
+		case ATOM_TV_PALCN:
+			tv_std = TV_STD_PAL_CN;
+			DRM_INFO("Default TV standard: PAL-CN\n");
+			break;
+		case ATOM_TV_PAL60:
+			tv_std = TV_STD_PAL_60;
+			DRM_INFO("Default TV standard: PAL-60\n");
+			break;
+		case ATOM_TV_SECAM:
+			tv_std = TV_STD_SECAM;
+			DRM_INFO("Default TV standard: SECAM\n");
+			break;
+		default:
+			tv_std = TV_STD_NTSC;
+			DRM_INFO("Unknown TV standard; defaulting to NTSC\n");
+			break;
+		}
 	}
 	return tv_std;
 }
@@ -1420,11 +1409,12 @@
 	uint8_t bg, dac;
 	struct radeon_encoder_tv_dac *tv_dac = NULL;
 
-	atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset);
+	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset)) {
 
-	dac_info = (struct _COMPASSIONATE_DATA *)(mode_info->atom_context->bios + data_offset);
+		dac_info = (struct _COMPASSIONATE_DATA *)
+			(mode_info->atom_context->bios + data_offset);
 
-	if (dac_info) {
 		tv_dac = kzalloc(sizeof(struct radeon_encoder_tv_dac), GFP_KERNEL);
 
 		if (!tv_dac)
@@ -1447,6 +1437,30 @@
 	return tv_dac;
 }
 
+static const char *thermal_controller_names[] = {
+	"NONE",
+	"LM63",
+	"ADM1032",
+	"ADM1030",
+	"MUA6649",
+	"LM64",
+	"F75375",
+	"ASC7512",
+};
+
+static const char *pp_lib_thermal_controller_names[] = {
+	"NONE",
+	"LM63",
+	"ADM1032",
+	"ADM1030",
+	"MUA6649",
+	"LM64",
+	"F75375",
+	"RV6xx",
+	"RV770",
+	"ADT7473",
+};
+
 union power_info {
 	struct _ATOM_POWERPLAY_INFO info;
 	struct _ATOM_POWERPLAY_INFO_V2 info_2;
@@ -1466,15 +1480,22 @@
 	struct _ATOM_PPLIB_STATE *power_state;
 	int num_modes = 0, i, j;
 	int state_index = 0, mode_index = 0;
-
-	atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset);
-
-	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+	struct radeon_i2c_bus_rec i2c_bus;
 
 	rdev->pm.default_power_state = NULL;
 
-	if (power_info) {
+	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset)) {
+		power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
 		if (frev < 4) {
+			/* add the i2c bus for thermal/fan chip */
+			if (power_info->info.ucOverdriveThermalController > 0) {
+				DRM_INFO("Possible %s thermal controller at 0x%02x\n",
+					 thermal_controller_names[power_info->info.ucOverdriveThermalController],
+					 power_info->info.ucOverdriveControllerAddress >> 1);
+				i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info.ucOverdriveI2cLine);
+				rdev->pm.i2c_bus = radeon_i2c_create(rdev->ddev, &i2c_bus, "Thermal");
+			}
 			num_modes = power_info->info.ucNumOfPowerModeEntries;
 			if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
 				num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
@@ -1684,6 +1705,24 @@
 				}
 			}
 		} else if (frev == 4) {
+			/* add the i2c bus for thermal/fan chip */
+			/* no support for internal controller yet */
+			if (power_info->info_4.sThermalController.ucType > 0) {
+				if ((power_info->info_4.sThermalController.ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) ||
+				    (power_info->info_4.sThermalController.ucType == ATOM_PP_THERMALCONTROLLER_RV770)) {
+					DRM_INFO("Internal thermal controller %s fan control\n",
+						 (power_info->info_4.sThermalController.ucFanParameters &
+						  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+				} else {
+					DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
+						 pp_lib_thermal_controller_names[power_info->info_4.sThermalController.ucType],
+						 power_info->info_4.sThermalController.ucI2cAddress >> 1,
+						 (power_info->info_4.sThermalController.ucFanParameters &
+						  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+					i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info_4.sThermalController.ucI2cLine);
+					rdev->pm.i2c_bus = radeon_i2c_create(rdev->ddev, &i2c_bus, "Thermal");
+				}
+			}
 			for (i = 0; i < power_info->info_4.ucNumStates; i++) {
 				mode_index = 0;
 				power_state = (struct _ATOM_PPLIB_STATE *)
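
Nearly every hunk in radeon_atombios.c above makes the same change: atom_parse_data_header() is treated as returning success or failure, and the table offset is only dereferenced inside the `if`, instead of being used unconditionally. A standalone sketch of that guard pattern, using a stand-in parser rather than the real AtomBIOS API:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for a table-header lookup that may fail; the returned offset
 * is only meaningful when the function reports success. */
static bool parse_header(const uint8_t *bios, size_t len, uint16_t *off)
{
	if (len < 4 || bios[0] != 0xAA)		/* pretend signature check */
		return false;
	*off = (uint16_t)(bios[2] | (bios[3] << 8));
	return *off < len;
}

int main(void)
{
	uint8_t bios[8] = { 0xAA, 0x00, 0x04, 0x00, 0x42, 0, 0, 0 };
	uint16_t data_offset;

	if (parse_header(bios, sizeof(bios), &data_offset)) {
		/* safe: only touch the table when the header exists */
		printf("table byte at 0x%04x: 0x%02x\n", data_offset, bios[data_offset]);
	} else {
		printf("no table, falling back to defaults\n");
	}
	return 0;
}
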
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index e9ea38e..2becded 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -531,10 +531,7 @@
 	case CHIP_RS300:
 		switch (ddc_line) {
 		case RADEON_GPIO_DVI_DDC:
-			/* in theory this should be hw capable,
-			 * but it doesn't seem to work
-			 */
-			i2c.hw_capable = false;
+			i2c.hw_capable = true;
 			break;
 		default:
 			i2c.hw_capable = false;
@@ -633,6 +630,8 @@
 		p1pll->reference_div = RBIOS16(pll_info + 0x10);
 		p1pll->pll_out_min = RBIOS32(pll_info + 0x12);
 		p1pll->pll_out_max = RBIOS32(pll_info + 0x16);
+		p1pll->lcd_pll_out_min = p1pll->pll_out_min;
+		p1pll->lcd_pll_out_max = p1pll->pll_out_max;
 
 		if (rev > 9) {
 			p1pll->pll_in_min = RBIOS32(pll_info + 0x36);
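
The combios hunk above seeds lcd_pll_out_min/max from the ordinary PLL limits, since a legacy BIOS carries no separate LCD range; the AtomBIOS path earlier does the same whenever the crev >= 4 fields read back as zero. The shape is simply "use the specific limit if present, otherwise inherit the general one"; a trivial sketch with made-up field names:

#include <stdint.h>
#include <stdio.h>

struct pll_limits {
	uint32_t out_min, out_max;		/* general VCO range */
	uint32_t lcd_out_min, lcd_out_max;	/* optional LCD-specific range */
};

/* Inherit the general range wherever the LCD-specific value is unset. */
static void fill_lcd_defaults(struct pll_limits *p)
{
	if (p->lcd_out_min == 0)
		p->lcd_out_min = p->out_min;
	if (p->lcd_out_max == 0)
		p->lcd_out_max = p->out_max;
}

int main(void)
{
	struct pll_limits p = { 64800, 1300000, 0, 0 };

	fill_lcd_defaults(&p);
	printf("lcd range: %u..%u\n",
	       (unsigned)p.lcd_out_min, (unsigned)p.lcd_out_max);
	return 0;
}
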
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index ee0083f..60d5981 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -940,7 +940,7 @@
 	if (radeon_connector->edid)
 		kfree(radeon_connector->edid);
 	if (radeon_dig_connector->dp_i2c_bus)
-		radeon_i2c_destroy_dp(radeon_dig_connector->dp_i2c_bus);
+		radeon_i2c_destroy(radeon_dig_connector->dp_i2c_bus);
 	kfree(radeon_connector->con_priv);
 	drm_sysfs_connector_remove(connector);
 	drm_connector_cleanup(connector);
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 70ba02e..f9b0fe0 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -193,9 +193,11 @@
 		radeon_bo_list_fence(&parser->validated, parser->ib->fence);
 	}
 	radeon_bo_list_unreserve(&parser->validated);
-	for (i = 0; i < parser->nrelocs; i++) {
-		if (parser->relocs[i].gobj)
-			drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
+	if (parser->relocs != NULL) {
+		for (i = 0; i < parser->nrelocs; i++) {
+			if (parser->relocs[i].gobj)
+				drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
+		}
 	}
 	kfree(parser->track);
 	kfree(parser->relocs);
@@ -243,7 +245,8 @@
 	}
 	r = radeon_cs_parser_relocs(&parser);
 	if (r) {
-		DRM_ERROR("Failed to parse relocation !\n");
+		if (r != -ERESTARTSYS)
+			DRM_ERROR("Failed to parse relocation %d!\n", r);
 		radeon_cs_parser_fini(&parser, r);
 		mutex_unlock(&rdev->cs_mutex);
 		return r;
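
The radeon_cs.c hunks harden the error path: the reloc array is only walked when it was actually allocated, and -ERESTARTSYS (a signal-interrupted ioctl that will simply be retried) no longer triggers an error message. A standalone sketch of that style of tolerant teardown, with invented names and EINTR standing in for the kernel-only -ERESTARTSYS:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct parser {
	int  nrelocs;
	int *relocs;	/* may legitimately be NULL after an early failure */
};

/* Teardown that tolerates partially initialized state and stays quiet
 * for the "retry the syscall" error code. */
static void parser_fini(struct parser *p, int err)
{
	if (p->relocs != NULL) {
		for (int i = 0; i < p->nrelocs; i++)
			p->relocs[i] = 0;	/* stand-in for dropping a reference */
	}
	free(p->relocs);
	p->relocs = NULL;

	if (err && err != -EINTR)
		fprintf(stderr, "parse failed: %d\n", err);
}

int main(void)
{
	struct parser p = { 0, NULL };

	/* early failure: nothing allocated, and no log noise for -EINTR */
	parser_fini(&p, -EINTR);
	return 0;
}
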
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index e28e4ed..60ec47b 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -33,7 +33,6 @@
 #include <linux/vga_switcheroo.h>
 #include "radeon_reg.h"
 #include "radeon.h"
-#include "radeon_asic.h"
 #include "atom.h"
 
 /*
@@ -242,6 +241,36 @@
 
 }
 
+void radeon_update_bandwidth_info(struct radeon_device *rdev)
+{
+	fixed20_12 a;
+	u32 sclk, mclk;
+
+	if (rdev->flags & RADEON_IS_IGP) {
+		sclk = radeon_get_engine_clock(rdev);
+		mclk = rdev->clock.default_mclk;
+
+		a.full = rfixed_const(100);
+		rdev->pm.sclk.full = rfixed_const(sclk);
+		rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
+		rdev->pm.mclk.full = rfixed_const(mclk);
+		rdev->pm.mclk.full = rfixed_div(rdev->pm.mclk, a);
+
+		a.full = rfixed_const(16);
+		/* core_bandwidth = sclk(Mhz) * 16 */
+		rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a);
+	} else {
+		sclk = radeon_get_engine_clock(rdev);
+		mclk = radeon_get_memory_clock(rdev);
+
+		a.full = rfixed_const(100);
+		rdev->pm.sclk.full = rfixed_const(sclk);
+		rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
+		rdev->pm.mclk.full = rfixed_const(mclk);
+		rdev->pm.mclk.full = rfixed_div(rdev->pm.mclk, a);
+	}
+}
+
 bool radeon_boot_test_post_card(struct radeon_device *rdev)
 {
 	if (radeon_card_posted(rdev))
@@ -288,181 +317,6 @@
 }
 
 
-/*
- * Registers accessors functions.
- */
-uint32_t radeon_invalid_rreg(struct radeon_device *rdev, uint32_t reg)
-{
-	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
-	BUG_ON(1);
-	return 0;
-}
-
-void radeon_invalid_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
-{
-	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
-		  reg, v);
-	BUG_ON(1);
-}
-
-void radeon_register_accessor_init(struct radeon_device *rdev)
-{
-	rdev->mc_rreg = &radeon_invalid_rreg;
-	rdev->mc_wreg = &radeon_invalid_wreg;
-	rdev->pll_rreg = &radeon_invalid_rreg;
-	rdev->pll_wreg = &radeon_invalid_wreg;
-	rdev->pciep_rreg = &radeon_invalid_rreg;
-	rdev->pciep_wreg = &radeon_invalid_wreg;
-
-	/* Don't change order as we are overridding accessor. */
-	if (rdev->family < CHIP_RV515) {
-		rdev->pcie_reg_mask = 0xff;
-	} else {
-		rdev->pcie_reg_mask = 0x7ff;
-	}
-	/* FIXME: not sure here */
-	if (rdev->family <= CHIP_R580) {
-		rdev->pll_rreg = &r100_pll_rreg;
-		rdev->pll_wreg = &r100_pll_wreg;
-	}
-	if (rdev->family >= CHIP_R420) {
-		rdev->mc_rreg = &r420_mc_rreg;
-		rdev->mc_wreg = &r420_mc_wreg;
-	}
-	if (rdev->family >= CHIP_RV515) {
-		rdev->mc_rreg = &rv515_mc_rreg;
-		rdev->mc_wreg = &rv515_mc_wreg;
-	}
-	if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) {
-		rdev->mc_rreg = &rs400_mc_rreg;
-		rdev->mc_wreg = &rs400_mc_wreg;
-	}
-	if (rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
-		rdev->mc_rreg = &rs690_mc_rreg;
-		rdev->mc_wreg = &rs690_mc_wreg;
-	}
-	if (rdev->family == CHIP_RS600) {
-		rdev->mc_rreg = &rs600_mc_rreg;
-		rdev->mc_wreg = &rs600_mc_wreg;
-	}
-	if ((rdev->family >= CHIP_R600) && (rdev->family <= CHIP_RV740)) {
-		rdev->pciep_rreg = &r600_pciep_rreg;
-		rdev->pciep_wreg = &r600_pciep_wreg;
-	}
-}
-
-
-/*
- * ASIC
- */
-int radeon_asic_init(struct radeon_device *rdev)
-{
-	radeon_register_accessor_init(rdev);
-	switch (rdev->family) {
-	case CHIP_R100:
-	case CHIP_RV100:
-	case CHIP_RS100:
-	case CHIP_RV200:
-	case CHIP_RS200:
-		rdev->asic = &r100_asic;
-		break;
-	case CHIP_R200:
-	case CHIP_RV250:
-	case CHIP_RS300:
-	case CHIP_RV280:
-		rdev->asic = &r200_asic;
-		break;
-	case CHIP_R300:
-	case CHIP_R350:
-	case CHIP_RV350:
-	case CHIP_RV380:
-		if (rdev->flags & RADEON_IS_PCIE)
-			rdev->asic = &r300_asic_pcie;
-		else
-			rdev->asic = &r300_asic;
-		break;
-	case CHIP_R420:
-	case CHIP_R423:
-	case CHIP_RV410:
-		rdev->asic = &r420_asic;
-		break;
-	case CHIP_RS400:
-	case CHIP_RS480:
-		rdev->asic = &rs400_asic;
-		break;
-	case CHIP_RS600:
-		rdev->asic = &rs600_asic;
-		break;
-	case CHIP_RS690:
-	case CHIP_RS740:
-		rdev->asic = &rs690_asic;
-		break;
-	case CHIP_RV515:
-		rdev->asic = &rv515_asic;
-		break;
-	case CHIP_R520:
-	case CHIP_RV530:
-	case CHIP_RV560:
-	case CHIP_RV570:
-	case CHIP_R580:
-		rdev->asic = &r520_asic;
-		break;
-	case CHIP_R600:
-	case CHIP_RV610:
-	case CHIP_RV630:
-	case CHIP_RV620:
-	case CHIP_RV635:
-	case CHIP_RV670:
-	case CHIP_RS780:
-	case CHIP_RS880:
-		rdev->asic = &r600_asic;
-		break;
-	case CHIP_RV770:
-	case CHIP_RV730:
-	case CHIP_RV710:
-	case CHIP_RV740:
-		rdev->asic = &rv770_asic;
-		break;
-	case CHIP_CEDAR:
-	case CHIP_REDWOOD:
-	case CHIP_JUNIPER:
-	case CHIP_CYPRESS:
-	case CHIP_HEMLOCK:
-		rdev->asic = &evergreen_asic;
-		break;
-	default:
-		/* FIXME: not supported yet */
-		return -EINVAL;
-	}
-
-	if (rdev->flags & RADEON_IS_IGP) {
-		rdev->asic->get_memory_clock = NULL;
-		rdev->asic->set_memory_clock = NULL;
-	}
-
-	return 0;
-}
-
-
-/*
- * Wrapper around modesetting bits.
- */
-int radeon_clocks_init(struct radeon_device *rdev)
-{
-	int r;
-
-	r = radeon_static_clocks_init(rdev->ddev);
-	if (r) {
-		return r;
-	}
-	DRM_INFO("Clocks initialized !\n");
-	return 0;
-}
-
-void radeon_clocks_fini(struct radeon_device *rdev)
-{
-}
-
 /* ATOM accessor methods */
 static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
 {
@@ -567,29 +421,6 @@
 		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
 }
 
-void radeon_agp_disable(struct radeon_device *rdev)
-{
-	rdev->flags &= ~RADEON_IS_AGP;
-	if (rdev->family >= CHIP_R600) {
-		DRM_INFO("Forcing AGP to PCIE mode\n");
-		rdev->flags |= RADEON_IS_PCIE;
-	} else if (rdev->family >= CHIP_RV515 ||
-			rdev->family == CHIP_RV380 ||
-			rdev->family == CHIP_RV410 ||
-			rdev->family == CHIP_R423) {
-		DRM_INFO("Forcing AGP to PCIE mode\n");
-		rdev->flags |= RADEON_IS_PCIE;
-		rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
-		rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
-	} else {
-		DRM_INFO("Forcing AGP to PCI mode\n");
-		rdev->flags |= RADEON_IS_PCI;
-		rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
-		rdev->asic->gart_set_page = &r100_pci_gart_set_page;
-	}
-	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
-}
-
 void radeon_check_arguments(struct radeon_device *rdev)
 {
 	/* vramlimit must be a power of two */
@@ -731,6 +562,14 @@
 		return r;
 	radeon_check_arguments(rdev);
 
+	/* All of the newer IGP chips have an internal GART.
+	 * However, some rs4xx chips report as AGP, so clear that flag here.
+	 */
+	if ((rdev->family >= CHIP_RS400) &&
+	    (rdev->flags & RADEON_IS_IGP)) {
+		rdev->flags &= ~RADEON_IS_AGP;
+	}
+
 	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
 		radeon_agp_disable(rdev);
 	}
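
radeon_update_bandwidth_info() added above converts the engine and memory clock readings into 20.12 fixed-point values with rfixed_const()/rfixed_div(), dividing by 100 before they feed the bandwidth math (the in-line comment notes the result is in MHz). The arithmetic is ordinary binary fixed point; a standalone sketch of 20.12 constants and division, not the kernel's fixed20_12 helpers:

#include <stdint.h>
#include <stdio.h>

#define FIX_SHIFT 12			/* 20.12 fixed point */

typedef uint32_t fix20_12;

static fix20_12 fix_const(uint32_t v) { return v << FIX_SHIFT; }

static fix20_12 fix_div(fix20_12 a, fix20_12 b)
{
	/* widen to 64 bits so the pre-shift cannot overflow */
	return (fix20_12)(((uint64_t)a << FIX_SHIFT) / b);
}

static double fix_to_double(fix20_12 v) { return v / (double)(1 << FIX_SHIFT); }

int main(void)
{
	/* hypothetical reading: 680 MHz reported in 10 kHz units */
	uint32_t sclk_raw = 68000;
	fix20_12 hundred = fix_const(100);
	fix20_12 sclk = fix_div(fix_const(sclk_raw), hundred);

	printf("sclk = %.2f MHz\n", fix_to_double(sclk));	/* 680.00 */
	return 0;
}
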
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index ba8d806..b8d6728 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -368,10 +368,9 @@
 
 	if (rdev->bios) {
 		if (rdev->is_atom_bios) {
-			if (rdev->family >= CHIP_R600)
+			ret = radeon_get_atom_connector_info_from_supported_devices_table(dev);
+			if (ret == false)
 				ret = radeon_get_atom_connector_info_from_object_table(dev);
-			else
-				ret = radeon_get_atom_connector_info_from_supported_devices_table(dev);
 		} else {
 			ret = radeon_get_legacy_connector_info_from_bios(dev);
 			if (ret == false)
@@ -469,10 +468,19 @@
 	uint32_t best_error = 0xffffffff;
 	uint32_t best_vco_diff = 1;
 	uint32_t post_div;
+	u32 pll_out_min, pll_out_max;
 
 	DRM_DEBUG("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
 	freq = freq * 1000;
 
+	if (pll->flags & RADEON_PLL_IS_LCD) {
+		pll_out_min = pll->lcd_pll_out_min;
+		pll_out_max = pll->lcd_pll_out_max;
+	} else {
+		pll_out_min = pll->pll_out_min;
+		pll_out_max = pll->pll_out_max;
+	}
+
 	if (pll->flags & RADEON_PLL_USE_REF_DIV)
 		min_ref_div = max_ref_div = pll->reference_div;
 	else {
@@ -536,10 +544,10 @@
 				tmp = (uint64_t)pll->reference_freq * feedback_div;
 				vco = radeon_div(tmp, ref_div);
 
-				if (vco < pll->pll_out_min) {
+				if (vco < pll_out_min) {
 					min_feed_div = feedback_div + 1;
 					continue;
-				} else if (vco > pll->pll_out_max) {
+				} else if (vco > pll_out_max) {
 					max_feed_div = feedback_div;
 					continue;
 				}
@@ -675,6 +683,15 @@
 {
 	fixed20_12 ffreq, max_error, error, pll_out, a;
 	u32 vco;
+	u32 pll_out_min, pll_out_max;
+
+	if (pll->flags & RADEON_PLL_IS_LCD) {
+		pll_out_min = pll->lcd_pll_out_min;
+		pll_out_max = pll->lcd_pll_out_max;
+	} else {
+		pll_out_min = pll->pll_out_min;
+		pll_out_max = pll->pll_out_max;
+	}
 
 	ffreq.full = rfixed_const(freq);
 	/* max_error = ffreq * 0.0025; */
@@ -686,7 +703,7 @@
 			vco = pll->reference_freq * (((*fb_div) * 10) + (*fb_div_frac));
 			vco = vco / ((*ref_div) * 10);
 
-			if ((vco < pll->pll_out_min) || (vco > pll->pll_out_max))
+			if ((vco < pll_out_min) || (vco > pll_out_max))
 				continue;
 
 			/* pll_out = vco / post_div; */
@@ -714,6 +731,15 @@
 {
 	u32 fb_div = 0, fb_div_frac = 0, post_div = 0, ref_div = 0;
 	u32 best_freq = 0, vco_frequency;
+	u32 pll_out_min, pll_out_max;
+
+	if (pll->flags & RADEON_PLL_IS_LCD) {
+		pll_out_min = pll->lcd_pll_out_min;
+		pll_out_max = pll->lcd_pll_out_max;
+	} else {
+		pll_out_min = pll->pll_out_min;
+		pll_out_max = pll->pll_out_max;
+	}
 
 	/* freq = freq / 10; */
 	do_div(freq, 10);
@@ -724,7 +750,7 @@
 			goto done;
 
 		vco_frequency = freq * post_div;
-		if ((vco_frequency < pll->pll_out_min) || (vco_frequency > pll->pll_out_max))
+		if ((vco_frequency < pll_out_min) || (vco_frequency > pll_out_max))
 			goto done;
 
 		if (pll->flags & RADEON_PLL_USE_REF_DIV) {
@@ -749,7 +775,7 @@
 				continue;
 
 			vco_frequency = freq * post_div;
-			if ((vco_frequency < pll->pll_out_min) || (vco_frequency > pll->pll_out_max))
+			if ((vco_frequency < pll_out_min) || (vco_frequency > pll_out_max))
 				continue;
 			if (pll->flags & RADEON_PLL_USE_REF_DIV) {
 				ref_div = pll->reference_div;
@@ -945,6 +971,23 @@
 	return 0;
 }
 
+void radeon_update_display_priority(struct radeon_device *rdev)
+{
+	/* adjustment options for the display watermarks */
+	if ((radeon_disp_priority == 0) || (radeon_disp_priority > 2)) {
+		/* set display priority to high for r3xx, rv515 chips
+		 * this avoids flickering caused by underflow in the
+		 * display controllers during heavy acceleration.
+		 */
+		if (ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV515))
+			rdev->disp_priority = 2;
+		else
+			rdev->disp_priority = 0;
+	} else
+		rdev->disp_priority = radeon_disp_priority;
+
+}
+
 int radeon_modeset_init(struct radeon_device *rdev)
 {
 	int i;
@@ -976,15 +1019,6 @@
 		radeon_combios_check_hardcoded_edid(rdev);
 	}
 
-	if (rdev->flags & RADEON_SINGLE_CRTC)
-		rdev->num_crtc = 1;
-	else {
-		if (ASIC_IS_DCE4(rdev))
-			rdev->num_crtc = 6;
-		else
-			rdev->num_crtc = 2;
-	}
-
 	/* allocate crtcs */
 	for (i = 0; i < rdev->num_crtc; i++) {
 		radeon_crtc_init(rdev->ddev, i);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 6eec0ec..055a517 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -42,9 +42,10 @@
  * KMS wrapper.
  * - 2.0.0 - initial interface
  * - 2.1.0 - add square tiling interface
+ * - 2.2.0 - add r6xx/r7xx const buffer support
  */
 #define KMS_DRIVER_MAJOR	2
-#define KMS_DRIVER_MINOR	1
+#define KMS_DRIVER_MINOR	2
 #define KMS_DRIVER_PATCHLEVEL	0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
@@ -91,6 +92,8 @@
 int radeon_new_pll = -1;
 int radeon_dynpm = -1;
 int radeon_audio = 1;
+int radeon_disp_priority = 0;
+int radeon_hw_i2c = 0;
 
 MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
 module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -134,6 +137,12 @@
 MODULE_PARM_DESC(audio, "Audio enable (0 = disable)");
 module_param_named(audio, radeon_audio, int, 0444);
 
+MODULE_PARM_DESC(disp_priority, "Display Priority (0 = auto, 1 = normal, 2 = high)");
+module_param_named(disp_priority, radeon_disp_priority, int, 0444);
+
+MODULE_PARM_DESC(hw_i2c, "hw i2c engine enable (0 = disable)");
+module_param_named(hw_i2c, radeon_hw_i2c, int, 0444);
+
 static int radeon_suspend(struct drm_device *dev, pm_message_t state)
 {
 	drm_radeon_private_t *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index ec55f2b..448eba8 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -107,9 +107,10 @@
  * 1.30- Add support for occlusion queries
  * 1.31- Add support for num Z pipes from GET_PARAM
  * 1.32- fixes for rv740 setup
+ * 1.33- Add r6xx/r7xx const buffer support
  */
 #define DRIVER_MAJOR		1
-#define DRIVER_MINOR		32
+#define DRIVER_MINOR		33
 #define DRIVER_PATCHLEVEL	0
 
 enum radeon_cp_microcode_version {
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index bc926ea..52d6f96 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -302,7 +302,7 @@
 	}
 
 	if (ASIC_IS_DCE3(rdev) &&
-	    (radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT))) {
+	    (radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT))) {
 		struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
 		radeon_dp_set_link_config(connector, mode);
 	}
@@ -519,7 +519,8 @@
 		break;
 	}
 
-	atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
+	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+		return;
 
 	switch (frev) {
 	case 1:
@@ -593,7 +594,6 @@
 	}
 
 	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-	r600_hdmi_enable(encoder, hdmi_detected);
 }
 
 int
@@ -708,7 +708,7 @@
 	struct radeon_connector_atom_dig *dig_connector =
 		radeon_get_atom_connector_priv_from_encoder(encoder);
 	union dig_encoder_control args;
-	int index = 0, num = 0;
+	int index = 0;
 	uint8_t frev, crev;
 
 	if (!dig || !dig_connector)
@@ -724,9 +724,9 @@
 		else
 			index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
 	}
-	num = dig->dig_encoder + 1;
 
-	atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
+	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+		return;
 
 	args.v1.ucAction = action;
 	args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
@@ -785,7 +785,7 @@
 	struct drm_connector *connector;
 	struct radeon_connector *radeon_connector;
 	union dig_transmitter_control args;
-	int index = 0, num = 0;
+	int index = 0;
 	uint8_t frev, crev;
 	bool is_dp = false;
 	int pll_id = 0;
@@ -814,7 +814,8 @@
 		}
 	}
 
-	atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
+	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+		return;
 
 	args.v1.ucAction = action;
 	if (action == ATOM_TRANSMITTER_ACTION_INIT) {
@@ -860,15 +861,12 @@
 		switch (radeon_encoder->encoder_id) {
 		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
 			args.v3.acConfig.ucTransmitterSel = 0;
-			num = 0;
 			break;
 		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
 			args.v3.acConfig.ucTransmitterSel = 1;
-			num = 1;
 			break;
 		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
 			args.v3.acConfig.ucTransmitterSel = 2;
-			num = 2;
 			break;
 		}
 
@@ -879,23 +877,19 @@
 				args.v3.acConfig.fCoherentMode = 1;
 		}
 	} else if (ASIC_IS_DCE32(rdev)) {
-		if (dig->dig_encoder == 1)
-			args.v2.acConfig.ucEncoderSel = 1;
+		args.v2.acConfig.ucEncoderSel = dig->dig_encoder;
 		if (dig_connector->linkb)
 			args.v2.acConfig.ucLinkSel = 1;
 
 		switch (radeon_encoder->encoder_id) {
 		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
 			args.v2.acConfig.ucTransmitterSel = 0;
-			num = 0;
 			break;
 		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
 			args.v2.acConfig.ucTransmitterSel = 1;
-			num = 1;
 			break;
 		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
 			args.v2.acConfig.ucTransmitterSel = 2;
-			num = 2;
 			break;
 		}
 
@@ -913,31 +907,25 @@
 		else
 			args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
 
-		switch (radeon_encoder->encoder_id) {
-		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
-			if (rdev->flags & RADEON_IS_IGP) {
-				if (radeon_encoder->pixel_clock > 165000) {
-					if (dig_connector->igp_lane_info & 0x3)
-						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7;
-					else if (dig_connector->igp_lane_info & 0xc)
-						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15;
-				} else {
-					if (dig_connector->igp_lane_info & 0x1)
-						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
-					else if (dig_connector->igp_lane_info & 0x2)
-						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7;
-					else if (dig_connector->igp_lane_info & 0x4)
-						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11;
-					else if (dig_connector->igp_lane_info & 0x8)
-						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15;
-				}
+		if ((rdev->flags & RADEON_IS_IGP) &&
+		    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) {
+			if (is_dp || (radeon_encoder->pixel_clock <= 165000)) {
+				if (dig_connector->igp_lane_info & 0x1)
+					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
+				else if (dig_connector->igp_lane_info & 0x2)
+					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7;
+				else if (dig_connector->igp_lane_info & 0x4)
+					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11;
+				else if (dig_connector->igp_lane_info & 0x8)
+					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15;
+			} else {
+				if (dig_connector->igp_lane_info & 0x3)
+					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7;
+				else if (dig_connector->igp_lane_info & 0xc)
+					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15;
 			}
-			break;
 		}
 
-		if (radeon_encoder->pixel_clock > 165000)
-			args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK;
-
 		if (dig_connector->linkb)
 			args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB;
 		else
@@ -948,6 +936,8 @@
 		else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
 			if (dig->coherent_mode)
 				args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
+			if (radeon_encoder->pixel_clock > 165000)
+				args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK;
 		}
 	}
 
@@ -1054,16 +1044,25 @@
 	if (is_dig) {
 		switch (mode) {
 		case DRM_MODE_DPMS_ON:
-			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
-			{
+			if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
 				struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+
 				dp_link_train(encoder, connector);
+				if (ASIC_IS_DCE4(rdev))
+					atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON);
 			}
+			if (!ASIC_IS_DCE4(rdev))
+				atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
 			break;
 		case DRM_MODE_DPMS_STANDBY:
 		case DRM_MODE_DPMS_SUSPEND:
 		case DRM_MODE_DPMS_OFF:
-			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
+			if (!ASIC_IS_DCE4(rdev))
+				atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
+			if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
+				if (ASIC_IS_DCE4(rdev))
+					atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF);
+			}
 			break;
 		}
 	} else {
@@ -1104,7 +1103,8 @@
 
 	memset(&args, 0, sizeof(args));
 
-	atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
+	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+		return;
 
 	switch (frev) {
 	case 1:
@@ -1216,6 +1216,9 @@
 	}
 
 	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+	/* update scratch regs with new routing */
+	radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
 }
 
 static void
@@ -1326,19 +1329,9 @@
 	struct drm_device *dev = encoder->dev;
 	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
 
-	if (radeon_encoder->active_device &
-	    (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) {
-		struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-		if (dig)
-			dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder);
-	}
 	radeon_encoder->pixel_clock = adjusted_mode->clock;
 
-	radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
-	atombios_set_encoder_crtc_source(encoder);
-
 	if (ASIC_IS_AVIVO(rdev)) {
 		if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT))
 			atombios_yuv_setup(encoder, true);
@@ -1396,9 +1389,10 @@
 	}
 	atombios_apply_encoder_quirks(encoder, adjusted_mode);
 
-	/* XXX */
-	if (!ASIC_IS_DCE4(rdev))
+	if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
+		r600_hdmi_enable(encoder);
 		r600_hdmi_setmode(encoder, adjusted_mode);
+	}
 }
 
 static bool
@@ -1418,7 +1412,8 @@
 
 		memset(&args, 0, sizeof(args));
 
-		atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
+		if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+			return false;
 
 		args.sDacload.ucMisc = 0;
 
@@ -1492,8 +1487,20 @@
 
 static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
 {
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+
+	if (radeon_encoder->active_device &
+	    (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) {
+		struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+		if (dig)
+			dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder);
+	}
+
 	radeon_atom_output_lock(encoder, true);
 	radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+
+	/* this is needed for the pll/ss setup to work correctly in some cases */
+	atombios_set_encoder_crtc_source(encoder);
 }
 
 static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
@@ -1509,6 +1516,8 @@
 	radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
 
 	if (radeon_encoder_is_digital(encoder)) {
+		if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
+			r600_hdmi_disable(encoder);
 		dig = radeon_encoder->enc_priv;
 		dig->dig_encoder = -1;
 	}
@@ -1659,6 +1668,4 @@
 		drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
 		break;
 	}
-
-	r600_hdmi_init(encoder);
 }
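
In atombios_dig_transmitter_setup() above, the IGP lane selection collapses into one if/else: single-link (or DisplayPort) sources pick a 4-lane group from individual bits of igp_lane_info, while dual-link picks an 8-lane group from the low or high pair of bits. The mapping from a lane bitmask to a config code is a small priority encoder; a standalone sketch with invented constants standing in for the ATOM_TRANSMITTER_CONFIG_LANE_* flags:

#include <stdint.h>
#include <stdio.h>

enum lane_cfg { LANE_0_3, LANE_4_7, LANE_8_11, LANE_12_15,
		LANE_0_7, LANE_8_15, LANE_NONE };

static enum lane_cfg pick_lanes(uint8_t lane_info, int dual_link)
{
	if (!dual_link) {
		/* 4-lane groups, lowest available bit wins */
		if (lane_info & 0x1) return LANE_0_3;
		if (lane_info & 0x2) return LANE_4_7;
		if (lane_info & 0x4) return LANE_8_11;
		if (lane_info & 0x8) return LANE_12_15;
	} else {
		/* 8-lane groups from the low or high half of the mask */
		if (lane_info & 0x3) return LANE_0_7;
		if (lane_info & 0xc) return LANE_8_15;
	}
	return LANE_NONE;
}

int main(void)
{
	printf("single link, info 0x4 -> %d\n", pick_lanes(0x4, 0));	/* LANE_8_11 */
	printf("dual link,   info 0x4 -> %d\n", pick_lanes(0x4, 1));	/* LANE_8_15 */
	return 0;
}
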
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index 4ae50c1..5def6f5 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -59,6 +59,7 @@
 	return false;
 }
 
+/* bit banging i2c */
 
 static void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state)
 {
@@ -181,13 +182,30 @@
 	WREG32(rec->en_data_reg, val);
 }
 
+static int pre_xfer(struct i2c_adapter *i2c_adap)
+{
+	struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
+
+	radeon_i2c_do_lock(i2c, 1);
+
+	return 0;
+}
+
+static void post_xfer(struct i2c_adapter *i2c_adap)
+{
+	struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
+
+	radeon_i2c_do_lock(i2c, 0);
+}
+
+/* hw i2c */
+
 static u32 radeon_get_i2c_prescale(struct radeon_device *rdev)
 {
-	struct radeon_pll *spll = &rdev->clock.spll;
 	u32 sclk = radeon_get_engine_clock(rdev);
 	u32 prescale = 0;
-	u32 n, m;
-	u8 loop;
+	u32 nm;
+	u8 n, m, loop;
 	int i2c_clock;
 
 	switch (rdev->family) {
@@ -203,13 +221,15 @@
 	case CHIP_R300:
 	case CHIP_R350:
 	case CHIP_RV350:
-		n = (spll->reference_freq) / (4 * 6);
+		i2c_clock = 60;
+		nm = (sclk * 10) / (i2c_clock * 4);
 		for (loop = 1; loop < 255; loop++) {
-			if ((loop * (loop - 1)) > n)
+			if ((nm / loop) < loop)
 				break;
 		}
-		m = loop - 1;
-		prescale = m | (loop << 8);
+		n = loop - 1;
+		m = loop - 2;
+		prescale = m | (n << 8);
 		break;
 	case CHIP_RV380:
 	case CHIP_RS400:
@@ -217,7 +237,6 @@
 	case CHIP_R420:
 	case CHIP_R423:
 	case CHIP_RV410:
-		sclk = radeon_get_engine_clock(rdev);
 		prescale = (((sclk * 10)/(4 * 128 * 100) + 1) << 8) + 128;
 		break;
 	case CHIP_RS600:
@@ -232,7 +251,6 @@
 	case CHIP_RV570:
 	case CHIP_R580:
 		i2c_clock = 50;
-		sclk = radeon_get_engine_clock(rdev);
 		if (rdev->family == CHIP_R520)
 			prescale = (127 << 8) + ((sclk * 10) / (4 * 127 * i2c_clock));
 		else
@@ -291,6 +309,7 @@
 	prescale = radeon_get_i2c_prescale(rdev);
 
 	reg = ((prescale << RADEON_I2C_PRESCALE_SHIFT) |
+	       RADEON_I2C_DRIVE_EN |
 	       RADEON_I2C_START |
 	       RADEON_I2C_STOP |
 	       RADEON_I2C_GO);
@@ -757,26 +776,13 @@
 	return ret;
 }
 
-static int radeon_sw_i2c_xfer(struct i2c_adapter *i2c_adap,
+static int radeon_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
 			      struct i2c_msg *msgs, int num)
 {
 	struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
-	int ret;
-
-	radeon_i2c_do_lock(i2c, 1);
-	ret = i2c_transfer(&i2c->algo.radeon.bit_adapter, msgs, num);
-	radeon_i2c_do_lock(i2c, 0);
-
-	return ret;
-}
-
-static int radeon_i2c_xfer(struct i2c_adapter *i2c_adap,
-			   struct i2c_msg *msgs, int num)
-{
-	struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
 	struct radeon_device *rdev = i2c->dev->dev_private;
 	struct radeon_i2c_bus_rec *rec = &i2c->rec;
-	int ret;
+	int ret = 0;
 
 	switch (rdev->family) {
 	case CHIP_R100:
@@ -797,16 +803,12 @@
 	case CHIP_RV410:
 	case CHIP_RS400:
 	case CHIP_RS480:
-		if (rec->hw_capable)
-			ret = r100_hw_i2c_xfer(i2c_adap, msgs, num);
-		else
-			ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
+		ret = r100_hw_i2c_xfer(i2c_adap, msgs, num);
 		break;
 	case CHIP_RS600:
 	case CHIP_RS690:
 	case CHIP_RS740:
 		/* XXX fill in hw i2c implementation */
-		ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
 		break;
 	case CHIP_RV515:
 	case CHIP_R520:
@@ -814,20 +816,16 @@
 	case CHIP_RV560:
 	case CHIP_RV570:
 	case CHIP_R580:
-		if (rec->hw_capable) {
-			if (rec->mm_i2c)
-				ret = r100_hw_i2c_xfer(i2c_adap, msgs, num);
-			else
-				ret = r500_hw_i2c_xfer(i2c_adap, msgs, num);
-		} else
-			ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
+		if (rec->mm_i2c)
+			ret = r100_hw_i2c_xfer(i2c_adap, msgs, num);
+		else
+			ret = r500_hw_i2c_xfer(i2c_adap, msgs, num);
 		break;
 	case CHIP_R600:
 	case CHIP_RV610:
 	case CHIP_RV630:
 	case CHIP_RV670:
 		/* XXX fill in hw i2c implementation */
-		ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
 		break;
 	case CHIP_RV620:
 	case CHIP_RV635:
@@ -838,7 +836,6 @@
 	case CHIP_RV710:
 	case CHIP_RV740:
 		/* XXX fill in hw i2c implementation */
-		ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
 		break;
 	case CHIP_CEDAR:
 	case CHIP_REDWOOD:
@@ -846,7 +843,6 @@
 	case CHIP_CYPRESS:
 	case CHIP_HEMLOCK:
 		/* XXX fill in hw i2c implementation */
-		ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
 		break;
 	default:
 		DRM_ERROR("i2c: unhandled radeon chip\n");
@@ -857,20 +853,21 @@
 	return ret;
 }
 
-static u32 radeon_i2c_func(struct i2c_adapter *adap)
+static u32 radeon_hw_i2c_func(struct i2c_adapter *adap)
 {
 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
 }
 
 static const struct i2c_algorithm radeon_i2c_algo = {
-	.master_xfer = radeon_i2c_xfer,
-	.functionality = radeon_i2c_func,
+	.master_xfer = radeon_hw_i2c_xfer,
+	.functionality = radeon_hw_i2c_func,
 };
 
 struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
 					  struct radeon_i2c_bus_rec *rec,
 					  const char *name)
 {
+	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_i2c_chan *i2c;
 	int ret;
 
@@ -878,37 +875,43 @@
 	if (i2c == NULL)
 		return NULL;
 
-	/* set the internal bit adapter */
-	i2c->algo.radeon.bit_adapter.owner = THIS_MODULE;
-	i2c_set_adapdata(&i2c->algo.radeon.bit_adapter, i2c);
-	sprintf(i2c->algo.radeon.bit_adapter.name, "Radeon internal i2c bit bus %s", name);
-	i2c->algo.radeon.bit_adapter.algo_data = &i2c->algo.radeon.bit_data;
-	i2c->algo.radeon.bit_data.setsda = set_data;
-	i2c->algo.radeon.bit_data.setscl = set_clock;
-	i2c->algo.radeon.bit_data.getsda = get_data;
-	i2c->algo.radeon.bit_data.getscl = get_clock;
-	i2c->algo.radeon.bit_data.udelay = 20;
-	/* vesa says 2.2 ms is enough, 1 jiffy doesn't seem to always
-	 * make this, 2 jiffies is a lot more reliable */
-	i2c->algo.radeon.bit_data.timeout = 2;
-	i2c->algo.radeon.bit_data.data = i2c;
-	ret = i2c_bit_add_bus(&i2c->algo.radeon.bit_adapter);
-	if (ret) {
-		DRM_ERROR("Failed to register internal bit i2c %s\n", name);
-		goto out_free;
-	}
-	/* set the radeon i2c adapter */
-	i2c->dev = dev;
 	i2c->rec = *rec;
 	i2c->adapter.owner = THIS_MODULE;
+	i2c->dev = dev;
 	i2c_set_adapdata(&i2c->adapter, i2c);
-	sprintf(i2c->adapter.name, "Radeon i2c %s", name);
-	i2c->adapter.algo_data = &i2c->algo.radeon;
-	i2c->adapter.algo = &radeon_i2c_algo;
-	ret = i2c_add_adapter(&i2c->adapter);
-	if (ret) {
-		DRM_ERROR("Failed to register i2c %s\n", name);
-		goto out_free;
+	if (rec->mm_i2c ||
+	    (rec->hw_capable &&
+	     radeon_hw_i2c &&
+	     ((rdev->family <= CHIP_RS480) ||
+	      ((rdev->family >= CHIP_RV515) && (rdev->family <= CHIP_R580))))) {
+		/* set the radeon hw i2c adapter */
+		sprintf(i2c->adapter.name, "Radeon i2c hw bus %s", name);
+		i2c->adapter.algo = &radeon_i2c_algo;
+		ret = i2c_add_adapter(&i2c->adapter);
+		if (ret) {
+			DRM_ERROR("Failed to register hw i2c %s\n", name);
+			goto out_free;
+		}
+	} else {
+		/* set the radeon bit adapter */
+		sprintf(i2c->adapter.name, "Radeon i2c bit bus %s", name);
+		i2c->adapter.algo_data = &i2c->algo.bit;
+		i2c->algo.bit.pre_xfer = pre_xfer;
+		i2c->algo.bit.post_xfer = post_xfer;
+		i2c->algo.bit.setsda = set_data;
+		i2c->algo.bit.setscl = set_clock;
+		i2c->algo.bit.getsda = get_data;
+		i2c->algo.bit.getscl = get_clock;
+		i2c->algo.bit.udelay = 20;
+	/* VESA says 2.2 ms is enough, but 1 jiffy doesn't always seem to
+	 * cover that; 2 jiffies is a lot more reliable */
+		i2c->algo.bit.timeout = 2;
+		i2c->algo.bit.data = i2c;
+		ret = i2c_bit_add_bus(&i2c->adapter);
+		if (ret) {
+			DRM_ERROR("Failed to register bit i2c %s\n", name);
+			goto out_free;
+		}
 	}
 
 	return i2c;
@@ -953,16 +956,6 @@
 {
 	if (!i2c)
 		return;
-	i2c_del_adapter(&i2c->algo.radeon.bit_adapter);
-	i2c_del_adapter(&i2c->adapter);
-	kfree(i2c);
-}
-
-void radeon_i2c_destroy_dp(struct radeon_i2c_chan *i2c)
-{
-	if (!i2c)
-		return;
-
 	i2c_del_adapter(&i2c->adapter);
 	kfree(i2c);
 }
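
The radeon_i2c_create() change above registers the hardware i2c engine only for buses flagged hw_capable, and only when the radeon_hw_i2c flag is set and the ASIC family is one the hw path has been validated on (up to RS480, or RV515 through R580); everything else keeps the bit-banged i2c-algo-bit adapter. Below is a minimal user-space sketch of that selection rule; the family enum here is illustrative, not the kernel's.

/* Minimal user-space sketch of the adapter-selection rule introduced in
 * radeon_i2c_create() above.  The chip-family ordering only loosely mirrors
 * the kernel's enum radeon_family; treat the values as illustrative. */
#include <stdbool.h>
#include <stdio.h>

enum family { CHIP_R100, CHIP_RS480, CHIP_RV515, CHIP_R580, CHIP_RV620 };

static bool use_hw_i2c(bool mm_i2c, bool hw_capable, bool hw_i2c_flag,
			enum family fam)
{
	/* Multimedia i2c blocks always use the hw engine; otherwise the hw
	 * engine is used only when the bus supports it, the user asked for
	 * it, and the family is one the hw path handles. */
	if (mm_i2c)
		return true;
	return hw_capable && hw_i2c_flag &&
	       (fam <= CHIP_RS480 || (fam >= CHIP_RV515 && fam <= CHIP_R580));
}

int main(void)
{
	printf("RV620, hw capable, flag set -> %s\n",
	       use_hw_i2c(false, true, true, CHIP_RV620) ? "hw" : "bit-banged");
	printf("RS480, hw capable, flag set -> %s\n",
	       use_hw_i2c(false, true, true, CHIP_RS480) ? "hw" : "bit-banged");
	return 0;
}
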
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index ea4c645..a212041 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -67,9 +67,10 @@
 
 	/* Disable *all* interrupts */
 	rdev->irq.sw_int = false;
-	for (i = 0; i < 2; i++) {
+	for (i = 0; i < rdev->num_crtc; i++)
 		rdev->irq.crtc_vblank_int[i] = false;
-	}
+	for (i = 0; i < 6; i++)
+		rdev->irq.hpd[i] = false;
 	radeon_irq_set(rdev);
 	/* Clear bits */
 	radeon_irq_process(rdev);
@@ -95,28 +96,29 @@
 	}
 	/* Disable *all* interrupts */
 	rdev->irq.sw_int = false;
-	for (i = 0; i < 2; i++) {
+	for (i = 0; i < rdev->num_crtc; i++)
 		rdev->irq.crtc_vblank_int[i] = false;
+	for (i = 0; i < 6; i++)
 		rdev->irq.hpd[i] = false;
-	}
 	radeon_irq_set(rdev);
 }
 
 int radeon_irq_kms_init(struct radeon_device *rdev)
 {
 	int r = 0;
-	int num_crtc = 2;
 
-	if (rdev->flags & RADEON_SINGLE_CRTC)
-		num_crtc = 1;
 	spin_lock_init(&rdev->irq.sw_lock);
-	r = drm_vblank_init(rdev->ddev, num_crtc);
+	r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
 	if (r) {
 		return r;
 	}
 	/* enable msi */
 	rdev->msi_enabled = 0;
-	if (rdev->family >= CHIP_RV380) {
+	/* MSIs don't seem to work reliably on all IGP
+	 * chips.  Disable MSI on them for now.
+	 */
+	if ((rdev->family >= CHIP_RV380) &&
+	    (!(rdev->flags & RADEON_IS_IGP))) {
 		int ret = pci_enable_msi(rdev->pdev);
 		if (!ret) {
 			rdev->msi_enabled = 1;
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index df23d6a..88865e3 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -603,6 +603,10 @@
 				      ? RADEON_CRTC2_INTERLACE_EN
 				      : 0));
 
+		/* rs4xx chips seem to like to have the crtc enabled when the timing is set */
+		if ((rdev->family == CHIP_RS400) || (rdev->family == CHIP_RS480))
+			crtc2_gen_cntl |= RADEON_CRTC2_EN;
+
 		disp2_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL);
 		disp2_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN;
 
@@ -630,6 +634,10 @@
 				    ? RADEON_CRTC_INTERLACE_EN
 				    : 0));
 
+		/* rs4xx chips seem to like to have the crtc enabled when the timing is set */
+		if ((rdev->family == CHIP_RS400) || (rdev->family == CHIP_RS480))
+			crtc_gen_cntl |= RADEON_CRTC_EN;
+
 		crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
 		crtc_ext_cntl |= (RADEON_XCRT_CNT_EN |
 				  RADEON_CRTC_VSYNC_DIS |
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_tv.c b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
index 417684d..f2ed27c 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_tv.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
@@ -57,6 +57,10 @@
 #define NTSC_TV_PLL_N_14 693
 #define NTSC_TV_PLL_P_14 7
 
+#define PAL_TV_PLL_M_14 19
+#define PAL_TV_PLL_N_14 353
+#define PAL_TV_PLL_P_14 5
+
 #define VERT_LEAD_IN_LINES 2
 #define FRAC_BITS 0xe
 #define FRAC_MASK 0x3fff
@@ -205,9 +209,24 @@
 		630627,             /* defRestart */
 		347,                /* crtcPLL_N */
 		14,                 /* crtcPLL_M */
-			8,                  /* crtcPLL_postDiv */
+		8,                  /* crtcPLL_postDiv */
 		1022,               /* pixToTV */
 	},
+	{ /* PAL timing for 14 Mhz ref clk */
+		800,                /* horResolution */
+		600,                /* verResolution */
+		TV_STD_PAL,         /* standard */
+		1131,               /* horTotal */
+		742,                /* verTotal */
+		813,                /* horStart */
+		840,                /* horSyncStart */
+		633,                /* verSyncStart */
+		708369,             /* defRestart */
+		211,                /* crtcPLL_N */
+		9,                  /* crtcPLL_M */
+		8,                  /* crtcPLL_postDiv */
+		759,                /* pixToTV */
+	},
 };
 
 #define N_AVAILABLE_MODES ARRAY_SIZE(available_tv_modes)
@@ -242,7 +261,7 @@
 		if (pll->reference_freq == 2700)
 			const_ptr = &available_tv_modes[1];
 		else
-			const_ptr = &available_tv_modes[1]; /* FIX ME */
+			const_ptr = &available_tv_modes[3];
 	}
 	return const_ptr;
 }
@@ -685,9 +704,9 @@
 			n = PAL_TV_PLL_N_27;
 			p = PAL_TV_PLL_P_27;
 		} else {
-			m = PAL_TV_PLL_M_27;
-			n = PAL_TV_PLL_N_27;
-			p = PAL_TV_PLL_P_27;
+			m = PAL_TV_PLL_M_14;
+			n = PAL_TV_PLL_N_14;
+			p = PAL_TV_PLL_P_14;
 		}
 	}
 
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 1702b82..0b8e327 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -129,6 +129,7 @@
 #define RADEON_PLL_USE_FRAC_FB_DIV      (1 << 10)
 #define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11)
 #define RADEON_PLL_USE_POST_DIV         (1 << 12)
+#define RADEON_PLL_IS_LCD               (1 << 13)
 
 /* pll algo */
 enum radeon_pll_algo {
@@ -149,6 +150,8 @@
 	uint32_t pll_in_max;
 	uint32_t pll_out_min;
 	uint32_t pll_out_max;
+	uint32_t lcd_pll_out_min;
+	uint32_t lcd_pll_out_max;
 	uint32_t best_vco;
 
 	/* divider limits */
@@ -170,17 +173,12 @@
 	enum radeon_pll_algo algo;
 };
 
-struct i2c_algo_radeon_data {
-	struct i2c_adapter bit_adapter;
-	struct i2c_algo_bit_data bit_data;
-};
-
 struct radeon_i2c_chan {
 	struct i2c_adapter adapter;
 	struct drm_device *dev;
 	union {
+		struct i2c_algo_bit_data bit;
 		struct i2c_algo_dp_aux_data dp;
-		struct i2c_algo_radeon_data radeon;
 	} algo;
 	struct radeon_i2c_bus_rec rec;
 };
@@ -342,6 +340,7 @@
 	struct drm_display_mode native_mode;
 	void *enc_priv;
 	int hdmi_offset;
+	int hdmi_config_offset;
 	int hdmi_audio_workaround;
 	int hdmi_buffer_status;
 };
@@ -431,7 +430,6 @@
 						 struct radeon_i2c_bus_rec *rec,
 						 const char *name);
 extern void radeon_i2c_destroy(struct radeon_i2c_chan *i2c);
-extern void radeon_i2c_destroy_dp(struct radeon_i2c_chan *i2c);
 extern void radeon_i2c_get_byte(struct radeon_i2c_chan *i2c_bus,
 				u8 slave_addr,
 				u8 addr,
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index fc9d00a..dc7e3f4 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -185,8 +185,10 @@
 		return 0;
 	}
 	radeon_ttm_placement_from_domain(bo, domain);
-	/* force to pin into visible video ram */
-	bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
+	if (domain == RADEON_GEM_DOMAIN_VRAM) {
+		/* force to pin into visible video ram */
+		bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
+	}
 	for (i = 0; i < bo->placement.num_placement; i++)
 		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
 	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index d4d1c39..a4b5749 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -28,6 +28,7 @@
 #define RADEON_RECLOCK_DELAY_MS 200
 #define RADEON_WAIT_VBLANK_TIMEOUT 200
 
+static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
 static void radeon_pm_set_clocks_locked(struct radeon_device *rdev);
 static void radeon_pm_set_clocks(struct radeon_device *rdev);
 static void radeon_pm_idle_work_handler(struct work_struct *work);
@@ -179,6 +180,16 @@
 		 rdev->pm.requested_power_state->non_clock_info.pcie_lanes);
 }
 
+static inline void radeon_sync_with_vblank(struct radeon_device *rdev)
+{
+	if (rdev->pm.active_crtcs) {
+		rdev->pm.vblank_sync = false;
+		wait_event_timeout(
+			rdev->irq.vblank_queue, rdev->pm.vblank_sync,
+			msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
+	}
+}
+
 static void radeon_set_power_state(struct radeon_device *rdev)
 {
 	/* if *_clock_mode are the same, *_power_state are as well */
@@ -189,11 +200,28 @@
 		 rdev->pm.requested_clock_mode->sclk,
 		 rdev->pm.requested_clock_mode->mclk,
 		 rdev->pm.requested_power_state->non_clock_info.pcie_lanes);
+
 	/* set pcie lanes */
+	/* TODO */
+
 	/* set voltage */
+	/* TODO */
+
 	/* set engine clock */
+	radeon_sync_with_vblank(rdev);
+	radeon_pm_debug_check_in_vbl(rdev, false);
 	radeon_set_engine_clock(rdev, rdev->pm.requested_clock_mode->sclk);
+	radeon_pm_debug_check_in_vbl(rdev, true);
+
+#if 0
 	/* set memory clock */
+	if (rdev->asic->set_memory_clock) {
+		radeon_sync_with_vblank(rdev);
+		radeon_pm_debug_check_in_vbl(rdev, false);
+		radeon_set_memory_clock(rdev, rdev->pm.requested_clock_mode->mclk);
+		radeon_pm_debug_check_in_vbl(rdev, true);
+	}
+#endif
 
 	rdev->pm.current_power_state = rdev->pm.requested_power_state;
 	rdev->pm.current_clock_mode = rdev->pm.requested_clock_mode;
@@ -229,6 +257,12 @@
 	return 0;
 }
 
+void radeon_pm_fini(struct radeon_device *rdev)
+{
+	if (rdev->pm.i2c_bus)
+		radeon_i2c_destroy(rdev->pm.i2c_bus);
+}
+
 void radeon_pm_compute_clocks(struct radeon_device *rdev)
 {
 	struct drm_device *ddev = rdev->ddev;
@@ -245,7 +279,8 @@
 	list_for_each_entry(connector,
 		&ddev->mode_config.connector_list, head) {
 		if (connector->encoder &&
-			connector->dpms != DRM_MODE_DPMS_OFF) {
+		    connector->encoder->crtc &&
+		    connector->dpms != DRM_MODE_DPMS_OFF) {
 			radeon_crtc = to_radeon_crtc(connector->encoder->crtc);
 			rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
 			++count;
@@ -333,10 +368,7 @@
 		break;
 	}
 
-	/* check if we are in vblank */
-	radeon_pm_debug_check_in_vbl(rdev, false);
 	radeon_set_power_state(rdev);
-	radeon_pm_debug_check_in_vbl(rdev, true);
 	rdev->pm.planned_action = PM_ACTION_NONE;
 }
 
@@ -353,10 +385,7 @@
 		rdev->pm.req_vblank |= (1 << 1);
 		drm_vblank_get(rdev->ddev, 1);
 	}
-	if (rdev->pm.active_crtcs)
-		wait_event_interruptible_timeout(
-			rdev->irq.vblank_queue, 0,
-			msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
+	radeon_pm_set_clocks_locked(rdev);
 	if (rdev->pm.req_vblank & (1 << 0)) {
 		rdev->pm.req_vblank &= ~(1 << 0);
 		drm_vblank_put(rdev->ddev, 0);
@@ -366,7 +395,6 @@
 		drm_vblank_put(rdev->ddev, 1);
 	}
 
-	radeon_pm_set_clocks_locked(rdev);
 	mutex_unlock(&rdev->cp.mutex);
 }
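
radeon_sync_with_vblank() above clears pm.vblank_sync and sleeps on irq.vblank_queue with a 200 ms timeout; the vblank interrupt handler (see the rs600.c hunk later in this diff) sets the flag and wakes the queue, so reclocking lands near a vblank instead of at an arbitrary point. Below is a user-space analogue of that flag-plus-wakeup handshake, using pthread condition variables in place of the kernel's wait_event_timeout()/wake_up() pair; it is only an illustration of the pattern.

/* User-space analogue of the vblank_sync handshake added above: the reclock
 * path clears a flag and does a timed wait; the "interrupt" path sets the
 * flag and wakes the waiter. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool vblank_sync;

static void *vblank_irq(void *arg)
{
	usleep(5000);			/* pretend a vblank arrives after 5 ms */
	pthread_mutex_lock(&lock);
	vblank_sync = true;		/* like rs600_irq_process() setting the flag */
	pthread_cond_signal(&cond);	/* like wake_up(&rdev->irq.vblank_queue)    */
	pthread_mutex_unlock(&lock);
	return NULL;
}

static void sync_with_vblank(void)
{
	struct timespec deadline;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_nsec += 200 * 1000000L;	/* 200 ms, as in the patch */
	deadline.tv_sec += deadline.tv_nsec / 1000000000L;
	deadline.tv_nsec %= 1000000000L;

	pthread_mutex_lock(&lock);
	vblank_sync = false;
	while (!vblank_sync &&
	       pthread_cond_timedwait(&cond, &lock, &deadline) == 0)
		;
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t irq;

	pthread_create(&irq, NULL, vblank_irq, NULL);
	sync_with_vblank();		/* safe point to touch the clocks */
	puts("reclocking near a vblank");
	pthread_join(irq, NULL);
	return 0;
}
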
 
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index 5c0dc08..eabbc9c 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -346,6 +346,7 @@
 #       define RADEON_TVPLL_PWRMGT_OFF      (1 << 30)
 #       define RADEON_TVCLK_TURNOFF         (1 << 31)
 #define RADEON_PLL_PWRMGT_CNTL              0x0015 /* PLL */
+#	define RADEON_PM_MODE_SEL           (1 << 13)
 #       define RADEON_TCL_BYPASS_DISABLE    (1 << 20)
 #define RADEON_CLR_CMP_CLR_3D               0x1a24
 #define RADEON_CLR_CMP_CLR_DST              0x15c8
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r600 b/drivers/gpu/drm/radeon/reg_srcs/r600
index 8f414a5..af0da4a 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r600
+++ b/drivers/gpu/drm/radeon/reg_srcs/r600
@@ -26,20 +26,16 @@
 0x00028408 VGT_INDX_OFFSET
 0x00028AA0 VGT_INSTANCE_STEP_RATE_0
 0x00028AA4 VGT_INSTANCE_STEP_RATE_1
-0x000088C0 VGT_LAST_COPY_STATE
 0x00028400 VGT_MAX_VTX_INDX
-0x000088D8 VGT_MC_LAT_CNTL
 0x00028404 VGT_MIN_VTX_INDX
 0x00028A94 VGT_MULTI_PRIM_IB_RESET_EN
 0x0002840C VGT_MULTI_PRIM_IB_RESET_INDX
 0x00008970 VGT_NUM_INDICES
 0x00008974 VGT_NUM_INSTANCES
 0x00028A10 VGT_OUTPUT_PATH_CNTL
-0x00028C5C VGT_OUT_DEALLOC_CNTL
 0x00028A84 VGT_PRIMITIVEID_EN
 0x00008958 VGT_PRIMITIVE_TYPE
 0x00028AB4 VGT_REUSE_OFF
-0x00028C58 VGT_VERTEX_REUSE_BLOCK_CNTL
 0x00028AB8 VGT_VTX_CNT_EN
 0x000088B0 VGT_VTX_VECT_EJECT_REG
 0x00028810 PA_CL_CLIP_CNTL
@@ -280,7 +276,6 @@
 0x00028E00 PA_SU_POLY_OFFSET_FRONT_SCALE
 0x00028814 PA_SU_SC_MODE_CNTL
 0x00028C08 PA_SU_VTX_CNTL
-0x00008C00 SQ_CONFIG
 0x00008C04 SQ_GPR_RESOURCE_MGMT_1
 0x00008C08 SQ_GPR_RESOURCE_MGMT_2
 0x00008C10 SQ_STACK_RESOURCE_MGMT_1
@@ -320,18 +315,6 @@
 0x000283FC SQ_VTX_SEMANTIC_31
 0x000288E0 SQ_VTX_SEMANTIC_CLEAR
 0x0003CFF4 SQ_VTX_START_INST_LOC
-0x0003C000 SQ_TEX_SAMPLER_WORD0_0
-0x0003C004 SQ_TEX_SAMPLER_WORD1_0
-0x0003C008 SQ_TEX_SAMPLER_WORD2_0
-0x00030000 SQ_ALU_CONSTANT0_0
-0x00030004 SQ_ALU_CONSTANT1_0
-0x00030008 SQ_ALU_CONSTANT2_0
-0x0003000C SQ_ALU_CONSTANT3_0
-0x0003E380 SQ_BOOL_CONST_0
-0x0003E384 SQ_BOOL_CONST_1
-0x0003E388 SQ_BOOL_CONST_2
-0x0003E200 SQ_LOOP_CONST_0
-0x0003E200 SQ_LOOP_CONST_DX10_0
 0x000281C0 SQ_ALU_CONST_BUFFER_SIZE_GS_0
 0x000281C4 SQ_ALU_CONST_BUFFER_SIZE_GS_1
 0x000281C8 SQ_ALU_CONST_BUFFER_SIZE_GS_2
@@ -380,54 +363,6 @@
 0x000281B4 SQ_ALU_CONST_BUFFER_SIZE_VS_13
 0x000281B8 SQ_ALU_CONST_BUFFER_SIZE_VS_14
 0x000281BC SQ_ALU_CONST_BUFFER_SIZE_VS_15
-0x000289C0 SQ_ALU_CONST_CACHE_GS_0
-0x000289C4 SQ_ALU_CONST_CACHE_GS_1
-0x000289C8 SQ_ALU_CONST_CACHE_GS_2
-0x000289CC SQ_ALU_CONST_CACHE_GS_3
-0x000289D0 SQ_ALU_CONST_CACHE_GS_4
-0x000289D4 SQ_ALU_CONST_CACHE_GS_5
-0x000289D8 SQ_ALU_CONST_CACHE_GS_6
-0x000289DC SQ_ALU_CONST_CACHE_GS_7
-0x000289E0 SQ_ALU_CONST_CACHE_GS_8
-0x000289E4 SQ_ALU_CONST_CACHE_GS_9
-0x000289E8 SQ_ALU_CONST_CACHE_GS_10
-0x000289EC SQ_ALU_CONST_CACHE_GS_11
-0x000289F0 SQ_ALU_CONST_CACHE_GS_12
-0x000289F4 SQ_ALU_CONST_CACHE_GS_13
-0x000289F8 SQ_ALU_CONST_CACHE_GS_14
-0x000289FC SQ_ALU_CONST_CACHE_GS_15
-0x00028940 SQ_ALU_CONST_CACHE_PS_0
-0x00028944 SQ_ALU_CONST_CACHE_PS_1
-0x00028948 SQ_ALU_CONST_CACHE_PS_2
-0x0002894C SQ_ALU_CONST_CACHE_PS_3
-0x00028950 SQ_ALU_CONST_CACHE_PS_4
-0x00028954 SQ_ALU_CONST_CACHE_PS_5
-0x00028958 SQ_ALU_CONST_CACHE_PS_6
-0x0002895C SQ_ALU_CONST_CACHE_PS_7
-0x00028960 SQ_ALU_CONST_CACHE_PS_8
-0x00028964 SQ_ALU_CONST_CACHE_PS_9
-0x00028968 SQ_ALU_CONST_CACHE_PS_10
-0x0002896C SQ_ALU_CONST_CACHE_PS_11
-0x00028970 SQ_ALU_CONST_CACHE_PS_12
-0x00028974 SQ_ALU_CONST_CACHE_PS_13
-0x00028978 SQ_ALU_CONST_CACHE_PS_14
-0x0002897C SQ_ALU_CONST_CACHE_PS_15
-0x00028980 SQ_ALU_CONST_CACHE_VS_0
-0x00028984 SQ_ALU_CONST_CACHE_VS_1
-0x00028988 SQ_ALU_CONST_CACHE_VS_2
-0x0002898C SQ_ALU_CONST_CACHE_VS_3
-0x00028990 SQ_ALU_CONST_CACHE_VS_4
-0x00028994 SQ_ALU_CONST_CACHE_VS_5
-0x00028998 SQ_ALU_CONST_CACHE_VS_6
-0x0002899C SQ_ALU_CONST_CACHE_VS_7
-0x000289A0 SQ_ALU_CONST_CACHE_VS_8
-0x000289A4 SQ_ALU_CONST_CACHE_VS_9
-0x000289A8 SQ_ALU_CONST_CACHE_VS_10
-0x000289AC SQ_ALU_CONST_CACHE_VS_11
-0x000289B0 SQ_ALU_CONST_CACHE_VS_12
-0x000289B4 SQ_ALU_CONST_CACHE_VS_13
-0x000289B8 SQ_ALU_CONST_CACHE_VS_14
-0x000289BC SQ_ALU_CONST_CACHE_VS_15
 0x000288D8 SQ_PGM_CF_OFFSET_ES
 0x000288DC SQ_PGM_CF_OFFSET_FS
 0x000288D4 SQ_PGM_CF_OFFSET_GS
@@ -494,12 +429,7 @@
 0x00028438 SX_ALPHA_REF
 0x00028410 SX_ALPHA_TEST_CONTROL
 0x00028350 SX_MISC
-0x0000A020 SMX_DC_CTL0
-0x0000A024 SMX_DC_CTL1
-0x0000A028 SMX_DC_CTL2
-0x00009608 TC_CNTL
 0x00009604 TC_INVALIDATE
-0x00009490 TD_CNTL
 0x00009400 TD_FILTER4
 0x00009404 TD_FILTER4_1
 0x00009408 TD_FILTER4_2
@@ -824,14 +754,9 @@
 0x00028428 CB_FOG_GREEN
 0x00028424 CB_FOG_RED
 0x00008040 WAIT_UNTIL
-0x00008950 CC_GC_SHADER_PIPE_CONFIG
-0x00008954 GC_USER_SHADER_PIPE_CONFIG
 0x00009714 VC_ENHANCE
 0x00009830 DB_DEBUG
 0x00009838 DB_WATERMARKS
 0x00028D28 DB_SRESULTS_COMPARE_STATE0
 0x00028D44 DB_ALPHA_TO_MASK
-0x00009504 TA_CNTL
 0x00009700 VC_CNTL
-0x00009718 VC_CONFIG
-0x0000A02C SMX_DC_MC_INTF_CTL
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 626d518..626aaf0 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -28,6 +28,7 @@
 #include <linux/seq_file.h>
 #include <drm/drmP.h>
 #include "radeon.h"
+#include "radeon_asic.h"
 #include "rs400d.h"
 
 /* This files gather functions specifics to : rs400,rs480 */
@@ -202,9 +203,9 @@
 
 void rs400_gart_fini(struct radeon_device *rdev)
 {
+	radeon_gart_fini(rdev);
 	rs400_gart_disable(rdev);
 	radeon_gart_table_ram_free(rdev);
-	radeon_gart_fini(rdev);
 }
 
 int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
@@ -264,6 +265,7 @@
 	base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
 	radeon_vram_location(rdev, &rdev->mc, base);
 	radeon_gtt_location(rdev, &rdev->mc);
+	radeon_update_bandwidth_info(rdev);
 }
 
 uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg)
@@ -388,6 +390,8 @@
 {
 	int r;
 
+	r100_set_common_regs(rdev);
+
 	rs400_mc_program(rdev);
 	/* Resume clock */
 	r300_clock_startup(rdev);
@@ -453,6 +457,7 @@
 
 void rs400_fini(struct radeon_device *rdev)
 {
+	radeon_pm_fini(rdev);
 	r100_cp_fini(rdev);
 	r100_wb_fini(rdev);
 	r100_ib_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 47f046b..abf824c 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -37,6 +37,7 @@
  */
 #include "drmP.h"
 #include "radeon.h"
+#include "radeon_asic.h"
 #include "atom.h"
 #include "rs600d.h"
 
@@ -267,9 +268,9 @@
 
 void rs600_gart_fini(struct radeon_device *rdev)
 {
+	radeon_gart_fini(rdev);
 	rs600_gart_disable(rdev);
 	radeon_gart_table_vram_free(rdev);
-	radeon_gart_fini(rdev);
 }
 
 #define R600_PTE_VALID     (1 << 0)
@@ -392,10 +393,12 @@
 		/* Vertical blank interrupts */
 		if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) {
 			drm_handle_vblank(rdev->ddev, 0);
+			rdev->pm.vblank_sync = true;
 			wake_up(&rdev->irq.vblank_queue);
 		}
 		if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int)) {
 			drm_handle_vblank(rdev->ddev, 1);
+			rdev->pm.vblank_sync = true;
 			wake_up(&rdev->irq.vblank_queue);
 		}
 		if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(r500_disp_int)) {
@@ -472,13 +475,38 @@
 	rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
 	base = RREG32_MC(R_000004_MC_FB_LOCATION);
 	base = G_000004_MC_FB_START(base) << 16;
+	rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
 	radeon_vram_location(rdev, &rdev->mc, base);
 	radeon_gtt_location(rdev, &rdev->mc);
+	radeon_update_bandwidth_info(rdev);
 }
 
 void rs600_bandwidth_update(struct radeon_device *rdev)
 {
-	/* FIXME: implement, should this be like rs690 ? */
+	struct drm_display_mode *mode0 = NULL;
+	struct drm_display_mode *mode1 = NULL;
+	u32 d1mode_priority_a_cnt, d2mode_priority_a_cnt;
+	/* FIXME: implement full support */
+
+	radeon_update_display_priority(rdev);
+
+	if (rdev->mode_info.crtcs[0]->base.enabled)
+		mode0 = &rdev->mode_info.crtcs[0]->base.mode;
+	if (rdev->mode_info.crtcs[1]->base.enabled)
+		mode1 = &rdev->mode_info.crtcs[1]->base.mode;
+
+	rs690_line_buffer_adjust(rdev, mode0, mode1);
+
+	if (rdev->disp_priority == 2) {
+		d1mode_priority_a_cnt = RREG32(R_006548_D1MODE_PRIORITY_A_CNT);
+		d2mode_priority_a_cnt = RREG32(R_006D48_D2MODE_PRIORITY_A_CNT);
+		d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
+		d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
+		WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
+		WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
+		WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
+		WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
+	}
 }
 
 uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg)
@@ -598,6 +626,7 @@
 
 void rs600_fini(struct radeon_device *rdev)
 {
+	radeon_pm_fini(rdev);
 	r100_cp_fini(rdev);
 	r100_wb_fini(rdev);
 	r100_ib_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rs600d.h b/drivers/gpu/drm/radeon/rs600d.h
index c1c8f58..e52d269 100644
--- a/drivers/gpu/drm/radeon/rs600d.h
+++ b/drivers/gpu/drm/radeon/rs600d.h
@@ -535,4 +535,57 @@
 #define   G_00016C_INVALIDATE_L1_TLB(x)                (((x) >> 20) & 0x1)
 #define   C_00016C_INVALIDATE_L1_TLB                   0xFFEFFFFF
 
+#define R_006548_D1MODE_PRIORITY_A_CNT               0x006548
+#define   S_006548_D1MODE_PRIORITY_MARK_A(x)           (((x) & 0x7FFF) << 0)
+#define   G_006548_D1MODE_PRIORITY_MARK_A(x)           (((x) >> 0) & 0x7FFF)
+#define   C_006548_D1MODE_PRIORITY_MARK_A              0xFFFF8000
+#define   S_006548_D1MODE_PRIORITY_A_OFF(x)            (((x) & 0x1) << 16)
+#define   G_006548_D1MODE_PRIORITY_A_OFF(x)            (((x) >> 16) & 0x1)
+#define   C_006548_D1MODE_PRIORITY_A_OFF               0xFFFEFFFF
+#define   S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x)      (((x) & 0x1) << 20)
+#define   G_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x)      (((x) >> 20) & 0x1)
+#define   C_006548_D1MODE_PRIORITY_A_ALWAYS_ON         0xFFEFFFFF
+#define   S_006548_D1MODE_PRIORITY_A_FORCE_MASK(x)     (((x) & 0x1) << 24)
+#define   G_006548_D1MODE_PRIORITY_A_FORCE_MASK(x)     (((x) >> 24) & 0x1)
+#define   C_006548_D1MODE_PRIORITY_A_FORCE_MASK        0xFEFFFFFF
+#define R_00654C_D1MODE_PRIORITY_B_CNT               0x00654C
+#define   S_00654C_D1MODE_PRIORITY_MARK_B(x)           (((x) & 0x7FFF) << 0)
+#define   G_00654C_D1MODE_PRIORITY_MARK_B(x)           (((x) >> 0) & 0x7FFF)
+#define   C_00654C_D1MODE_PRIORITY_MARK_B              0xFFFF8000
+#define   S_00654C_D1MODE_PRIORITY_B_OFF(x)            (((x) & 0x1) << 16)
+#define   G_00654C_D1MODE_PRIORITY_B_OFF(x)            (((x) >> 16) & 0x1)
+#define   C_00654C_D1MODE_PRIORITY_B_OFF               0xFFFEFFFF
+#define   S_00654C_D1MODE_PRIORITY_B_ALWAYS_ON(x)      (((x) & 0x1) << 20)
+#define   G_00654C_D1MODE_PRIORITY_B_ALWAYS_ON(x)      (((x) >> 20) & 0x1)
+#define   C_00654C_D1MODE_PRIORITY_B_ALWAYS_ON         0xFFEFFFFF
+#define   S_00654C_D1MODE_PRIORITY_B_FORCE_MASK(x)     (((x) & 0x1) << 24)
+#define   G_00654C_D1MODE_PRIORITY_B_FORCE_MASK(x)     (((x) >> 24) & 0x1)
+#define   C_00654C_D1MODE_PRIORITY_B_FORCE_MASK        0xFEFFFFFF
+#define R_006D48_D2MODE_PRIORITY_A_CNT               0x006D48
+#define   S_006D48_D2MODE_PRIORITY_MARK_A(x)           (((x) & 0x7FFF) << 0)
+#define   G_006D48_D2MODE_PRIORITY_MARK_A(x)           (((x) >> 0) & 0x7FFF)
+#define   C_006D48_D2MODE_PRIORITY_MARK_A              0xFFFF8000
+#define   S_006D48_D2MODE_PRIORITY_A_OFF(x)            (((x) & 0x1) << 16)
+#define   G_006D48_D2MODE_PRIORITY_A_OFF(x)            (((x) >> 16) & 0x1)
+#define   C_006D48_D2MODE_PRIORITY_A_OFF               0xFFFEFFFF
+#define   S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(x)      (((x) & 0x1) << 20)
+#define   G_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(x)      (((x) >> 20) & 0x1)
+#define   C_006D48_D2MODE_PRIORITY_A_ALWAYS_ON         0xFFEFFFFF
+#define   S_006D48_D2MODE_PRIORITY_A_FORCE_MASK(x)     (((x) & 0x1) << 24)
+#define   G_006D48_D2MODE_PRIORITY_A_FORCE_MASK(x)     (((x) >> 24) & 0x1)
+#define   C_006D48_D2MODE_PRIORITY_A_FORCE_MASK        0xFEFFFFFF
+#define R_006D4C_D2MODE_PRIORITY_B_CNT               0x006D4C
+#define   S_006D4C_D2MODE_PRIORITY_MARK_B(x)           (((x) & 0x7FFF) << 0)
+#define   G_006D4C_D2MODE_PRIORITY_MARK_B(x)           (((x) >> 0) & 0x7FFF)
+#define   C_006D4C_D2MODE_PRIORITY_MARK_B              0xFFFF8000
+#define   S_006D4C_D2MODE_PRIORITY_B_OFF(x)            (((x) & 0x1) << 16)
+#define   G_006D4C_D2MODE_PRIORITY_B_OFF(x)            (((x) >> 16) & 0x1)
+#define   C_006D4C_D2MODE_PRIORITY_B_OFF               0xFFFEFFFF
+#define   S_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON(x)      (((x) & 0x1) << 20)
+#define   G_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON(x)      (((x) >> 20) & 0x1)
+#define   C_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON         0xFFEFFFFF
+#define   S_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x)     (((x) & 0x1) << 24)
+#define   G_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x)     (((x) >> 24) & 0x1)
+#define   C_006D4C_D2MODE_PRIORITY_B_FORCE_MASK        0xFEFFFFFF
+
 #endif
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 83b9174..bbf3da7 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -27,6 +27,7 @@
  */
 #include "drmP.h"
 #include "radeon.h"
+#include "radeon_asic.h"
 #include "atom.h"
 #include "rs690d.h"
 
@@ -57,42 +58,57 @@
 	}
 }
 
+union igp_info {
+	struct _ATOM_INTEGRATED_SYSTEM_INFO info;
+	struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_v2;
+};
+
 void rs690_pm_info(struct radeon_device *rdev)
 {
 	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
-	struct _ATOM_INTEGRATED_SYSTEM_INFO *info;
-	struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 *info_v2;
-	void *ptr;
+	union igp_info *info;
 	uint16_t data_offset;
 	uint8_t frev, crev;
 	fixed20_12 tmp;
 
-	atom_parse_data_header(rdev->mode_info.atom_context, index, NULL,
-			       &frev, &crev, &data_offset);
-	ptr = rdev->mode_info.atom_context->bios + data_offset;
-	info = (struct _ATOM_INTEGRATED_SYSTEM_INFO *)ptr;
-	info_v2 = (struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 *)ptr;
-	/* Get various system informations from bios */
-	switch (crev) {
-	case 1:
-		tmp.full = rfixed_const(100);
-		rdev->pm.igp_sideport_mclk.full = rfixed_const(info->ulBootUpMemoryClock);
-		rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp);
-		rdev->pm.igp_system_mclk.full = rfixed_const(le16_to_cpu(info->usK8MemoryClock));
-		rdev->pm.igp_ht_link_clk.full = rfixed_const(le16_to_cpu(info->usFSBClock));
-		rdev->pm.igp_ht_link_width.full = rfixed_const(info->ucHTLinkWidth);
-		break;
-	case 2:
-		tmp.full = rfixed_const(100);
-		rdev->pm.igp_sideport_mclk.full = rfixed_const(info_v2->ulBootUpSidePortClock);
-		rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp);
-		rdev->pm.igp_system_mclk.full = rfixed_const(info_v2->ulBootUpUMAClock);
-		rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp);
-		rdev->pm.igp_ht_link_clk.full = rfixed_const(info_v2->ulHTLinkFreq);
-		rdev->pm.igp_ht_link_clk.full = rfixed_div(rdev->pm.igp_ht_link_clk, tmp);
-		rdev->pm.igp_ht_link_width.full = rfixed_const(le16_to_cpu(info_v2->usMinHTLinkWidth));
-		break;
-	default:
+	if (atom_parse_data_header(rdev->mode_info.atom_context, index, NULL,
+				   &frev, &crev, &data_offset)) {
+		info = (union igp_info *)(rdev->mode_info.atom_context->bios + data_offset);
+
+		/* Get various system information from the BIOS */
+		switch (crev) {
+		case 1:
+			tmp.full = rfixed_const(100);
+			rdev->pm.igp_sideport_mclk.full = rfixed_const(info->info.ulBootUpMemoryClock);
+			rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp);
+			rdev->pm.igp_system_mclk.full = rfixed_const(le16_to_cpu(info->info.usK8MemoryClock));
+			rdev->pm.igp_ht_link_clk.full = rfixed_const(le16_to_cpu(info->info.usFSBClock));
+			rdev->pm.igp_ht_link_width.full = rfixed_const(info->info.ucHTLinkWidth);
+			break;
+		case 2:
+			tmp.full = rfixed_const(100);
+			rdev->pm.igp_sideport_mclk.full = rfixed_const(info->info_v2.ulBootUpSidePortClock);
+			rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp);
+			rdev->pm.igp_system_mclk.full = rfixed_const(info->info_v2.ulBootUpUMAClock);
+			rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp);
+			rdev->pm.igp_ht_link_clk.full = rfixed_const(info->info_v2.ulHTLinkFreq);
+			rdev->pm.igp_ht_link_clk.full = rfixed_div(rdev->pm.igp_ht_link_clk, tmp);
+			rdev->pm.igp_ht_link_width.full = rfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth));
+			break;
+		default:
+			tmp.full = rfixed_const(100);
+			/* Assume the slowest possible clock, i.e. the worst case: */
+			/* DDR 333 MHz */
+			rdev->pm.igp_sideport_mclk.full = rfixed_const(333);
+			/* FIXME: system clock ? */
+			rdev->pm.igp_system_mclk.full = rfixed_const(100);
+			rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp);
+			rdev->pm.igp_ht_link_clk.full = rfixed_const(200);
+			rdev->pm.igp_ht_link_width.full = rfixed_const(8);
+			DRM_ERROR("No integrated system info for your GPU, using safe default\n");
+			break;
+		}
+	} else {
 		tmp.full = rfixed_const(100);
 		/* We assume the slower possible clock ie worst case */
 		/* DDR 333Mhz */
@@ -103,7 +119,6 @@
 		rdev->pm.igp_ht_link_clk.full = rfixed_const(200);
 		rdev->pm.igp_ht_link_width.full = rfixed_const(8);
 		DRM_ERROR("No integrated system info for your GPU, using safe default\n");
-		break;
 	}
 	/* Compute various bandwidth */
 	/* k8_bandwidth = (memory_clk / 2) * 2 * 8 * 0.5 = memory_clk * 4  */
@@ -131,7 +146,6 @@
 
 void rs690_mc_init(struct radeon_device *rdev)
 {
-	fixed20_12 a;
 	u64 base;
 
 	rs400_gart_adjust_size(rdev);
@@ -145,18 +159,10 @@
 	base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
 	base = G_000100_MC_FB_START(base) << 16;
 	rs690_pm_info(rdev);
-	/* FIXME: we should enforce default clock in case GPU is not in
-	 * default setup
-	 */
-	a.full = rfixed_const(100);
-	rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
-	rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
-	a.full = rfixed_const(16);
-	/* core_bandwidth = sclk(Mhz) * 16 */
-	rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a);
 	rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
 	radeon_vram_location(rdev, &rdev->mc, base);
 	radeon_gtt_location(rdev, &rdev->mc);
+	radeon_update_bandwidth_info(rdev);
 }
 
 void rs690_line_buffer_adjust(struct radeon_device *rdev,
@@ -394,10 +400,12 @@
 	struct drm_display_mode *mode1 = NULL;
 	struct rs690_watermark wm0;
 	struct rs690_watermark wm1;
-	u32 tmp;
+	u32 tmp, d1mode_priority_a_cnt, d2mode_priority_a_cnt;
 	fixed20_12 priority_mark02, priority_mark12, fill_rate;
 	fixed20_12 a, b;
 
+	radeon_update_display_priority(rdev);
+
 	if (rdev->mode_info.crtcs[0]->base.enabled)
 		mode0 = &rdev->mode_info.crtcs[0]->base.mode;
 	if (rdev->mode_info.crtcs[1]->base.enabled)
@@ -407,7 +415,8 @@
 	 * modes if the user specifies HIGH for displaypriority
 	 * option.
 	 */
-	if (rdev->disp_priority == 2) {
+	if ((rdev->disp_priority == 2) &&
+	    ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))) {
 		tmp = RREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER);
 		tmp &= C_000104_MC_DISP0R_INIT_LAT;
 		tmp &= C_000104_MC_DISP1R_INIT_LAT;
@@ -482,10 +491,16 @@
 			priority_mark12.full = 0;
 		if (wm1.priority_mark_max.full > priority_mark12.full)
 			priority_mark12.full = wm1.priority_mark_max.full;
-		WREG32(R_006548_D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
-		WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
-		WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
-		WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
+		d1mode_priority_a_cnt = rfixed_trunc(priority_mark02);
+		d2mode_priority_a_cnt = rfixed_trunc(priority_mark12);
+		if (rdev->disp_priority == 2) {
+			d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
+			d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
+		}
+		WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
+		WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
+		WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
+		WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
 	} else if (mode0) {
 		if (rfixed_trunc(wm0.dbpp) > 64)
 			a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair);
@@ -512,8 +527,11 @@
 			priority_mark02.full = 0;
 		if (wm0.priority_mark_max.full > priority_mark02.full)
 			priority_mark02.full = wm0.priority_mark_max.full;
-		WREG32(R_006548_D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
-		WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
+		d1mode_priority_a_cnt = rfixed_trunc(priority_mark02);
+		if (rdev->disp_priority == 2)
+			d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
+		WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
+		WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
 		WREG32(R_006D48_D2MODE_PRIORITY_A_CNT,
 			S_006D48_D2MODE_PRIORITY_A_OFF(1));
 		WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT,
@@ -544,12 +562,15 @@
 			priority_mark12.full = 0;
 		if (wm1.priority_mark_max.full > priority_mark12.full)
 			priority_mark12.full = wm1.priority_mark_max.full;
+		d2mode_priority_a_cnt = rfixed_trunc(priority_mark12);
+		if (rdev->disp_priority == 2)
+			d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
 		WREG32(R_006548_D1MODE_PRIORITY_A_CNT,
 			S_006548_D1MODE_PRIORITY_A_OFF(1));
 		WREG32(R_00654C_D1MODE_PRIORITY_B_CNT,
 			S_00654C_D1MODE_PRIORITY_B_OFF(1));
-		WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
-		WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
+		WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
+		WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
 	}
 }
 
@@ -657,6 +678,7 @@
 
 void rs690_fini(struct radeon_device *rdev)
 {
+	radeon_pm_fini(rdev);
 	r100_cp_fini(rdev);
 	r100_wb_fini(rdev);
 	r100_ib_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rs690d.h b/drivers/gpu/drm/radeon/rs690d.h
index 62d31e7..36e6398 100644
--- a/drivers/gpu/drm/radeon/rs690d.h
+++ b/drivers/gpu/drm/radeon/rs690d.h
@@ -182,6 +182,9 @@
 #define   S_006548_D1MODE_PRIORITY_A_OFF(x)            (((x) & 0x1) << 16)
 #define   G_006548_D1MODE_PRIORITY_A_OFF(x)            (((x) >> 16) & 0x1)
 #define   C_006548_D1MODE_PRIORITY_A_OFF               0xFFFEFFFF
+#define   S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x)      (((x) & 0x1) << 20)
+#define   G_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x)      (((x) >> 20) & 0x1)
+#define   C_006548_D1MODE_PRIORITY_A_ALWAYS_ON         0xFFEFFFFF
 #define   S_006548_D1MODE_PRIORITY_A_FORCE_MASK(x)     (((x) & 0x1) << 24)
 #define   G_006548_D1MODE_PRIORITY_A_FORCE_MASK(x)     (((x) >> 24) & 0x1)
 #define   C_006548_D1MODE_PRIORITY_A_FORCE_MASK        0xFEFFFFFF
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index bea747d..1cf233f 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -29,6 +29,7 @@
 #include "drmP.h"
 #include "rv515d.h"
 #include "radeon.h"
+#include "radeon_asic.h"
 #include "atom.h"
 #include "rv515_reg_safe.h"
 
@@ -279,19 +280,13 @@
 
 void rv515_mc_init(struct radeon_device *rdev)
 {
-	fixed20_12 a;
 
 	rv515_vram_get_type(rdev);
 	r100_vram_init_sizes(rdev);
 	radeon_vram_location(rdev, &rdev->mc, 0);
 	if (!(rdev->flags & RADEON_IS_AGP))
 		radeon_gtt_location(rdev, &rdev->mc);
-	/* FIXME: we should enforce default clock in case GPU is not in
-	 * default setup
-	 */
-	a.full = rfixed_const(100);
-	rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
-	rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
+	radeon_update_bandwidth_info(rdev);
 }
 
 uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg)
@@ -539,6 +534,7 @@
 
 void rv515_fini(struct radeon_device *rdev)
 {
+	radeon_pm_fini(rdev);
 	r100_cp_fini(rdev);
 	r100_wb_fini(rdev);
 	r100_ib_fini(rdev);
@@ -1020,7 +1016,7 @@
 	struct drm_display_mode *mode1 = NULL;
 	struct rv515_watermark wm0;
 	struct rv515_watermark wm1;
-	u32 tmp;
+	u32 tmp, d1mode_priority_a_cnt, d2mode_priority_a_cnt;
 	fixed20_12 priority_mark02, priority_mark12, fill_rate;
 	fixed20_12 a, b;
 
@@ -1088,10 +1084,16 @@
 			priority_mark12.full = 0;
 		if (wm1.priority_mark_max.full > priority_mark12.full)
 			priority_mark12.full = wm1.priority_mark_max.full;
-		WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
-		WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
-		WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
-		WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
+		d1mode_priority_a_cnt = rfixed_trunc(priority_mark02);
+		d2mode_priority_a_cnt = rfixed_trunc(priority_mark12);
+		if (rdev->disp_priority == 2) {
+			d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
+			d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
+		}
+		WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
+		WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
+		WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
+		WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
 	} else if (mode0) {
 		if (rfixed_trunc(wm0.dbpp) > 64)
 			a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair);
@@ -1118,8 +1120,11 @@
 			priority_mark02.full = 0;
 		if (wm0.priority_mark_max.full > priority_mark02.full)
 			priority_mark02.full = wm0.priority_mark_max.full;
-		WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
-		WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
+		d1mode_priority_a_cnt = rfixed_trunc(priority_mark02);
+		if (rdev->disp_priority == 2)
+			d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
+		WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
+		WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
 		WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
 		WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
 	} else {
@@ -1148,10 +1153,13 @@
 			priority_mark12.full = 0;
 		if (wm1.priority_mark_max.full > priority_mark12.full)
 			priority_mark12.full = wm1.priority_mark_max.full;
+		d2mode_priority_a_cnt = rfixed_trunc(priority_mark12);
+		if (rdev->disp_priority == 2)
+			d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
 		WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
 		WREG32(D1MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
-		WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
-		WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
+		WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
+		WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
 	}
 }
 
@@ -1161,6 +1169,8 @@
 	struct drm_display_mode *mode0 = NULL;
 	struct drm_display_mode *mode1 = NULL;
 
+	radeon_update_display_priority(rdev);
+
 	if (rdev->mode_info.crtcs[0]->base.enabled)
 		mode0 = &rdev->mode_info.crtcs[0]->base.mode;
 	if (rdev->mode_info.crtcs[1]->base.enabled)
@@ -1170,7 +1180,8 @@
 	 * modes if the user specifies HIGH for displaypriority
 	 * option.
 	 */
-	if (rdev->disp_priority == 2) {
+	if ((rdev->disp_priority == 2) &&
+	    (rdev->family == CHIP_RV515)) {
 		tmp = RREG32_MC(MC_MISC_LAT_TIMER);
 		tmp &= ~MC_DISP1R_INIT_LAT_MASK;
 		tmp &= ~MC_DISP0R_INIT_LAT_MASK;
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 37887de..9f37d2e 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -29,6 +29,7 @@
 #include <linux/platform_device.h>
 #include "drmP.h"
 #include "radeon.h"
+#include "radeon_asic.h"
 #include "radeon_drm.h"
 #include "rv770d.h"
 #include "atom.h"
@@ -125,9 +126,9 @@
 
 void rv770_pcie_gart_fini(struct radeon_device *rdev)
 {
+	radeon_gart_fini(rdev);
 	rv770_pcie_gart_disable(rdev);
 	radeon_gart_table_vram_free(rdev);
-	radeon_gart_fini(rdev);
 }
 
 
@@ -647,10 +648,13 @@
 
 	WREG32(CC_RB_BACKEND_DISABLE,      cc_rb_backend_disable);
 	WREG32(CC_GC_SHADER_PIPE_CONFIG,   cc_gc_shader_pipe_config);
+	WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
 	WREG32(CC_SYS_RB_BACKEND_DISABLE,  cc_rb_backend_disable);
 
 	WREG32(CGTS_SYS_TCC_DISABLE, 0);
 	WREG32(CGTS_TCC_DISABLE, 0);
+	WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
+	WREG32(CGTS_USER_TCC_DISABLE, 0);
 
 	num_qd_pipes =
 		R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
@@ -864,7 +868,6 @@
 
 int rv770_mc_init(struct radeon_device *rdev)
 {
-	fixed20_12 a;
 	u32 tmp;
 	int chansize, numchan;
 
@@ -908,12 +911,8 @@
 		rdev->mc.real_vram_size = rdev->mc.aper_size;
 	}
 	r600_vram_gtt_location(rdev, &rdev->mc);
-	/* FIXME: we should enforce default clock in case GPU is not in
-	 * default setup
-	 */
-	a.full = rfixed_const(100);
-	rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
-	rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
+	radeon_update_bandwidth_info(rdev);
+
 	return 0;
 }
 
@@ -1013,6 +1012,13 @@
 		DRM_ERROR("radeon: failled testing IB (%d).\n", r);
 		return r;
 	}
+
+	r = r600_audio_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "radeon: audio init failed\n");
+		return r;
+	}
+
 	return r;
 
 }
@@ -1021,6 +1027,7 @@
 {
 	int r;
 
+	r600_audio_fini(rdev);
 	/* FIXME: we should wait for ring to be empty */
 	r700_cp_stop(rdev);
 	rdev->cp.ready = false;
@@ -1144,11 +1151,19 @@
 			}
 		}
 	}
+
+	r = r600_audio_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "radeon: audio init failed\n");
+		return r;
+	}
+
 	return 0;
 }
 
 void rv770_fini(struct radeon_device *rdev)
 {
+	radeon_pm_fini(rdev);
 	r600_blit_fini(rdev);
 	r600_cp_fini(rdev);
 	r600_wb_fini(rdev);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 89c38c4..dd47b2a 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1425,8 +1425,8 @@
 
 	atomic_set(&glob->bo_count, 0);
 
-	kobject_init(&glob->kobj, &ttm_bo_glob_kobj_type);
-	ret = kobject_add(&glob->kobj, ttm_get_kobj(), "buffer_objects");
+	ret = kobject_init_and_add(
+		&glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
 	if (unlikely(ret != 0))
 		kobject_put(&glob->kobj);
 	return ret;
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index eb143e0..c40e5f4 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -260,8 +260,8 @@
 	zone->used_mem = 0;
 	zone->glob = glob;
 	glob->zone_kernel = zone;
-	kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type);
-	ret = kobject_add(&zone->kobj, &glob->kobj, zone->name);
+	ret = kobject_init_and_add(
+		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
 	if (unlikely(ret != 0)) {
 		kobject_put(&zone->kobj);
 		return ret;
@@ -296,8 +296,8 @@
 	zone->used_mem = 0;
 	zone->glob = glob;
 	glob->zone_highmem = zone;
-	kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type);
-	ret = kobject_add(&zone->kobj, &glob->kobj, zone->name);
+	ret = kobject_init_and_add(
+		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
 	if (unlikely(ret != 0)) {
 		kobject_put(&zone->kobj);
 		return ret;
@@ -343,8 +343,8 @@
 	zone->used_mem = 0;
 	zone->glob = glob;
 	glob->zone_dma32 = zone;
-	kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type);
-	ret = kobject_add(&zone->kobj, &glob->kobj, zone->name);
+	ret = kobject_init_and_add(
+		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
 	if (unlikely(ret != 0)) {
 		kobject_put(&zone->kobj);
 		return ret;
@@ -365,10 +365,8 @@
 	glob->swap_queue = create_singlethread_workqueue("ttm_swap");
 	INIT_WORK(&glob->work, ttm_shrink_work);
 	init_waitqueue_head(&glob->queue);
-	kobject_init(&glob->kobj, &ttm_mem_glob_kobj_type);
-	ret = kobject_add(&glob->kobj,
-			  ttm_get_kobj(),
-			  "memory_accounting");
+	ret = kobject_init_and_add(
+		&glob->kobj, &ttm_mem_glob_kobj_type, ttm_get_kobj(), "memory_accounting");
 	if (unlikely(ret != 0)) {
 		kobject_put(&glob->kobj);
 		return ret;
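
The ttm_bo.c and ttm_memory.c hunks above collapse each kobject_init()/kobject_add() pair into a single kobject_init_and_add() call while keeping the kobject_put() on the error path. Below is a minimal, self-contained module sketch of that pattern; the demo_* names are hypothetical, only the kobject calls themselves are real kernel API.

/* Minimal module sketch of the kobject_init_and_add() pattern used in the
 * TTM hunks above. */
#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/slab.h>

struct demo_obj {
	struct kobject kobj;
};

static struct demo_obj *demo;

static void demo_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct demo_obj, kobj));
}

static struct kobj_type demo_ktype = {
	.release = demo_release,
};

static int __init demo_init(void)
{
	int ret;

	demo = kzalloc(sizeof(*demo), GFP_KERNEL);
	if (!demo)
		return -ENOMEM;

	/* One call does what kobject_init() + kobject_add() used to;
	 * on failure the half-initialized kobject still needs a put. */
	ret = kobject_init_and_add(&demo->kobj, &demo_ktype,
				   kernel_kobj, "ttm_style_demo");
	if (ret)
		kobject_put(&demo->kobj);
	return ret;
}

static void __exit demo_exit(void)
{
	kobject_put(&demo->kobj);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
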
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index a759170..bab6cd8 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -28,13 +28,13 @@
  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
  */
 
-#include <linux/vmalloc.h>
 #include <linux/sched.h>
 #include <linux/highmem.h>
 #include <linux/pagemap.h>
 #include <linux/file.h>
 #include <linux/swap.h>
 #include "drm_cache.h"
+#include "drm_mem_util.h"
 #include "ttm/ttm_module.h"
 #include "ttm/ttm_bo_driver.h"
 #include "ttm/ttm_placement.h"
@@ -43,32 +43,15 @@
 
 /**
  * Allocates storage for pointers to the pages that back the ttm.
- *
- * Uses kmalloc if possible. Otherwise falls back to vmalloc.
  */
 static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
 {
-	unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
-	ttm->pages = NULL;
-
-	if (size <= PAGE_SIZE)
-		ttm->pages = kzalloc(size, GFP_KERNEL);
-
-	if (!ttm->pages) {
-		ttm->pages = vmalloc_user(size);
-		if (ttm->pages)
-			ttm->page_flags |= TTM_PAGE_FLAG_VMALLOC;
-	}
+	ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(*ttm->pages));
 }
 
 static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
 {
-	if (ttm->page_flags & TTM_PAGE_FLAG_VMALLOC) {
-		vfree(ttm->pages);
-		ttm->page_flags &= ~TTM_PAGE_FLAG_VMALLOC;
-	} else {
-		kfree(ttm->pages);
-	}
+	drm_free_large(ttm->pages);
 	ttm->pages = NULL;
 }
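
The ttm_tt.c hunk drops the open-coded "kzalloc if it fits in a page, otherwise vmalloc" fallback (and the TTM_PAGE_FLAG_VMALLOC bookkeeping that went with it) in favour of the shared drm_calloc_large()/drm_free_large() helpers from drm_mem_util.h. Below is a rough user-space sketch of the size-switching idea those helpers capture; it assumes nothing about their exact kernel implementation.

/* Rough user-space sketch of the size-switching allocation idea that
 * drm_calloc_large()/drm_free_large() centralize: small arrays come from the
 * ordinary allocator, big ones from a page-granular mapping. */
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

static void *calloc_large(size_t nmemb, size_t size, int *mapped)
{
	size_t bytes = nmemb * size;

	if (size && nmemb > (size_t)-1 / size)
		return NULL;			/* overflow check */

	if (bytes <= (size_t)sysconf(_SC_PAGESIZE)) {
		*mapped = 0;
		return calloc(nmemb, size);	/* the "kzalloc" path */
	}
	*mapped = 1;				/* the "vmalloc" path */
	void *p = mmap(NULL, bytes, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	return p == MAP_FAILED ? NULL : p;
}

static void free_large(void *p, size_t nmemb, size_t size, int mapped)
{
	if (!p)
		return;
	if (mapped)
		munmap(p, nmemb * size);
	else
		free(p);
}

int main(void)
{
	int mapped;
	void *pages = calloc_large(4096, sizeof(void *), &mapped);

	printf("4096 pointers -> %s allocation\n", mapped ? "mapped" : "heap");
	free_large(pages, 4096, sizeof(void *), mapped);
	return 0;
}
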
 
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
index f20b8bc..30ad133 100644
--- a/drivers/gpu/drm/vmwgfx/Kconfig
+++ b/drivers/gpu/drm/vmwgfx/Kconfig
@@ -1,6 +1,6 @@
 config DRM_VMWGFX
 	tristate "DRM driver for VMware Virtual GPU"
-	depends on DRM && PCI
+	depends on DRM && PCI && FB
 	select FB_DEFERRED_IO
 	select FB_CFB_FILLRECT
 	select FB_CFB_COPYAREA
diff --git a/drivers/hid/hid-gyration.c b/drivers/hid/hid-gyration.c
index cab13e8..62416e6 100644
--- a/drivers/hid/hid-gyration.c
+++ b/drivers/hid/hid-gyration.c
@@ -53,10 +53,13 @@
 static int gyration_event(struct hid_device *hdev, struct hid_field *field,
 		struct hid_usage *usage, __s32 value)
 {
-	struct input_dev *input = field->hidinput->input;
+
+	if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput)
+		return 0;
 
 	if ((usage->hid & HID_USAGE_PAGE) == HID_UP_GENDESK &&
 			(usage->hid & 0xff) == 0x82) {
+		struct input_dev *input = field->hidinput->input;
 		input_event(input, usage->type, usage->code, 1);
 		input_sync(input);
 		input_event(input, usage->type, usage->code, 0);
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 928943c..e71e005 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -60,6 +60,7 @@
 	{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS, HID_QUIRK_NOGET },
+	{ USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209, HID_QUIRK_MULTI_INPUT },
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index e4595e6..9be8e17 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -217,8 +217,8 @@
 	depends on HWMON && I2C
 	help
 	  If you say yes here you get support for the aSC7621
-	  family of SMBus sensors chip found on most Intel X48, X38, 975,
-	  965 and 945 desktop boards.  Currently supported chips:
+	  family of SMBus sensor chips found on most Intel X38, X48, X58,
+	  945, 965 and 975 desktop boards.  Currently supported chips:
 	  aSC7621
 	  aSC7621a
 
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 2d7bcee..e9b7fbc 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -228,7 +228,7 @@
 		if (err) {
 			dev_warn(dev,
 				 "Unable to access MSR 0xEE, for Tjmax, left"
-				 " at default");
+				 " at default\n");
 		} else if (eax & 0x40000000) {
 			tjmax = tjmax_ee;
 		}
@@ -466,7 +466,7 @@
 			   family 6 CPU */
 			if ((c->x86 == 0x6) && (c->x86_model > 0xf))
 				printk(KERN_WARNING DRVNAME ": Unknown CPU "
-					"model %x\n", c->x86_model);
+					"model 0x%x\n", c->x86_model);
 			continue;
 		}
 
diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c
index 9de81a4..612807d 100644
--- a/drivers/hwmon/w83793.c
+++ b/drivers/hwmon/w83793.c
@@ -1294,7 +1294,7 @@
 static ssize_t watchdog_write(struct file *filp, const char __user *buf,
 	size_t count, loff_t *offset)
 {
-	size_t ret;
+	ssize_t ret;
 	struct w83793_data *data = filp->private_data;
 
 	if (count) {
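
The w83793 watchdog_write() fix matters because size_t is unsigned: a negative errno such as -EFAULT stored in a size_t becomes an enormous positive "bytes written" count, while ssize_t preserves the sign the caller expects. A tiny user-space demonstration (the errno value is just a stand-in):

/* Why the watchdog_write() return variable must be signed. */
#include <stdio.h>
#include <sys/types.h>

#define EFAULT 14	/* stand-in for the kernel's EFAULT error code */

int main(void)
{
	size_t wrong = -EFAULT;		/* wraps to a huge positive value */
	ssize_t right = -EFAULT;	/* stays negative, as callers expect */

	printf("size_t  ret = %zu\n", wrong);
	printf("ssize_t ret = %zd\n", right);
	return 0;
}
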
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index fbedd35..4c3d1bf 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -695,14 +695,8 @@
 	if (irqd)
 		disable_irq(hwif->irq);
 
-	rc = ide_port_wait_ready(hwif);
-	if (rc == -ENODEV) {
-		printk(KERN_INFO "%s: no devices on the port\n", hwif->name);
-		goto out;
-	} else if (rc == -EBUSY)
-		printk(KERN_ERR "%s: not ready before the probe\n", hwif->name);
-	else
-		rc = -ENODEV;
+	if (ide_port_wait_ready(hwif) == -EBUSY)
+		printk(KERN_DEBUG "%s: Wait for ready failed before probe!\n", hwif->name);
 
 	/*
 	 * Second drive should only exist if first drive was found,
@@ -713,7 +707,7 @@
 		if (drive->dev_flags & IDE_DFLAG_PRESENT)
 			rc = 0;
 	}
-out:
+
 	/*
 	 * Use cached IRQ number. It might be (and is...) changed by probe
 	 * code above
diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
index e65d010..48fd4ef 100644
--- a/drivers/ide/via82cxxx.c
+++ b/drivers/ide/via82cxxx.c
@@ -110,7 +110,6 @@
 {
 	struct via_isa_bridge *via_config;
 	unsigned int via_80w;
-	u8 cached_device[2];
 };
 
 /**
@@ -403,66 +402,10 @@
 	.cable_detect		= via82cxxx_cable_detect,
 };
 
-static void via_write_devctl(ide_hwif_t *hwif, u8 ctl)
-{
-	struct via82cxxx_dev *vdev = hwif->host->host_priv;
-
-	outb(ctl, hwif->io_ports.ctl_addr);
-	outb(vdev->cached_device[hwif->channel], hwif->io_ports.device_addr);
-}
-
-static void __via_dev_select(ide_drive_t *drive, u8 select)
-{
-	ide_hwif_t *hwif = drive->hwif;
-	struct via82cxxx_dev *vdev = hwif->host->host_priv;
-
-	outb(select, hwif->io_ports.device_addr);
-	vdev->cached_device[hwif->channel] = select;
-}
-
-static void via_dev_select(ide_drive_t *drive)
-{
-	__via_dev_select(drive, drive->select | ATA_DEVICE_OBS);
-}
-
-static void via_tf_load(ide_drive_t *drive, struct ide_taskfile *tf, u8 valid)
-{
-	ide_hwif_t *hwif = drive->hwif;
-	struct ide_io_ports *io_ports = &hwif->io_ports;
-
-	if (valid & IDE_VALID_FEATURE)
-		outb(tf->feature, io_ports->feature_addr);
-	if (valid & IDE_VALID_NSECT)
-		outb(tf->nsect, io_ports->nsect_addr);
-	if (valid & IDE_VALID_LBAL)
-		outb(tf->lbal, io_ports->lbal_addr);
-	if (valid & IDE_VALID_LBAM)
-		outb(tf->lbam, io_ports->lbam_addr);
-	if (valid & IDE_VALID_LBAH)
-		outb(tf->lbah, io_ports->lbah_addr);
-	if (valid & IDE_VALID_DEVICE)
-		__via_dev_select(drive, tf->device);
-}
-
-const struct ide_tp_ops via_tp_ops = {
-	.exec_command		= ide_exec_command,
-	.read_status		= ide_read_status,
-	.read_altstatus		= ide_read_altstatus,
-	.write_devctl		= via_write_devctl,
-
-	.dev_select		= via_dev_select,
-	.tf_load		= via_tf_load,
-	.tf_read		= ide_tf_read,
-
-	.input_data		= ide_input_data,
-	.output_data		= ide_output_data,
-};
-
 static const struct ide_port_info via82cxxx_chipset __devinitdata = {
 	.name		= DRV_NAME,
 	.init_chipset	= init_chipset_via82cxxx,
 	.enablebits	= { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
-	.tp_ops		= &via_tp_ops,
 	.port_ops	= &via_port_ops,
 	.host_flags	= IDE_HFLAG_PIO_NO_BLACKLIST |
 			  IDE_HFLAG_POST_SET_MODE |
diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
index fcb6ec1..7245023 100644
--- a/drivers/misc/kgdbts.c
+++ b/drivers/misc/kgdbts.c
@@ -295,6 +295,10 @@
 	/* On x86 a breakpoint stop requires it to be decremented */
 	if (addr + 1 == kgdbts_regs.ip)
 		offset = -1;
+#elif defined(CONFIG_SUPERH)
+	/* On SUPERH a breakpoint stop requires it to be decremented */
+	if (addr + 2 == kgdbts_regs.pc)
+		offset = -2;
 #endif
 	if (strcmp(arg, "silent") &&
 		instruction_pointer(&kgdbts_regs) + offset != addr) {
@@ -305,6 +309,8 @@
 #ifdef CONFIG_X86
 	/* On x86 adjust the instruction pointer if needed */
 	kgdbts_regs.ip += offset;
+#elif defined(CONFIG_SUPERH)
+	kgdbts_regs.pc += offset;
 #endif
 	return 0;
 }
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 9ba5470..0ebd820 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -84,7 +84,7 @@
 
 #define ATLX_DRIVER_VERSION "2.1.3"
 MODULE_AUTHOR("Xiong Huang <xiong.huang@atheros.com>, \
-	Chris Snook <csnook@redhat.com>, Jay Cliburn <jcliburn@gmail.com>");
+Chris Snook <csnook@redhat.com>, Jay Cliburn <jcliburn@gmail.com>");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(ATLX_DRIVER_VERSION);
 
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index 9560d48..51e1065 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -490,7 +490,7 @@
 {
 	int ret, i;
 	struct be_dma_mem ddrdma_cmd;
-	u64 pattern[2] = {0x5a5a5a5a5a5a5a5a, 0xa5a5a5a5a5a5a5a5};
+	u64 pattern[2] = {0x5a5a5a5a5a5a5a5aULL, 0xa5a5a5a5a5a5a5a5ULL};
 
 	ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
 	ddrdma_cmd.va = pci_alloc_consistent(adapter->pdev, ddrdma_cmd.size,
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 430c022..5b92fbf 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1235,6 +1235,11 @@
 			write_lock_bh(&bond->curr_slave_lock);
 		}
 	}
+
+	/* resend IGMP joins since all were sent on curr_active_slave */
+	if (bond->params.mode == BOND_MODE_ROUNDROBIN) {
+		bond_resend_igmp_join_requests(bond);
+	}
 }
 
 /**
@@ -4138,22 +4143,41 @@
 	struct bonding *bond = netdev_priv(bond_dev);
 	struct slave *slave, *start_at;
 	int i, slave_no, res = 1;
+	struct iphdr *iph = ip_hdr(skb);
 
 	read_lock(&bond->lock);
 
 	if (!BOND_IS_OK(bond))
 		goto out;
-
 	/*
-	 * Concurrent TX may collide on rr_tx_counter; we accept that
-	 * as being rare enough not to justify using an atomic op here
+	 * Start with the curr_active_slave that joined the bond as the
+	 * default for sending IGMP traffic.  For failover purposes one
+	 * needs to maintain some consistency for the interface that will
+	 * send the join/membership reports.  The curr_active_slave found
+	 * will send all of this type of traffic.
 	 */
-	slave_no = bond->rr_tx_counter++ % bond->slave_cnt;
+	if ((skb->protocol == htons(ETH_P_IP)) &&
+	    (iph->protocol == IPPROTO_IGMP)) {
 
-	bond_for_each_slave(bond, slave, i) {
-		slave_no--;
-		if (slave_no < 0)
-			break;
+		read_lock(&bond->curr_slave_lock);
+		slave = bond->curr_active_slave;
+		read_unlock(&bond->curr_slave_lock);
+
+		if (!slave)
+			goto out;
+	} else {
+		/*
+		 * Concurrent TX may collide on rr_tx_counter; we accept
+		 * that as being rare enough not to justify using an
+		 * atomic op here.
+		 */
+		slave_no = bond->rr_tx_counter++ % bond->slave_cnt;
+
+		bond_for_each_slave(bond, slave, i) {
+			slave_no--;
+			if (slave_no < 0)
+				break;
+		}
 	}
 
 	start_at = slave;
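
The bonding change pins IGMP membership traffic to curr_active_slave so joins and reports always leave through one consistent interface, while all other traffic keeps rotating on rr_tx_counter. Below is a simplified, self-contained sketch of that transmit-slave choice; the packet struct and the ETH_P_IP literal are stand-ins, and note that the IP protocol field is a single byte, so it is compared against IPPROTO_IGMP directly.

/* Simplified sketch of the transmit-slave choice in bond_xmit_roundrobin():
 * IGMP goes out the current active slave, everything else round-robins. */
#include <netinet/in.h>		/* IPPROTO_IGMP, IPPROTO_TCP */
#include <stdint.h>
#include <stdio.h>

struct pkt {
	uint16_t ethertype;	/* kept in host order here for simplicity */
	uint8_t ip_protocol;	/* iph->protocol is a single byte */
};

static int pick_slave(const struct pkt *p, int curr_active, int slave_cnt,
		      unsigned int *rr_counter)
{
	if (p->ethertype == 0x0800 /* ETH_P_IP */ &&
	    p->ip_protocol == IPPROTO_IGMP)
		return curr_active;		/* keep joins on one port */
	return (*rr_counter)++ % slave_cnt;	/* plain round-robin */
}

int main(void)
{
	unsigned int rr = 0;
	struct pkt igmp = { 0x0800, IPPROTO_IGMP };
	struct pkt tcp  = { 0x0800, IPPROTO_TCP };

	printf("igmp -> slave %d\n", pick_slave(&igmp, 2, 4, &rr));
	printf("tcp  -> slave %d\n", pick_slave(&tcp,  2, 4, &rr));
	printf("tcp  -> slave %d\n", pick_slave(&tcp,  2, 4, &rr));
	return 0;
}
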
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 9902b33..2f29c21 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -261,7 +261,6 @@
 	/* TX */
 	struct e1000_tx_ring *tx_ring;      /* One per active queue */
 	unsigned int restart_queue;
-	unsigned long tx_queue_len;
 	u32 txd_cmd;
 	u32 tx_int_delay;
 	u32 tx_abs_int_delay;
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 8be6fae..b15ece2 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -383,8 +383,6 @@
 		adapter->alloc_rx_buf(adapter, ring,
 		                      E1000_DESC_UNUSED(ring));
 	}
-
-	adapter->tx_queue_len = netdev->tx_queue_len;
 }
 
 int e1000_up(struct e1000_adapter *adapter)
@@ -503,7 +501,6 @@
 	del_timer_sync(&adapter->watchdog_timer);
 	del_timer_sync(&adapter->phy_info_timer);
 
-	netdev->tx_queue_len = adapter->tx_queue_len;
 	adapter->link_speed = 0;
 	adapter->link_duplex = 0;
 	netif_carrier_off(netdev);
@@ -2316,19 +2313,15 @@
 			        E1000_CTRL_RFCE) ? "RX" : ((ctrl &
 			        E1000_CTRL_TFCE) ? "TX" : "None" )));
 
-			/* tweak tx_queue_len according to speed/duplex
-			 * and adjust the timeout factor */
-			netdev->tx_queue_len = adapter->tx_queue_len;
+			/* adjust timeout factor according to speed/duplex */
 			adapter->tx_timeout_factor = 1;
 			switch (adapter->link_speed) {
 			case SPEED_10:
 				txb2b = false;
-				netdev->tx_queue_len = 10;
 				adapter->tx_timeout_factor = 16;
 				break;
 			case SPEED_100:
 				txb2b = false;
-				netdev->tx_queue_len = 100;
 				/* maybe add some timeout factor ? */
 				break;
 			}
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index c2ec095..118bdf4 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -279,7 +279,6 @@
 
 	struct napi_struct napi;
 
-	unsigned long tx_queue_len;
 	unsigned int restart_queue;
 	u32 txd_cmd;
 
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 88d54d3..e1cceb6 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -2289,8 +2289,6 @@
 	ew32(TCTL, tctl);
 
 	e1000e_config_collision_dist(hw);
-
-	adapter->tx_queue_len = adapter->netdev->tx_queue_len;
 }
 
 /**
@@ -2877,7 +2875,6 @@
 	del_timer_sync(&adapter->watchdog_timer);
 	del_timer_sync(&adapter->phy_info_timer);
 
-	netdev->tx_queue_len = adapter->tx_queue_len;
 	netif_carrier_off(netdev);
 	adapter->link_speed = 0;
 	adapter->link_duplex = 0;
@@ -3588,21 +3585,15 @@
 					       "link gets many collisions.\n");
 			}
 
-			/*
-			 * tweak tx_queue_len according to speed/duplex
-			 * and adjust the timeout factor
-			 */
-			netdev->tx_queue_len = adapter->tx_queue_len;
+			/* adjust timeout factor according to speed/duplex */
 			adapter->tx_timeout_factor = 1;
 			switch (adapter->link_speed) {
 			case SPEED_10:
 				txb2b = 0;
-				netdev->tx_queue_len = 10;
 				adapter->tx_timeout_factor = 16;
 				break;
 			case SPEED_100:
 				txb2b = 0;
-				netdev->tx_queue_len = 100;
 				adapter->tx_timeout_factor = 10;
 				break;
 			}
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index b671555..669de02 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -2393,6 +2393,7 @@
 	 * as many bytes as needed to align the data properly
 	 */
 	skb_reserve(skb, alignamount);
+	GFAR_CB(skb)->alignamount = alignamount;
 
 	return skb;
 }
@@ -2533,13 +2534,13 @@
 				newskb = skb;
 			else if (skb) {
 				/*
-				 * We need to reset ->data to what it
+				 * We need to un-reserve() the skb to what it
 				 * was before gfar_new_skb() re-aligned
 				 * it to an RXBUF_ALIGNMENT boundary
 				 * before we put the skb back on the
 				 * recycle list.
 				 */
-				skb->data = skb->head + NET_SKB_PAD;
+				skb_reserve(skb, -GFAR_CB(skb)->alignamount);
 				__skb_queue_head(&priv->rx_recycle, skb);
 			}
 		} else {
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index 3d72dc4..17d25e7 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -566,6 +566,12 @@
 	u16	vlctl;	/* VLAN control word */
 };
 
+struct gianfar_skb_cb {
+	int alignamount;
+};
+
+#define GFAR_CB(skb) ((struct gianfar_skb_cb *)((skb)->cb))
+
 struct rmon_mib
 {
 	u32	tr64;	/* 0x.680 - Transmit and Receive 64-byte Frame Counter */
diff --git a/drivers/net/igb/e1000_mac.c b/drivers/net/igb/e1000_mac.c
index 2a8a886..be8d010 100644
--- a/drivers/net/igb/e1000_mac.c
+++ b/drivers/net/igb/e1000_mac.c
@@ -1367,7 +1367,8 @@
  *  igb_enable_mng_pass_thru - Enable processing of ARP's
  *  @hw: pointer to the HW structure
  *
- *  Verifies the hardware needs to allow ARPs to be processed by the host.
+ *  Verifies that the hardware needs to leave the interface enabled so that
+ *  frames can be directed to and from the management interface.
  **/
 bool igb_enable_mng_pass_thru(struct e1000_hw *hw)
 {
@@ -1380,8 +1381,7 @@
 
 	manc = rd32(E1000_MANC);
 
-	if (!(manc & E1000_MANC_RCV_TCO_EN) ||
-	    !(manc & E1000_MANC_EN_MAC_ADDR_FILTER))
+	if (!(manc & E1000_MANC_RCV_TCO_EN))
 		goto out;
 
 	if (hw->mac.arc_subsystem_valid) {
diff --git a/drivers/net/igbvf/igbvf.h b/drivers/net/igbvf/igbvf.h
index a1774b2..debeee2 100644
--- a/drivers/net/igbvf/igbvf.h
+++ b/drivers/net/igbvf/igbvf.h
@@ -198,7 +198,6 @@
 	struct igbvf_ring *tx_ring /* One per active queue */
 	____cacheline_aligned_in_smp;
 
-	unsigned long tx_queue_len;
 	unsigned int restart_queue;
 	u32 txd_cmd;
 
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index a77afd8..b41037e 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -1304,8 +1304,6 @@
 
 	/* enable Report Status bit */
 	adapter->txd_cmd |= E1000_ADVTXD_DCMD_RS;
-
-	adapter->tx_queue_len = adapter->netdev->tx_queue_len;
 }
 
 /**
@@ -1524,7 +1522,6 @@
 
 	del_timer_sync(&adapter->watchdog_timer);
 
-	netdev->tx_queue_len = adapter->tx_queue_len;
 	netif_carrier_off(netdev);
 
 	/* record the stats before reset*/
@@ -1857,21 +1854,15 @@
 			                          &adapter->link_duplex);
 			igbvf_print_link_info(adapter);
 
-			/*
-			 * tweak tx_queue_len according to speed/duplex
-			 * and adjust the timeout factor
-			 */
-			netdev->tx_queue_len = adapter->tx_queue_len;
+			/* adjust timeout factor according to speed/duplex */
 			adapter->tx_timeout_factor = 1;
 			switch (adapter->link_speed) {
 			case SPEED_10:
 				txb2b = 0;
-				netdev->tx_queue_len = 10;
 				adapter->tx_timeout_factor = 16;
 				break;
 			case SPEED_100:
 				txb2b = 0;
-				netdev->tx_queue_len = 100;
 				/* maybe add some timeout factor ? */
 				break;
 			}
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 19e94ee..79c35ae 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -204,14 +204,17 @@
 #define IXGBE_MAX_FDIR_INDICES 64
 #ifdef IXGBE_FCOE
 #define IXGBE_MAX_FCOE_INDICES  8
+#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES)
+#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES)
+#else
+#define MAX_RX_QUEUES IXGBE_MAX_FDIR_INDICES
+#define MAX_TX_QUEUES IXGBE_MAX_FDIR_INDICES
 #endif /* IXGBE_FCOE */
 struct ixgbe_ring_feature {
 	int indices;
 	int mask;
 } ____cacheline_internodealigned_in_smp;
 
-#define MAX_RX_QUEUES 128
-#define MAX_TX_QUEUES 128
 
 #define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
                               ? 8 : 1)
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 7949a44..1959ef7 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -1853,6 +1853,26 @@
 		if (ixgbe_link_test(adapter, &data[4]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
+		if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+			int i;
+			for (i = 0; i < adapter->num_vfs; i++) {
+				if (adapter->vfinfo[i].clear_to_send) {
+					netdev_warn(netdev, "%s",
+						    "offline diagnostic is not "
+						    "supported when VFs are "
+						    "present\n");
+					data[0] = 1;
+					data[1] = 1;
+					data[2] = 1;
+					data[3] = 1;
+					eth_test->flags |= ETH_TEST_FL_FAILED;
+					clear_bit(__IXGBE_TESTING,
+						  &adapter->state);
+					goto skip_ol_tests;
+				}
+			}
+		}
+
 		if (if_running)
 			/* indicate we're in test mode */
 			dev_close(netdev);
@@ -1908,6 +1928,7 @@
 
 		clear_bit(__IXGBE_TESTING, &adapter->state);
 	}
+skip_ol_tests:
 	msleep_interruptible(4 * 1000);
 }
 
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index 700cfc0..9276d59 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -202,6 +202,15 @@
 		addr = sg_dma_address(sg);
 		len = sg_dma_len(sg);
 		while (len) {
+			/* max number of buffers allowed in one DDP context */
+			if (j >= IXGBE_BUFFCNT_MAX) {
+				netif_err(adapter, drv, adapter->netdev,
+					  "xid=%x:%d,%d,%d:addr=%llx "
+					  "not enough descriptors\n",
+					  xid, i, j, dmacount, (u64)addr);
+				goto out_noddp_free;
+			}
+
 			/* get the offset of length of current buffer */
 			thisoff = addr & ((dma_addr_t)bufflen - 1);
 			thislen = min((bufflen - thisoff), len);
@@ -227,20 +236,13 @@
 			len -= thislen;
 			addr += thislen;
 			j++;
-			/* max number of buffers allowed in one DDP context */
-			if (j > IXGBE_BUFFCNT_MAX) {
-				DPRINTK(DRV, ERR, "xid=%x:%d,%d,%d:addr=%llx "
-					"not enough descriptors\n",
-					xid, i, j, dmacount, (u64)addr);
-				goto out_noddp_free;
-			}
 		}
 	}
 	/* only the last buffer may have non-full bufflen */
 	lastsize = thisoff + thislen;
 
 	fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT);
-	fcbuff |= (j << IXGBE_FCBUFF_BUFFCNT_SHIFT);
+	fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT);
 	fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT);
 	fcbuff |= (IXGBE_FCBUFF_VALID);
 
@@ -520,6 +522,9 @@
 	/* Enable L2 eth type filter for FCoE */
 	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE),
 			(ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN));
+	/* Enable L2 eth type filter for FIP */
+	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP),
+			(ETH_P_FIP | IXGBE_ETQF_FILTER_EN));
 	if (adapter->ring_feature[RING_F_FCOE].indices) {
 		/* Use multiple rx queues for FCoE by redirection table */
 		for (i = 0; i < IXGBE_FCRETA_SIZE; i++) {
@@ -530,6 +535,12 @@
 		}
 		IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);
 		IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0);
+		fcoe_i = f->mask;
+		fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
+		fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
+		IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),
+				IXGBE_ETQS_QUEUE_EN |
+				(fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
 	} else  {
 		/* Use single rx queue for FCoE */
 		fcoe_i = f->mask;
@@ -539,6 +550,12 @@
 				IXGBE_ETQS_QUEUE_EN |
 				(fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
 	}
+	/* send FIP frames to the first FCoE queue */
+	fcoe_i = f->mask;
+	fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
+	IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),
+			IXGBE_ETQS_QUEUE_EN |
+			(fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
 
 	IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL,
 			IXGBE_FCRXCTRL_FCOELLI |
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index d75c46f..0c553f6 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -3056,6 +3056,14 @@
 	while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
 		msleep(1);
 	ixgbe_down(adapter);
+	/*
+	 * If SR-IOV is enabled, wait a bit before bringing the adapter
+	 * back up to give the VFs time to respond to the reset.  The
+	 * two second wait is based upon the watchdog timer cycle in
+	 * the VF driver.
+	 */
+	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+		msleep(2000);
 	ixgbe_up(adapter);
 	clear_bit(__IXGBE_RESETTING, &adapter->state);
 }
@@ -3236,13 +3244,15 @@
 
 	/* disable receive for all VFs and wait one second */
 	if (adapter->num_vfs) {
-		for (i = 0 ; i < adapter->num_vfs; i++)
-			adapter->vfinfo[i].clear_to_send = 0;
-
 		/* ping all the active vfs to let them know we are going down */
 		ixgbe_ping_all_vfs(adapter);
+
 		/* Disable all VFTE/VFRE TX/RX */
 		ixgbe_disable_tx_rx(adapter);
+
+		/* Mark all the VFs as inactive */
+		for (i = 0 ; i < adapter->num_vfs; i++)
+			adapter->vfinfo[i].clear_to_send = 0;
 	}
 
 	/* disable receives */
@@ -5638,7 +5648,8 @@
 
 #ifdef IXGBE_FCOE
 	if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
-	    (skb->protocol == htons(ETH_P_FCOE))) {
+	    ((skb->protocol == htons(ETH_P_FCOE)) ||
+	     (skb->protocol == htons(ETH_P_FIP)))) {
 		txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
 		txq += adapter->ring_feature[RING_F_FCOE].mask;
 		return txq;
@@ -5685,18 +5696,25 @@
 
 	tx_ring = adapter->tx_ring[skb->queue_mapping];
 
-	if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
-	    (skb->protocol == htons(ETH_P_FCOE))) {
-		tx_flags |= IXGBE_TX_FLAGS_FCOE;
 #ifdef IXGBE_FCOE
+	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
 #ifdef CONFIG_IXGBE_DCB
-		tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK
-			      << IXGBE_TX_FLAGS_VLAN_SHIFT);
-		tx_flags |= ((adapter->fcoe.up << 13)
-			      << IXGBE_TX_FLAGS_VLAN_SHIFT);
+		/* for FCoE with DCB, we force the priority to what
+		 * was specified by the switch */
+		if ((skb->protocol == htons(ETH_P_FCOE)) ||
+		    (skb->protocol == htons(ETH_P_FIP))) {
+			tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK
+				      << IXGBE_TX_FLAGS_VLAN_SHIFT);
+			tx_flags |= ((adapter->fcoe.up << 13)
+				     << IXGBE_TX_FLAGS_VLAN_SHIFT);
+		}
 #endif
-#endif
+		/* flag for FCoE offloads */
+		if (skb->protocol == htons(ETH_P_FCOE))
+			tx_flags |= IXGBE_TX_FLAGS_FCOE;
 	}
+#endif
+
 	/* four things can cause us to need a context descriptor */
 	if (skb_is_gso(skb) ||
 	    (skb->ip_summed == CHECKSUM_PARTIAL) ||
@@ -6051,7 +6069,6 @@
 	indices += min_t(unsigned int, num_possible_cpus(),
 			 IXGBE_MAX_FCOE_INDICES);
 #endif
-	indices = min_t(unsigned int, indices, MAX_TX_QUEUES);
 	netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
 	if (!netdev) {
 		err = -ENOMEM;
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 0ed5ab3..4ec6dc1 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -1298,6 +1298,7 @@
 #define IXGBE_ETQF_FILTER_BCN            1
 #define IXGBE_ETQF_FILTER_FCOE           2
 #define IXGBE_ETQF_FILTER_1588           3
+#define IXGBE_ETQF_FILTER_FIP            4
 /* VLAN Control Bit Masks */
 #define IXGBE_VLNCTRL_VET       0x0000FFFF  /* bits 0-15 */
 #define IXGBE_VLNCTRL_CFI       0x10000000  /* bit 28 */
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c
index d6cbd94..1bbbef3 100644
--- a/drivers/net/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ixgbevf/ixgbevf_main.c
@@ -2943,9 +2943,10 @@
 	struct ixgbevf_tx_buffer *tx_buffer_info;
 	unsigned int len;
 	unsigned int total = skb->len;
-	unsigned int offset = 0, size, count = 0, i;
+	unsigned int offset = 0, size, count = 0;
 	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
 	unsigned int f;
+	int i;
 
 	i = tx_ring->next_to_use;
 
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index 144d2e8..0f70383 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -53,8 +53,8 @@
 
 #define _NETXEN_NIC_LINUX_MAJOR 4
 #define _NETXEN_NIC_LINUX_MINOR 0
-#define _NETXEN_NIC_LINUX_SUBVERSION 72
-#define NETXEN_NIC_LINUX_VERSIONID  "4.0.72"
+#define _NETXEN_NIC_LINUX_SUBVERSION 73
+#define NETXEN_NIC_LINUX_VERSIONID  "4.0.73"
 
 #define NETXEN_VERSION_CODE(a, b, c)	(((a) << 24) + ((b) << 16) + (c))
 #define _major(v)	(((v) >> 24) & 0xff)
diff --git a/drivers/net/netxen/netxen_nic_ctx.c b/drivers/net/netxen/netxen_nic_ctx.c
index 2a8ef5f..f26e547 100644
--- a/drivers/net/netxen/netxen_nic_ctx.c
+++ b/drivers/net/netxen/netxen_nic_ctx.c
@@ -669,13 +669,15 @@
 		}
 		sds_ring->desc_head = (struct status_desc *)addr;
 
-		sds_ring->crb_sts_consumer =
-			netxen_get_ioaddr(adapter,
-			recv_crb_registers[port].crb_sts_consumer[ring]);
+		if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+			sds_ring->crb_sts_consumer =
+				netxen_get_ioaddr(adapter,
+				recv_crb_registers[port].crb_sts_consumer[ring]);
 
-		sds_ring->crb_intr_mask =
-			netxen_get_ioaddr(adapter,
-			recv_crb_registers[port].sw_int_mask[ring]);
+			sds_ring->crb_intr_mask =
+				netxen_get_ioaddr(adapter,
+				recv_crb_registers[port].sw_int_mask[ring]);
+		}
 	}
 
 
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 1c63610e..7eb925a 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -761,7 +761,7 @@
 	if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) {
 		bios_ver = cpu_to_le32(*((u32 *) (&fw->data[prd_off])
 						+ NX_UNI_BIOS_VERSION_OFF));
-		return (bios_ver << 24) + ((bios_ver >> 8) & 0xff00) +
+		return (bios_ver << 16) + ((bios_ver >> 8) & 0xff00) +
 							(bios_ver >> 24);
 	} else
 		return cpu_to_le32(*(u32 *)&fw->data[NX_BIOS_VERSION_OFFSET]);
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 9a7a0f3..01808b2 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -604,16 +604,14 @@
 static int
 netxen_setup_pci_map(struct netxen_adapter *adapter)
 {
-	void __iomem *mem_ptr0 = NULL;
-	void __iomem *mem_ptr1 = NULL;
-	void __iomem *mem_ptr2 = NULL;
 	void __iomem *db_ptr = NULL;
 
 	resource_size_t mem_base, db_base;
-	unsigned long mem_len, db_len = 0, pci_len0 = 0;
+	unsigned long mem_len, db_len = 0;
 
 	struct pci_dev *pdev = adapter->pdev;
 	int pci_func = adapter->ahw.pci_func;
+	struct netxen_hardware_context *ahw = &adapter->ahw;
 
 	int err = 0;
 
@@ -630,24 +628,40 @@
 
 	/* 128 Meg of memory */
 	if (mem_len == NETXEN_PCI_128MB_SIZE) {
-		mem_ptr0 = ioremap(mem_base, FIRST_PAGE_GROUP_SIZE);
-		mem_ptr1 = ioremap(mem_base + SECOND_PAGE_GROUP_START,
+
+		ahw->pci_base0 = ioremap(mem_base, FIRST_PAGE_GROUP_SIZE);
+		ahw->pci_base1 = ioremap(mem_base + SECOND_PAGE_GROUP_START,
 				SECOND_PAGE_GROUP_SIZE);
-		mem_ptr2 = ioremap(mem_base + THIRD_PAGE_GROUP_START,
+		ahw->pci_base2 = ioremap(mem_base + THIRD_PAGE_GROUP_START,
 				THIRD_PAGE_GROUP_SIZE);
-		pci_len0 = FIRST_PAGE_GROUP_SIZE;
+		if (ahw->pci_base0 == NULL || ahw->pci_base1 == NULL ||
+						ahw->pci_base2 == NULL) {
+			dev_err(&pdev->dev, "failed to map PCI bar 0\n");
+			err = -EIO;
+			goto err_out;
+		}
+
+		ahw->pci_len0 = FIRST_PAGE_GROUP_SIZE;
+
 	} else if (mem_len == NETXEN_PCI_32MB_SIZE) {
-		mem_ptr1 = ioremap(mem_base, SECOND_PAGE_GROUP_SIZE);
-		mem_ptr2 = ioremap(mem_base + THIRD_PAGE_GROUP_START -
+
+		ahw->pci_base1 = ioremap(mem_base, SECOND_PAGE_GROUP_SIZE);
+		ahw->pci_base2 = ioremap(mem_base + THIRD_PAGE_GROUP_START -
 			SECOND_PAGE_GROUP_START, THIRD_PAGE_GROUP_SIZE);
+		if (ahw->pci_base1 == NULL || ahw->pci_base2 == NULL) {
+			dev_err(&pdev->dev, "failed to map PCI bar 0\n");
+			err = -EIO;
+			goto err_out;
+		}
+
 	} else if (mem_len == NETXEN_PCI_2MB_SIZE) {
 
-		mem_ptr0 = pci_ioremap_bar(pdev, 0);
-		if (mem_ptr0 == NULL) {
+		ahw->pci_base0 = pci_ioremap_bar(pdev, 0);
+		if (ahw->pci_base0 == NULL) {
 			dev_err(&pdev->dev, "failed to map PCI bar 0\n");
 			return -EIO;
 		}
-		pci_len0 = mem_len;
+		ahw->pci_len0 = mem_len;
 	} else {
 		return -EIO;
 	}
@@ -656,11 +670,6 @@
 
 	dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
 
-	adapter->ahw.pci_base0 = mem_ptr0;
-	adapter->ahw.pci_len0 = pci_len0;
-	adapter->ahw.pci_base1 = mem_ptr1;
-	adapter->ahw.pci_base2 = mem_ptr2;
-
 	if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) {
 		adapter->ahw.ocm_win_crb = netxen_get_ioaddr(adapter,
 			NETXEN_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(pci_func)));
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 776cad2..1028fcb 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -1549,6 +1549,7 @@
 	PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x021b, 0x0101),
 	PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x08a1, 0xc0ab),
 	PCMCIA_PFC_DEVICE_PROD_ID12(0, "AnyCom", "Fast Ethernet + 56K COMBO", 0x578ba6e7, 0xb0ac62c4),
+	PCMCIA_PFC_DEVICE_PROD_ID12(0, "ATKK", "LM33-PCM-T", 0xba9eb7e2, 0x077c174e),
 	PCMCIA_PFC_DEVICE_PROD_ID12(0, "D-Link", "DME336T", 0x1a424a1c, 0xb23897ff),
 	PCMCIA_PFC_DEVICE_PROD_ID12(0, "Grey Cell", "GCS3000", 0x2a151fac, 0x48b932ae),
 	PCMCIA_PFC_DEVICE_PROD_ID12(0, "Linksys", "EtherFast 10&100 + 56K PC Card (PCMLM56)", 0x0733cc81, 0xb3765033),
@@ -1740,7 +1741,7 @@
 	PCMCIA_MFC_DEVICE_CIS_PROD_ID12(0, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "cis/DP83903.cis"),
 	PCMCIA_MFC_DEVICE_CIS_PROD_ID4(0, "NSC MF LAN/Modem", 0x58fc6056, "cis/DP83903.cis"),
 	PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0175, 0x0000, "cis/DP83903.cis"),
-	PCMCIA_DEVICE_CIS_MANF_CARD(0xc00f, 0x0002, "cis/LA-PCM.cis"),
+	PCMCIA_DEVICE_CIS_PROD_ID12("Allied Telesis,K.K", "Ethernet LAN Card", 0x2ad62f3c, 0x9fd2f0a2, "cis/LA-PCM.cis"),
 	PCMCIA_DEVICE_CIS_PROD_ID12("KTI", "PE520 PLUS", 0xad180345, 0x9d58d392, "cis/PE520.cis"),
 	PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "cis/NE2K.cis"),
 	PCMCIA_DEVICE_CIS_PROD_ID12("PMX   ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "cis/PE-200.cis"),
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 9d3ebf3..9674005 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -186,8 +186,13 @@
 
 MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
 
-static int rx_copybreak = 200;
-static int use_dac = -1;
+/*
+ * we set our copybreak very high so that we don't have
+ * to allocate 16k frames all the time (see note in
+ * rtl8169_open())
+ */
+static int rx_copybreak = 16383;
+static int use_dac;
 static struct {
 	u32 msg_enable;
 } debug = { -1 };
@@ -511,8 +516,7 @@
 module_param(rx_copybreak, int, 0);
 MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
 module_param(use_dac, int, 0);
-MODULE_PARM_DESC(use_dac, "Enable PCI DAC. -1 defaults on for PCI Express only."
-" Unsafe on 32 bit PCI slot.");
+MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
 module_param_named(debug, debug.msg_enable, int, 0);
 MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
 MODULE_LICENSE("GPL");
@@ -2821,8 +2825,8 @@
 	spin_lock_irq(&tp->lock);
 
 	RTL_W8(Cfg9346, Cfg9346_Unlock);
-	RTL_W32(MAC0, low);
 	RTL_W32(MAC4, high);
+	RTL_W32(MAC0, low);
 	RTL_W8(Cfg9346, Cfg9346_Lock);
 
 	spin_unlock_irq(&tp->lock);
@@ -2974,7 +2978,6 @@
 	void __iomem *ioaddr;
 	unsigned int i;
 	int rc;
-	int this_use_dac = use_dac;
 
 	if (netif_msg_drv(&debug)) {
 		printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
@@ -3040,17 +3043,8 @@
 
 	tp->cp_cmd = PCIMulRW | RxChkSum;
 
-	tp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
-	if (!tp->pcie_cap)
-		netif_info(tp, probe, dev, "no PCI Express capability\n");
-
-	if (this_use_dac < 0)
-		this_use_dac = tp->pcie_cap != 0;
-
 	if ((sizeof(dma_addr_t) > 4) &&
-	    this_use_dac &&
-	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
-		netif_info(tp, probe, dev, "using 64-bit DMA\n");
+	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
 		tp->cp_cmd |= PCIDAC;
 		dev->features |= NETIF_F_HIGHDMA;
 	} else {
@@ -3069,6 +3063,10 @@
 		goto err_out_free_res_4;
 	}
 
+	tp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+	if (!tp->pcie_cap)
+		netif_info(tp, probe, dev, "no PCI Express capability\n");
+
 	RTL_W16(IntrMask, 0x0000);
 
 	/* Soft reset the chip. */
@@ -3224,9 +3222,13 @@
 }
 
 static void rtl8169_set_rxbufsize(struct rtl8169_private *tp,
-				  struct net_device *dev)
+				  unsigned int mtu)
 {
-	unsigned int max_frame = dev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
+	unsigned int max_frame = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
+
+	if (max_frame != 16383)
+		printk(KERN_WARNING "WARNING! Changing of MTU on this NIC"
+			"May lead to frame reception errors!\n");
 
 	tp->rx_buf_sz = (max_frame > RX_BUF_SIZE) ? max_frame : RX_BUF_SIZE;
 }
@@ -3238,7 +3240,17 @@
 	int retval = -ENOMEM;
 
 
-	rtl8169_set_rxbufsize(tp, dev);
+	/*
+	 * Note that we use a magic value here; it's weird, I know.
+	 * It's done because some subset of rtl8169 hardware suffers from
+	 * a problem in which frames received that are longer than
+	 * the size set in the RxMaxSize register return garbage sizes
+	 * when received.  To avoid this we need to turn off filtering,
+	 * which is done by setting a value of 16383 in the RxMaxSize register
+	 * and allocating 16k frames to handle the largest possible rx value;
+	 * that's what the magic math below does.
+	 */
+	rtl8169_set_rxbufsize(tp, 16383 - VLAN_ETH_HLEN - ETH_FCS_LEN);
 
 	/*
 	 * Rx and Tx desscriptors needs 256 bytes alignment.
@@ -3891,7 +3903,7 @@
 
 	rtl8169_down(dev);
 
-	rtl8169_set_rxbufsize(tp, dev);
+	rtl8169_set_rxbufsize(tp, dev->mtu);
 
 	ret = rtl8169_init_ring(dev);
 	if (ret < 0)
@@ -4754,8 +4766,8 @@
 		mc_filter[1] = swab32(data);
 	}
 
-	RTL_W32(MAR0 + 0, mc_filter[0]);
 	RTL_W32(MAR0 + 4, mc_filter[1]);
+	RTL_W32(MAR0 + 0, mc_filter[0]);
 
 	RTL_W32(RxConfig, tmp);
 
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c
index 0ab05af..a4f09d4 100644
--- a/drivers/net/tulip/uli526x.c
+++ b/drivers/net/tulip/uli526x.c
@@ -851,13 +851,15 @@
 
 			if ( !(rdes0 & 0x8000) ||
 				((db->cr6_data & CR6_PM) && (rxlen>6)) ) {
+				struct sk_buff *new_skb = NULL;
+
 				skb = rxptr->rx_skb_ptr;
 
 				/* Good packet, send to upper layer */
 				/* Shorst packet used new SKB */
-				if ( (rxlen < RX_COPY_SIZE) &&
-					( (skb = dev_alloc_skb(rxlen + 2) )
-					!= NULL) ) {
+				if ((rxlen < RX_COPY_SIZE) &&
+				    (((new_skb = dev_alloc_skb(rxlen + 2)) != NULL))) {
+					skb = new_skb;
 					/* size less than COPY_SIZE, allocate a rxlen SKB */
 					skb_reserve(skb, 2); /* 16byte align */
 					memcpy(skb_put(skb, rxlen),
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 406757a..dee4fb5 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -376,8 +376,11 @@
 		if (!np->type)
 			np->type = "<NULL>";
 	}
-	while (tag == OF_DT_BEGIN_NODE) {
-		mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize);
+	while (tag == OF_DT_BEGIN_NODE || tag == OF_DT_NOP) {
+		if (tag == OF_DT_NOP)
+			*p += 4;
+		else
+			mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize);
 		tag = be32_to_cpup((__be32 *)(*p));
 	}
 	if (tag != OF_DT_END_NODE) {
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 3ea0b29..27c0e6e 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2123,6 +2123,9 @@
 	}
 }
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x9602, quirk_disable_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASUSTEK, 0x9602, quirk_disable_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AI, 0x9602, quirk_disable_msi);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, 0xa238, quirk_disable_msi);
 
 /* Go through the list of Hypertransport capabilities and
@@ -2495,39 +2498,6 @@
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4375,
 			quirk_msi_intx_disable_bug);
 
-/*
- * MSI does not work with the AMD RS780/RS880 internal graphics and HDMI audio
- * devices unless the BIOS has initialized the nb_cntl.strap_msi_enable bit.
- */
-static void __init rs780_int_gfx_disable_msi(struct pci_dev *int_gfx_bridge)
-{
-	u32 nb_cntl;
-
-	if (!int_gfx_bridge->subordinate)
-		return;
-
-	pci_bus_write_config_dword(int_gfx_bridge->bus, PCI_DEVFN(0, 0),
-				   0x60, 0);
-	pci_bus_read_config_dword(int_gfx_bridge->bus, PCI_DEVFN(0, 0),
-				  0x64, &nb_cntl);
-
-	if (!(nb_cntl & BIT(10))) {
-		dev_warn(&int_gfx_bridge->dev,
-			 FW_WARN "RS780: MSI for internal graphics disabled\n");
-		int_gfx_bridge->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
-	}
-}
-
-#define PCI_DEVICE_ID_AMD_RS780_P2P_INT_GFX	0x9602
-
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD,
-			PCI_DEVICE_ID_AMD_RS780_P2P_INT_GFX,
-			rs780_int_gfx_disable_msi);
-/* wrong vendor ID on M4A785TD motherboard: */
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASUSTEK,
-			PCI_DEVICE_ID_AMD_RS780_P2P_INT_GFX,
-			rs780_int_gfx_disable_msi);
-
 #endif /* CONFIG_PCI_MSI */
 
 #ifdef CONFIG_PCI_IOV
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index e631dbe..7bec458 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -385,6 +385,16 @@
 
 	  If you have an Eee PC laptop, say Y or M here.
 
+config EEEPC_WMI
+	tristate "Eee PC WMI Hotkey Driver (EXPERIMENTAL)"
+	depends on ACPI_WMI
+	depends on INPUT
+	depends on EXPERIMENTAL
+	---help---
+	  Say Y here if you want to support WMI-based hotkeys on Eee PC laptops.
+
+	  To compile this driver as a module, choose M here: the module will
+	  be called eeepc-wmi.
 
 config ACPI_WMI
 	tristate "WMI"
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 9cd9fa0..a906490 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -4,6 +4,7 @@
 #
 obj-$(CONFIG_ASUS_LAPTOP)	+= asus-laptop.o
 obj-$(CONFIG_EEEPC_LAPTOP)	+= eeepc-laptop.o
+obj-$(CONFIG_EEEPC_WMI)		+= eeepc-wmi.o
 obj-$(CONFIG_MSI_LAPTOP)	+= msi-laptop.o
 obj-$(CONFIG_ACPI_CMPC)		+= classmate-laptop.o
 obj-$(CONFIG_COMPAL_LAPTOP)	+= compal-laptop.o
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index db5f7db..475ab50 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -139,7 +139,7 @@
 
 /* Backlight */
 static acpi_handle lcd_switch_handle;
-static const char *lcd_switch_paths[] = {
+static char *lcd_switch_paths[] = {
   "\\_SB.PCI0.SBRG.EC0._Q10",	/* All new models */
   "\\_SB.PCI0.ISA.EC0._Q10",	/* A1x */
   "\\_SB.PCI0.PX40.ECD0._Q10",	/* L3C */
@@ -153,7 +153,7 @@
 #define METHOD_SWITCH_DISPLAY	"SDSP"
 
 static acpi_handle display_get_handle;
-static const char *display_get_paths[] = {
+static char *display_get_paths[] = {
   /* A6B, A6K A6R A7D F3JM L4R M6R A3G M6A M6V VX-1 V6J V6V W3Z */
   "\\_SB.PCI0.P0P1.VGA.GETD",
   /* A3E A4K, A4D A4L A6J A7J A8J Z71V M9V S5A M5A z33A W1Jc W2V G1 */
diff --git a/drivers/platform/x86/eeepc-wmi.c b/drivers/platform/x86/eeepc-wmi.c
new file mode 100644
index 0000000..2466b7b
--- /dev/null
+++ b/drivers/platform/x86/eeepc-wmi.c
@@ -0,0 +1,157 @@
+/*
+ * Eee PC WMI hotkey driver
+ *
+ * Copyright(C) 2010 Intel Corporation.
+ *
+ * Portions based on wistron_btns.c:
+ * Copyright (C) 2005 Miloslav Trmac <mitr@volny.cz>
+ * Copyright (C) 2005 Bernhard Rosenkraenzer <bero@arklinux.org>
+ * Copyright (C) 2005 Dmitry Torokhov <dtor@mail.ru>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/input.h>
+#include <linux/input/sparse-keymap.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+
+MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>");
+MODULE_DESCRIPTION("Eee PC WMI Hotkey Driver");
+MODULE_LICENSE("GPL");
+
+#define EEEPC_WMI_EVENT_GUID	"ABBC0F72-8EA1-11D1-00A0-C90629100000"
+
+MODULE_ALIAS("wmi:"EEEPC_WMI_EVENT_GUID);
+
+#define NOTIFY_BRNUP_MIN	0x11
+#define NOTIFY_BRNUP_MAX	0x1f
+#define NOTIFY_BRNDOWN_MIN	0x20
+#define NOTIFY_BRNDOWN_MAX	0x2e
+
+static const struct key_entry eeepc_wmi_keymap[] = {
+	/* Sleep already handled via generic ACPI code */
+	{ KE_KEY, 0x5d, { KEY_WLAN } },
+	{ KE_KEY, 0x32, { KEY_MUTE } },
+	{ KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
+	{ KE_KEY, 0x30, { KEY_VOLUMEUP } },
+	{ KE_IGNORE, NOTIFY_BRNDOWN_MIN, { KEY_BRIGHTNESSDOWN } },
+	{ KE_IGNORE, NOTIFY_BRNUP_MIN, { KEY_BRIGHTNESSUP } },
+	{ KE_KEY, 0xcc, { KEY_SWITCHVIDEOMODE } },
+	{ KE_END, 0},
+};
+
+static struct input_dev *eeepc_wmi_input_dev;
+
+static void eeepc_wmi_notify(u32 value, void *context)
+{
+	struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
+	union acpi_object *obj;
+	acpi_status status;
+	int code;
+
+	status = wmi_get_event_data(value, &response);
+	if (status != AE_OK) {
+		pr_err("EEEPC WMI: bad event status 0x%x\n", status);
+		return;
+	}
+
+	obj = (union acpi_object *)response.pointer;
+
+	if (obj && obj->type == ACPI_TYPE_INTEGER) {
+		code = obj->integer.value;
+
+		if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX)
+			code = NOTIFY_BRNUP_MIN;
+		else if (code >= NOTIFY_BRNDOWN_MIN && code <= NOTIFY_BRNDOWN_MAX)
+			code = NOTIFY_BRNDOWN_MIN;
+
+		if (!sparse_keymap_report_event(eeepc_wmi_input_dev,
+						code, 1, true))
+			pr_info("EEEPC WMI: Unknown key %x pressed\n", code);
+	}
+
+	kfree(obj);
+}
+
+static int eeepc_wmi_input_setup(void)
+{
+	int err;
+
+	eeepc_wmi_input_dev = input_allocate_device();
+	if (!eeepc_wmi_input_dev)
+		return -ENOMEM;
+
+	eeepc_wmi_input_dev->name = "Eee PC WMI hotkeys";
+	eeepc_wmi_input_dev->phys = "wmi/input0";
+	eeepc_wmi_input_dev->id.bustype = BUS_HOST;
+
+	err = sparse_keymap_setup(eeepc_wmi_input_dev, eeepc_wmi_keymap, NULL);
+	if (err)
+		goto err_free_dev;
+
+	err = input_register_device(eeepc_wmi_input_dev);
+	if (err)
+		goto err_free_keymap;
+
+	return 0;
+
+err_free_keymap:
+	sparse_keymap_free(eeepc_wmi_input_dev);
+err_free_dev:
+	input_free_device(eeepc_wmi_input_dev);
+	return err;
+}
+
+static int __init eeepc_wmi_init(void)
+{
+	int err;
+	acpi_status status;
+
+	if (!wmi_has_guid(EEEPC_WMI_EVENT_GUID)) {
+		pr_warning("EEEPC WMI: No known WMI GUID found\n");
+		return -ENODEV;
+	}
+
+	err = eeepc_wmi_input_setup();
+	if (err)
+		return err;
+
+	status = wmi_install_notify_handler(EEEPC_WMI_EVENT_GUID,
+					eeepc_wmi_notify, NULL);
+	if (ACPI_FAILURE(status)) {
+		sparse_keymap_free(eeepc_wmi_input_dev);
+		input_unregister_device(eeepc_wmi_input_dev);
+		pr_err("EEEPC WMI: Unable to register notify handler - %d\n",
+			status);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static void __exit eeepc_wmi_exit(void)
+{
+	wmi_remove_notify_handler(EEEPC_WMI_EVENT_GUID);
+	sparse_keymap_free(eeepc_wmi_input_dev);
+	input_unregister_device(eeepc_wmi_input_dev);
+}
+
+module_init(eeepc_wmi_init);
+module_exit(eeepc_wmi_exit);
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c
index e91db4b..175d202 100644
--- a/drivers/serial/serial_cs.c
+++ b/drivers/serial/serial_cs.c
@@ -745,6 +745,7 @@
 	PCMCIA_PFC_DEVICE_PROD_ID13(1, "Xircom", "REM10", 0x2e3ee845, 0x76df1d29),
 	PCMCIA_PFC_DEVICE_PROD_ID13(1, "Xircom", "XEM5600", 0x2e3ee845, 0xf1403719),
 	PCMCIA_PFC_DEVICE_PROD_ID12(1, "AnyCom", "Fast Ethernet + 56K COMBO", 0x578ba6e7, 0xb0ac62c4),
+	PCMCIA_PFC_DEVICE_PROD_ID12(1, "ATKK", "LM33-PCM-T", 0xba9eb7e2, 0x077c174e),
 	PCMCIA_PFC_DEVICE_PROD_ID12(1, "D-Link", "DME336T", 0x1a424a1c, 0xb23897ff),
 	PCMCIA_PFC_DEVICE_PROD_ID12(1, "Gateway 2000", "XJEM3336", 0xdd9989be, 0x662c394c),
 	PCMCIA_PFC_DEVICE_PROD_ID12(1, "Grey Cell", "GCS3000", 0x2a151fac, 0x48b932ae),
diff --git a/drivers/staging/et131x/et1310_mac.c b/drivers/staging/et131x/et1310_mac.c
index a292b1e..737a9f5 100644
--- a/drivers/staging/et131x/et1310_mac.c
+++ b/drivers/staging/et131x/et1310_mac.c
@@ -226,7 +226,7 @@
 	}
 
 	/* Enable TXMAC */
-	ctl |= 0x05;	/* TX mac enable, FC disable */
+	ctl |= 0x09;	/* TX mac enable, FC disable */
 	writel(ctl, &etdev->regs->txmac.ctl);
 
 	/* Ready to start the RXDMA/TXDMA engine */
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index 12ac9cd..df1bae9 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -1370,6 +1370,12 @@
 {
 	struct at91_udc		*udc = _udc;
 	u32			rescans = 5;
+	int			disable_clock = 0;
+
+	if (!udc->clocked) {
+		clk_on(udc);
+		disable_clock = 1;
+	}
 
 	while (rescans--) {
 		u32 status;
@@ -1458,6 +1464,9 @@
 		}
 	}
 
+	if (disable_clock)
+		clk_off(udc);
+
 	return IRQ_HANDLED;
 }
 
diff --git a/drivers/usb/gadget/r8a66597-udc.c b/drivers/usb/gadget/r8a66597-udc.c
index 5e13d23..8b45145 100644
--- a/drivers/usb/gadget/r8a66597-udc.c
+++ b/drivers/usb/gadget/r8a66597-udc.c
@@ -23,7 +23,6 @@
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/delay.h>
-#include <linux/err.h>
 #include <linux/io.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 23bb0ce..ce8ef61 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -919,6 +919,10 @@
 /*
  * We are only allowed to write into/dirty the page if the page is
  * clean, or already dirty within the same snap context.
+ *
+ * Called with the page locked.
+ * Returns with the page locked on success,
+ * or with the page unlocked on any failure (incl. -EAGAIN).
  */
 static int ceph_update_writeable_page(struct file *file,
 			    loff_t pos, unsigned len,
@@ -961,9 +965,11 @@
 			snapc = ceph_get_snap_context((void *)page->private);
 			unlock_page(page);
 			ceph_queue_writeback(inode);
-			wait_event_interruptible(ci->i_cap_wq,
+			r = wait_event_interruptible(ci->i_cap_wq,
 			       context_is_writeable_or_written(inode, snapc));
 			ceph_put_snap_context(snapc);
+			if (r == -ERESTARTSYS)
+				return r;
 			return -EAGAIN;
 		}
 
@@ -1035,7 +1041,7 @@
 	int r;
 
 	do {
-		/* get a page*/
+		/* get a page */
 		page = grab_cache_page_write_begin(mapping, index, 0);
 		if (!page)
 			return -ENOMEM;
diff --git a/fs/ceph/auth_x.c b/fs/ceph/auth_x.c
index f031842..8d8a849 100644
--- a/fs/ceph/auth_x.c
+++ b/fs/ceph/auth_x.c
@@ -28,6 +28,12 @@
 	return (ac->want_keys & xi->have_keys) == ac->want_keys;
 }
 
+static int ceph_x_encrypt_buflen(int ilen)
+{
+	return sizeof(struct ceph_x_encrypt_header) + ilen + 16 +
+		sizeof(u32);
+}
+
 static int ceph_x_encrypt(struct ceph_crypto_key *secret,
 			  void *ibuf, int ilen, void *obuf, size_t olen)
 {
@@ -150,6 +156,11 @@
 		struct timespec validity;
 		struct ceph_crypto_key old_key;
 		void *tp, *tpend;
+		struct ceph_timespec new_validity;
+		struct ceph_crypto_key new_session_key;
+		struct ceph_buffer *new_ticket_blob;
+		unsigned long new_expires, new_renew_after;
+		u64 new_secret_id;
 
 		ceph_decode_need(&p, end, sizeof(u32) + 1, bad);
 
@@ -182,16 +193,16 @@
 			goto bad;
 
 		memcpy(&old_key, &th->session_key, sizeof(old_key));
-		ret = ceph_crypto_key_decode(&th->session_key, &dp, dend);
+		ret = ceph_crypto_key_decode(&new_session_key, &dp, dend);
 		if (ret)
 			goto out;
 
-		ceph_decode_copy(&dp, &th->validity, sizeof(th->validity));
-		ceph_decode_timespec(&validity, &th->validity);
-		th->expires = get_seconds() + validity.tv_sec;
-		th->renew_after = th->expires - (validity.tv_sec / 4);
-		dout(" expires=%lu renew_after=%lu\n", th->expires,
-		     th->renew_after);
+		ceph_decode_copy(&dp, &new_validity, sizeof(new_validity));
+		ceph_decode_timespec(&validity, &new_validity);
+		new_expires = get_seconds() + validity.tv_sec;
+		new_renew_after = new_expires - (validity.tv_sec / 4);
+		dout(" expires=%lu renew_after=%lu\n", new_expires,
+		     new_renew_after);
 
 		/* ticket blob for service */
 		ceph_decode_8_safe(&p, end, is_enc, bad);
@@ -216,10 +227,21 @@
 		dout(" ticket blob is %d bytes\n", dlen);
 		ceph_decode_need(&tp, tpend, 1 + sizeof(u64), bad);
 		struct_v = ceph_decode_8(&tp);
-		th->secret_id = ceph_decode_64(&tp);
-		ret = ceph_decode_buffer(&th->ticket_blob, &tp, tpend);
+		new_secret_id = ceph_decode_64(&tp);
+		ret = ceph_decode_buffer(&new_ticket_blob, &tp, tpend);
 		if (ret)
 			goto out;
+
+		/* all is well, update our ticket */
+		ceph_crypto_key_destroy(&th->session_key);
+		if (th->ticket_blob)
+			ceph_buffer_put(th->ticket_blob);
+		th->session_key = new_session_key;
+		th->ticket_blob = new_ticket_blob;
+		th->validity = new_validity;
+		th->secret_id = new_secret_id;
+		th->expires = new_expires;
+		th->renew_after = new_renew_after;
 		dout(" got ticket service %d (%s) secret_id %lld len %d\n",
 		     type, ceph_entity_type_name(type), th->secret_id,
 		     (int)th->ticket_blob->vec.iov_len);
@@ -242,7 +264,7 @@
 				   struct ceph_x_ticket_handler *th,
 				   struct ceph_x_authorizer *au)
 {
-	int len;
+	int maxlen;
 	struct ceph_x_authorize_a *msg_a;
 	struct ceph_x_authorize_b msg_b;
 	void *p, *end;
@@ -253,15 +275,15 @@
 	dout("build_authorizer for %s %p\n",
 	     ceph_entity_type_name(th->service), au);
 
-	len = sizeof(*msg_a) + sizeof(msg_b) + sizeof(u32) +
-		ticket_blob_len + 16;
-	dout("  need len %d\n", len);
-	if (au->buf && au->buf->alloc_len < len) {
+	maxlen = sizeof(*msg_a) + sizeof(msg_b) +
+		ceph_x_encrypt_buflen(ticket_blob_len);
+	dout("  need len %d\n", maxlen);
+	if (au->buf && au->buf->alloc_len < maxlen) {
 		ceph_buffer_put(au->buf);
 		au->buf = NULL;
 	}
 	if (!au->buf) {
-		au->buf = ceph_buffer_new(len, GFP_NOFS);
+		au->buf = ceph_buffer_new(maxlen, GFP_NOFS);
 		if (!au->buf)
 			return -ENOMEM;
 	}
@@ -296,6 +318,7 @@
 	au->buf->vec.iov_len = p - au->buf->vec.iov_base;
 	dout(" built authorizer nonce %llx len %d\n", au->nonce,
 	     (int)au->buf->vec.iov_len);
+	BUG_ON(au->buf->vec.iov_len > maxlen);
 	return 0;
 
 out_buf:
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index db122bb..7d0a0d0 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1407,6 +1407,7 @@
  */
 void ceph_check_caps(struct ceph_inode_info *ci, int flags,
 		     struct ceph_mds_session *session)
+	__releases(session->s_mutex)
 {
 	struct ceph_client *client = ceph_inode_to_client(&ci->vfs_inode);
 	struct ceph_mds_client *mdsc = &client->mdsc;
@@ -1414,7 +1415,6 @@
 	struct ceph_cap *cap;
 	int file_wanted, used;
 	int took_snap_rwsem = 0;             /* true if mdsc->snap_rwsem held */
-	int drop_session_lock = session ? 0 : 1;
 	int issued, implemented, want, retain, revoking, flushing = 0;
 	int mds = -1;   /* keep track of how far we've gone through i_caps list
 			   to avoid an infinite loop on retry */
@@ -1639,7 +1639,7 @@
 	if (queue_invalidate)
 		ceph_queue_invalidate(inode);
 
-	if (session && drop_session_lock)
+	if (session)
 		mutex_unlock(&session->s_mutex);
 	if (took_snap_rwsem)
 		up_read(&mdsc->snap_rwsem);
@@ -2195,18 +2195,19 @@
  * Handle a cap GRANT message from the MDS.  (Note that a GRANT may
  * actually be a revocation if it specifies a smaller cap set.)
  *
- * caller holds s_mutex.
+ * caller holds s_mutex and i_lock, we drop both.
+ *
  * return value:
  *  0 - ok
  *  1 - check_caps on auth cap only (writeback)
  *  2 - check_caps (ack revoke)
  */
-static int handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
-			    struct ceph_mds_session *session,
-			    struct ceph_cap *cap,
-			    struct ceph_buffer *xattr_buf)
+static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
+			     struct ceph_mds_session *session,
+			     struct ceph_cap *cap,
+			     struct ceph_buffer *xattr_buf)
 	__releases(inode->i_lock)
-
+	__releases(session->s_mutex)
 {
 	struct ceph_inode_info *ci = ceph_inode(inode);
 	int mds = session->s_mds;
@@ -2216,7 +2217,7 @@
 	u64 size = le64_to_cpu(grant->size);
 	u64 max_size = le64_to_cpu(grant->max_size);
 	struct timespec mtime, atime, ctime;
-	int reply = 0;
+	int check_caps = 0;
 	int wake = 0;
 	int writeback = 0;
 	int revoked_rdcache = 0;
@@ -2329,11 +2330,12 @@
 		if ((used & ~newcaps) & CEPH_CAP_FILE_BUFFER)
 			writeback = 1; /* will delay ack */
 		else if (dirty & ~newcaps)
-			reply = 1;     /* initiate writeback in check_caps */
+			check_caps = 1;  /* initiate writeback in check_caps */
 		else if (((used & ~newcaps) & CEPH_CAP_FILE_CACHE) == 0 ||
 			   revoked_rdcache)
-			reply = 2;     /* send revoke ack in check_caps */
+			check_caps = 2;     /* send revoke ack in check_caps */
 		cap->issued = newcaps;
+		cap->implemented |= newcaps;
 	} else if (cap->issued == newcaps) {
 		dout("caps unchanged: %s -> %s\n",
 		     ceph_cap_string(cap->issued), ceph_cap_string(newcaps));
@@ -2346,6 +2348,7 @@
 					      * pending revocation */
 		wake = 1;
 	}
+	BUG_ON(cap->issued & ~cap->implemented);
 
 	spin_unlock(&inode->i_lock);
 	if (writeback)
@@ -2359,7 +2362,14 @@
 		ceph_queue_invalidate(inode);
 	if (wake)
 		wake_up(&ci->i_cap_wq);
-	return reply;
+
+	if (check_caps == 1)
+		ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_AUTHONLY,
+				session);
+	else if (check_caps == 2)
+		ceph_check_caps(ci, CHECK_CAPS_NODELAY, session);
+	else
+		mutex_unlock(&session->s_mutex);
 }
 
 /*
@@ -2548,9 +2558,8 @@
 			ci->i_cap_exporting_issued = cap->issued;
 		}
 		__ceph_remove_cap(cap);
-	} else {
-		WARN_ON(!cap);
 	}
+	/* else, we already released it */
 
 	spin_unlock(&inode->i_lock);
 }
@@ -2621,9 +2630,7 @@
 	u64 cap_id;
 	u64 size, max_size;
 	u64 tid;
-	int check_caps = 0;
 	void *snaptrace;
-	int r;
 
 	dout("handle_caps from mds%d\n", mds);
 
@@ -2668,8 +2675,9 @@
 	case CEPH_CAP_OP_IMPORT:
 		handle_cap_import(mdsc, inode, h, session,
 				  snaptrace, le32_to_cpu(h->snap_trace_len));
-		check_caps = 1; /* we may have sent a RELEASE to the old auth */
-		goto done;
+		ceph_check_caps(ceph_inode(inode), CHECK_CAPS_NODELAY,
+				session);
+		goto done_unlocked;
 	}
 
 	/* the rest require a cap */
@@ -2686,16 +2694,8 @@
 	switch (op) {
 	case CEPH_CAP_OP_REVOKE:
 	case CEPH_CAP_OP_GRANT:
-		r = handle_cap_grant(inode, h, session, cap, msg->middle);
-		if (r == 1)
-			ceph_check_caps(ceph_inode(inode),
-					CHECK_CAPS_NODELAY|CHECK_CAPS_AUTHONLY,
-					session);
-		else if (r == 2)
-			ceph_check_caps(ceph_inode(inode),
-					CHECK_CAPS_NODELAY,
-					session);
-		break;
+		handle_cap_grant(inode, h, session, cap, msg->middle);
+		goto done_unlocked;
 
 	case CEPH_CAP_OP_FLUSH_ACK:
 		handle_cap_flush_ack(inode, tid, h, session, cap);
@@ -2713,9 +2713,7 @@
 
 done:
 	mutex_unlock(&session->s_mutex);
-
-	if (check_caps)
-		ceph_check_caps(ceph_inode(inode), CHECK_CAPS_NODELAY, NULL);
+done_unlocked:
 	if (inode)
 		iput(inode);
 	return;
@@ -2838,11 +2836,18 @@
 	struct ceph_cap *cap;
 	struct ceph_mds_request_release *rel = *p;
 	int ret = 0;
-
-	dout("encode_inode_release %p mds%d drop %s unless %s\n", inode,
-	     mds, ceph_cap_string(drop), ceph_cap_string(unless));
+	int used = 0;
 
 	spin_lock(&inode->i_lock);
+	used = __ceph_caps_used(ci);
+
+	dout("encode_inode_release %p mds%d used %s drop %s unless %s\n", inode,
+	     mds, ceph_cap_string(used), ceph_cap_string(drop),
+	     ceph_cap_string(unless));
+
+	/* only drop unused caps */
+	drop &= ~used;
+
 	cap = __get_cap_for_mds(ci, mds);
 	if (cap && __cap_is_valid(cap)) {
 		if (force ||
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 5107384..8a9116e 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -288,8 +288,10 @@
 			CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR;
 
 		/* discard old result, if any */
-		if (fi->last_readdir)
+		if (fi->last_readdir) {
 			ceph_mdsc_put_request(fi->last_readdir);
+			fi->last_readdir = NULL;
+		}
 
 		/* requery frag tree, as the frag topology may have changed */
 		frag = ceph_choose_frag(ceph_inode(inode), frag, NULL, NULL);
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 7abe1ae..aca82d5 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -378,6 +378,22 @@
 
 	ceph_queue_caps_release(inode);
 
+	/*
+	 * we may still have a snap_realm reference if there are stray
+	 * caps in i_cap_exporting_issued or i_snap_caps.
+	 */
+	if (ci->i_snap_realm) {
+		struct ceph_mds_client *mdsc =
+			&ceph_client(ci->vfs_inode.i_sb)->mdsc;
+		struct ceph_snap_realm *realm = ci->i_snap_realm;
+
+		dout(" dropping residual ref to snap realm %p\n", realm);
+		spin_lock(&realm->inodes_with_caps_lock);
+		list_del_init(&ci->i_snap_realm_item);
+		spin_unlock(&realm->inodes_with_caps_lock);
+		ceph_put_snap_realm(mdsc, realm);
+	}
+
 	kfree(ci->i_symlink);
 	while ((n = rb_first(&ci->i_fragtree)) != NULL) {
 		frag = rb_entry(n, struct ceph_inode_frag, node);
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index a260010..5c7920b 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -328,6 +328,8 @@
 	struct ceph_mds_session *s;
 
 	s = kzalloc(sizeof(*s), GFP_NOFS);
+	if (!s)
+		return ERR_PTR(-ENOMEM);
 	s->s_mdsc = mdsc;
 	s->s_mds = mds;
 	s->s_state = CEPH_MDS_SESSION_NEW;
@@ -529,7 +531,7 @@
 {
 	dout("__unregister_request %p tid %lld\n", req, req->r_tid);
 	rb_erase(&req->r_node, &mdsc->request_tree);
-	ceph_mdsc_put_request(req);
+	RB_CLEAR_NODE(&req->r_node);
 
 	if (req->r_unsafe_dir) {
 		struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir);
@@ -538,6 +540,8 @@
 		list_del_init(&req->r_unsafe_dir_item);
 		spin_unlock(&ci->i_unsafe_lock);
 	}
+
+	ceph_mdsc_put_request(req);
 }
 
 /*
@@ -862,6 +866,7 @@
 	if (time_after_eq(jiffies, session->s_cap_ttl) &&
 	    time_after_eq(session->s_cap_ttl, session->s_renew_requested))
 		pr_info("mds%d caps stale\n", session->s_mds);
+	session->s_renew_requested = jiffies;
 
 	/* do not try to renew caps until a recovering mds has reconnected
 	 * with its clients. */
@@ -874,7 +879,6 @@
 
 	dout("send_renew_caps to mds%d (%s)\n", session->s_mds,
 		ceph_mds_state_name(state));
-	session->s_renew_requested = jiffies;
 	msg = create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS,
 				 ++session->s_renew_seq);
 	if (IS_ERR(msg))
@@ -1566,8 +1570,13 @@
 
 	/* get, open session */
 	session = __ceph_lookup_mds_session(mdsc, mds);
-	if (!session)
+	if (!session) {
 		session = register_session(mdsc, mds);
+		if (IS_ERR(session)) {
+			err = PTR_ERR(session);
+			goto finish;
+		}
+	}
 	dout("do_request mds%d session %p state %s\n", mds, session,
 	     session_state_name(session->s_state));
 	if (session->s_state != CEPH_MDS_SESSION_OPEN &&
@@ -1770,7 +1779,7 @@
 	dout("handle_reply %p\n", req);
 
 	/* correct session? */
-	if (!req->r_session && req->r_session != session) {
+	if (req->r_session != session) {
 		pr_err("mdsc_handle_reply got %llu on session mds%d"
 		       " not mds%d\n", tid, session->s_mds,
 		       req->r_session ? req->r_session->s_mds : -1);
@@ -2682,29 +2691,41 @@
  */
 static void wait_unsafe_requests(struct ceph_mds_client *mdsc, u64 want_tid)
 {
-	struct ceph_mds_request *req = NULL;
+	struct ceph_mds_request *req = NULL, *nextreq;
 	struct rb_node *n;
 
 	mutex_lock(&mdsc->mutex);
 	dout("wait_unsafe_requests want %lld\n", want_tid);
+restart:
 	req = __get_oldest_req(mdsc);
 	while (req && req->r_tid <= want_tid) {
+		/* find next request */
+		n = rb_next(&req->r_node);
+		if (n)
+			nextreq = rb_entry(n, struct ceph_mds_request, r_node);
+		else
+			nextreq = NULL;
 		if ((req->r_op & CEPH_MDS_OP_WRITE)) {
 			/* write op */
 			ceph_mdsc_get_request(req);
+			if (nextreq)
+				ceph_mdsc_get_request(nextreq);
 			mutex_unlock(&mdsc->mutex);
 			dout("wait_unsafe_requests  wait on %llu (want %llu)\n",
 			     req->r_tid, want_tid);
 			wait_for_completion(&req->r_safe_completion);
 			mutex_lock(&mdsc->mutex);
-			n = rb_next(&req->r_node);
 			ceph_mdsc_put_request(req);
-		} else {
-			n = rb_next(&req->r_node);
+			if (!nextreq)
+				break;  /* next didn't exist before we waited, so we're done! */
+			if (RB_EMPTY_NODE(&nextreq->r_node)) {
+				/* next request was removed from tree */
+				ceph_mdsc_put_request(nextreq);
+				goto restart;
+			}
+			ceph_mdsc_put_request(nextreq);  /* won't go away */
 		}
-		if (!n)
-			break;
-		req = rb_entry(n, struct ceph_mds_request, r_node);
+		req = nextreq;
 	}
 	mutex_unlock(&mdsc->mutex);
 	dout("wait_unsafe_requests done\n");
diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c
index 781656a..a32f0f8 100644
--- a/fs/ceph/messenger.c
+++ b/fs/ceph/messenger.c
@@ -366,6 +366,14 @@
 }
 
 /*
+ * return true if this connection ever successfully opened
+ */
+bool ceph_con_opened(struct ceph_connection *con)
+{
+	return con->connect_seq > 0;
+}
+
+/*
  * generic get/put
  */
 struct ceph_connection *ceph_con_get(struct ceph_connection *con)
@@ -830,13 +838,6 @@
 	con->in_base_pos = 0;
 }
 
-static void prepare_read_connect_retry(struct ceph_connection *con)
-{
-	dout("prepare_read_connect_retry %p\n", con);
-	con->in_base_pos = strlen(CEPH_BANNER) + sizeof(con->actual_peer_addr)
-		+ sizeof(con->peer_addr_for_me);
-}
-
 static void prepare_read_ack(struct ceph_connection *con)
 {
 	dout("prepare_read_ack %p\n", con);
@@ -1146,7 +1147,7 @@
 		}
 		con->auth_retry = 1;
 		prepare_write_connect(con->msgr, con, 0);
-		prepare_read_connect_retry(con);
+		prepare_read_connect(con);
 		break;
 
 	case CEPH_MSGR_TAG_RESETSESSION:
@@ -1843,8 +1844,6 @@
 		goto out;
 	}
 
-	clear_bit(BUSY, &con->state);  /* to avoid an improbable race */
-
 	mutex_lock(&con->mutex);
 	if (test_bit(CLOSED, &con->state))
 		goto out_unlock;
diff --git a/fs/ceph/messenger.h b/fs/ceph/messenger.h
index 4caaa59..a343dae 100644
--- a/fs/ceph/messenger.h
+++ b/fs/ceph/messenger.h
@@ -223,6 +223,7 @@
 			  struct ceph_connection *con);
 extern void ceph_con_open(struct ceph_connection *con,
 			  struct ceph_entity_addr *addr);
+extern bool ceph_con_opened(struct ceph_connection *con);
 extern void ceph_con_close(struct ceph_connection *con);
 extern void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg);
 extern void ceph_con_revoke(struct ceph_connection *con, struct ceph_msg *msg);
diff --git a/fs/ceph/osd_client.c b/fs/ceph/osd_client.c
index dbe63db9..c7b4ded 100644
--- a/fs/ceph/osd_client.c
+++ b/fs/ceph/osd_client.c
@@ -413,11 +413,22 @@
  */
 static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
 {
+	struct ceph_osd_request *req;
 	int ret = 0;
 
 	dout("__reset_osd %p osd%d\n", osd, osd->o_osd);
 	if (list_empty(&osd->o_requests)) {
 		__remove_osd(osdc, osd);
+	} else if (memcmp(&osdc->osdmap->osd_addr[osd->o_osd],
+			  &osd->o_con.peer_addr,
+			  sizeof(osd->o_con.peer_addr)) == 0 &&
+		   !ceph_con_opened(&osd->o_con)) {
+		dout(" osd addr hasn't changed and connection never opened,"
+		     " letting msgr retry");
+		/* touch each r_stamp for handle_timeout()'s benefit */
+		list_for_each_entry(req, &osd->o_requests, r_osd_item)
+			req->r_stamp = jiffies;
+		ret = -EAGAIN;
 	} else {
 		ceph_con_close(&osd->o_con);
 		ceph_con_open(&osd->o_con, &osdc->osdmap->osd_addr[osd->o_osd]);
@@ -633,7 +644,7 @@
 	reqhead->flags |= cpu_to_le32(req->r_flags);  /* e.g., RETRY */
 	reqhead->reassert_version = req->r_reassert_version;
 
-	req->r_sent_stamp = jiffies;
+	req->r_stamp = jiffies;
 	list_move_tail(&osdc->req_lru, &req->r_req_lru_item);
 
 	ceph_msg_get(req->r_request); /* send consumes a ref */
@@ -660,7 +671,7 @@
 	unsigned long timeout = osdc->client->mount_args->osd_timeout * HZ;
 	unsigned long keepalive =
 		osdc->client->mount_args->osd_keepalive_timeout * HZ;
-	unsigned long last_sent = 0;
+	unsigned long last_stamp = 0;
 	struct rb_node *p;
 	struct list_head slow_osds;
 
@@ -697,12 +708,12 @@
 		req = list_entry(osdc->req_lru.next, struct ceph_osd_request,
 				 r_req_lru_item);
 
-		if (time_before(jiffies, req->r_sent_stamp + timeout))
+		if (time_before(jiffies, req->r_stamp + timeout))
 			break;
 
-		BUG_ON(req == last_req && req->r_sent_stamp == last_sent);
+		BUG_ON(req == last_req && req->r_stamp == last_stamp);
 		last_req = req;
-		last_sent = req->r_sent_stamp;
+		last_stamp = req->r_stamp;
 
 		osd = req->r_osd;
 		BUG_ON(!osd);
@@ -718,7 +729,7 @@
 	 */
 	INIT_LIST_HEAD(&slow_osds);
 	list_for_each_entry(req, &osdc->req_lru, r_req_lru_item) {
-		if (time_before(jiffies, req->r_sent_stamp + keepalive))
+		if (time_before(jiffies, req->r_stamp + keepalive))
 			break;
 
 		osd = req->r_osd;
@@ -862,7 +873,9 @@
 
 	dout("kick_requests osd%d\n", kickosd ? kickosd->o_osd : -1);
 	if (kickosd) {
-		__reset_osd(osdc, kickosd);
+		err = __reset_osd(osdc, kickosd);
+		if (err == -EAGAIN)
+			return 1;
 	} else {
 		for (p = rb_first(&osdc->osds); p; p = n) {
 			struct ceph_osd *osd =
@@ -913,7 +926,7 @@
 
 kick:
 		dout("kicking %p tid %llu osd%d\n", req, req->r_tid,
-		     req->r_osd->o_osd);
+		     req->r_osd ? req->r_osd->o_osd : -1);
 		req->r_flags |= CEPH_OSD_FLAG_RETRY;
 		err = __send_request(osdc, req);
 		if (err) {
diff --git a/fs/ceph/osd_client.h b/fs/ceph/osd_client.h
index 1b1a3ca..b075991 100644
--- a/fs/ceph/osd_client.h
+++ b/fs/ceph/osd_client.h
@@ -70,7 +70,7 @@
 
 	char              r_oid[40];          /* object name */
 	int               r_oid_len;
-	unsigned long     r_sent_stamp;
+	unsigned long     r_stamp;            /* send OR check time */
 	bool              r_resend;           /* msg send failed, needs retry */
 
 	struct ceph_file_layout r_file_layout;
diff --git a/fs/ceph/osdmap.c b/fs/ceph/osdmap.c
index b83f269..d82fe87 100644
--- a/fs/ceph/osdmap.c
+++ b/fs/ceph/osdmap.c
@@ -480,6 +480,14 @@
 	return NULL;
 }
 
+void __decode_pool(void **p, struct ceph_pg_pool_info *pi)
+{
+	ceph_decode_copy(p, &pi->v, sizeof(pi->v));
+	calc_pg_masks(pi);
+	*p += le32_to_cpu(pi->v.num_snaps) * sizeof(u64);
+	*p += le32_to_cpu(pi->v.num_removed_snap_intervals) * sizeof(u64) * 2;
+}
+
 /*
  * decode a full map.
  */
@@ -526,12 +534,8 @@
 				   ev, CEPH_PG_POOL_VERSION);
 			goto bad;
 		}
-		ceph_decode_copy(p, &pi->v, sizeof(pi->v));
+		__decode_pool(p, pi);
 		__insert_pg_pool(&map->pg_pools, pi);
-		calc_pg_masks(pi);
-		*p += le32_to_cpu(pi->v.num_snaps) * sizeof(u64);
-		*p += le32_to_cpu(pi->v.num_removed_snap_intervals)
-			* sizeof(u64) * 2;
 	}
 	ceph_decode_32_safe(p, end, map->pool_max, bad);
 
@@ -714,8 +718,7 @@
 			pi->id = pool;
 			__insert_pg_pool(&map->pg_pools, pi);
 		}
-		ceph_decode_copy(p, &pi->v, sizeof(pi->v));
-		calc_pg_masks(pi);
+		__decode_pool(p, pi);
 	}
 
 	/* old_pool */
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index bf2a5f3..df04e21 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -314,9 +314,9 @@
 	   because we rebuild_snap_realms() works _downward_ in
 	   hierarchy after each update.) */
 	if (realm->cached_context &&
-	    realm->cached_context->seq <= realm->seq &&
+	    realm->cached_context->seq == realm->seq &&
 	    (!parent ||
-	     realm->cached_context->seq <= parent->cached_context->seq)) {
+	     realm->cached_context->seq >= parent->cached_context->seq)) {
 		dout("build_snap_context %llx %p: %p seq %lld (%d snaps)"
 		     " (unchanged)\n",
 		     realm->ino, realm, realm->cached_context,
@@ -818,7 +818,9 @@
 			 * queued (again) by ceph_update_snap_trace()
 			 * below.  Queue it _now_, under the old context.
 			 */
+			spin_lock(&realm->inodes_with_caps_lock);
 			list_del_init(&ci->i_snap_realm_item);
+			spin_unlock(&realm->inodes_with_caps_lock);
 			spin_unlock(&inode->i_lock);
 
 			ceph_queue_cap_snap(ci,
diff --git a/fs/ext3/ialloc.c b/fs/ext3/ialloc.c
index ef9008b..0d0e97e 100644
--- a/fs/ext3/ialloc.c
+++ b/fs/ext3/ialloc.c
@@ -582,7 +582,9 @@
 	inode->i_generation = sbi->s_next_generation++;
 	spin_unlock(&sbi->s_next_gen_lock);
 
-	ei->i_state = EXT3_STATE_NEW;
+	ei->i_state_flags = 0;
+	ext3_set_inode_state(inode, EXT3_STATE_NEW);
+
 	ei->i_extra_isize =
 		(EXT3_INODE_SIZE(inode->i_sb) > EXT3_GOOD_OLD_INODE_SIZE) ?
 		sizeof(struct ext3_inode) - EXT3_GOOD_OLD_INODE_SIZE : 0;
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 7f920b7..ea33bdf 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -2811,7 +2811,7 @@
 	inode->i_mtime.tv_sec = (signed)le32_to_cpu(raw_inode->i_mtime);
 	inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec = inode->i_mtime.tv_nsec = 0;
 
-	ei->i_state = 0;
+	ei->i_state_flags = 0;
 	ei->i_dir_start_lookup = 0;
 	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
 	/* We now have enough fields to check if the inode was active or not.
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
index c1ef501..6fcc7e7 100644
--- a/fs/fat/namei_vfat.c
+++ b/fs/fat/namei_vfat.c
@@ -309,7 +309,7 @@
 {
 	struct fat_mount_options *opts = &MSDOS_SB(dir->i_sb)->options;
 	wchar_t *ip, *ext_start, *end, *name_start;
-	unsigned char base[9], ext[4], buf[8], *p;
+	unsigned char base[9], ext[4], buf[5], *p;
 	unsigned char charbuf[NLS_MAX_CHARSET_SIZE];
 	int chl, chi;
 	int sz = 0, extlen, baselen, i, numtail_baselen, numtail2_baselen;
@@ -467,7 +467,7 @@
 			return 0;
 	}
 
-	i = jiffies & 0xffff;
+	i = jiffies;
 	sz = (jiffies >> 16) & 0x7;
 	if (baselen > 2) {
 		baselen = numtail2_baselen;
@@ -476,7 +476,7 @@
 	name_res[baselen + 4] = '~';
 	name_res[baselen + 5] = '1' + sz;
 	while (1) {
-		sprintf(buf, "%04X", i);
+		snprintf(buf, sizeof(buf), "%04X", i & 0xffff);
 		memcpy(&name_res[baselen], buf, 4);
 		if (vfat_find_form(dir, name_res) < 0)
 			break;
diff --git a/fs/fscache/object.c b/fs/fscache/object.c
index e513ac5..0b589a9 100644
--- a/fs/fscache/object.c
+++ b/fs/fscache/object.c
@@ -53,7 +53,7 @@
 static void fscache_object_slow_work_put_ref(struct slow_work *);
 static int  fscache_object_slow_work_get_ref(struct slow_work *);
 static void fscache_object_slow_work_execute(struct slow_work *);
-#ifdef CONFIG_SLOW_WORK_PROC
+#ifdef CONFIG_SLOW_WORK_DEBUG
 static void fscache_object_slow_work_desc(struct slow_work *, struct seq_file *);
 #endif
 static void fscache_initialise_object(struct fscache_object *);
@@ -69,7 +69,7 @@
 	.get_ref	= fscache_object_slow_work_get_ref,
 	.put_ref	= fscache_object_slow_work_put_ref,
 	.execute	= fscache_object_slow_work_execute,
-#ifdef CONFIG_SLOW_WORK_PROC
+#ifdef CONFIG_SLOW_WORK_DEBUG
 	.desc		= fscache_object_slow_work_desc,
 #endif
 };
@@ -364,7 +364,7 @@
 /*
  * describe an object for slow-work debugging
  */
-#ifdef CONFIG_SLOW_WORK_PROC
+#ifdef CONFIG_SLOW_WORK_DEBUG
 static void fscache_object_slow_work_desc(struct slow_work *work,
 					  struct seq_file *m)
 {
diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
index 313e79a..9f6c928 100644
--- a/fs/fscache/operation.c
+++ b/fs/fscache/operation.c
@@ -500,7 +500,7 @@
 /*
  * describe an operation for slow-work debugging
  */
-#ifdef CONFIG_SLOW_WORK_PROC
+#ifdef CONFIG_SLOW_WORK_DEBUG
 static void fscache_op_desc(struct slow_work *work, struct seq_file *m)
 {
 	struct fscache_operation *op =
@@ -517,7 +517,7 @@
 	.get_ref	= fscache_op_get_ref,
 	.put_ref	= fscache_op_put_ref,
 	.execute	= fscache_op_execute,
-#ifdef CONFIG_SLOW_WORK_PROC
+#ifdef CONFIG_SLOW_WORK_DEBUG
 	.desc		= fscache_op_desc,
 #endif
 };
diff --git a/fs/logfs/dev_bdev.c b/fs/logfs/dev_bdev.c
index 9718c22..a5d0c56 100644
--- a/fs/logfs/dev_bdev.c
+++ b/fs/logfs/dev_bdev.c
@@ -80,6 +80,7 @@
 			prefetchw(&bvec->bv_page->flags);
 
 		end_page_writeback(page);
+		page_cache_release(page);
 	} while (bvec >= bio->bi_io_vec);
 	bio_put(bio);
 	if (atomic_dec_and_test(&super->s_pending_writes))
@@ -97,8 +98,10 @@
 	unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9);
 	int i;
 
+	if (max_pages > BIO_MAX_PAGES)
+		max_pages = BIO_MAX_PAGES;
 	bio = bio_alloc(GFP_NOFS, max_pages);
-	BUG_ON(!bio); /* FIXME: handle this */
+	BUG_ON(!bio);
 
 	for (i = 0; i < nr_pages; i++) {
 		if (i >= max_pages) {
@@ -191,8 +194,10 @@
 	unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9);
 	int i;
 
+	if (max_pages > BIO_MAX_PAGES)
+		max_pages = BIO_MAX_PAGES;
 	bio = bio_alloc(GFP_NOFS, max_pages);
-	BUG_ON(!bio); /* FIXME: handle this */
+	BUG_ON(!bio);
 
 	for (i = 0; i < nr_pages; i++) {
 		if (i >= max_pages) {
diff --git a/fs/logfs/dir.c b/fs/logfs/dir.c
index 56a8bfb..c76b4b5 100644
--- a/fs/logfs/dir.c
+++ b/fs/logfs/dir.c
@@ -303,12 +303,12 @@
 				(filler_t *)logfs_readpage, NULL);
 		if (IS_ERR(page))
 			return PTR_ERR(page);
-		dd = kmap_atomic(page, KM_USER0);
+		dd = kmap(page);
 		BUG_ON(dd->namelen == 0);
 
 		full = filldir(buf, (char *)dd->name, be16_to_cpu(dd->namelen),
 				pos, be64_to_cpu(dd->ino), dd->type);
-		kunmap_atomic(dd, KM_USER0);
+		kunmap(page);
 		page_cache_release(page);
 		if (full)
 			break;
diff --git a/fs/logfs/journal.c b/fs/logfs/journal.c
index 6ad30a4..d57c7b0 100644
--- a/fs/logfs/journal.c
+++ b/fs/logfs/journal.c
@@ -800,6 +800,7 @@
 {
 	struct logfs_super *super = logfs_super(sb);
 	struct logfs_area *area = super->s_journal_area;
+	struct btree_head32 *head = &super->s_reserved_segments;
 	u32 segno, ec;
 	int i, err;
 
@@ -807,6 +808,7 @@
 	/* Drop old segments */
 	journal_for_each(i)
 		if (super->s_journal_seg[i]) {
+			btree_remove32(head, super->s_journal_seg[i]);
 			logfs_set_segment_unreserved(sb,
 					super->s_journal_seg[i],
 					super->s_journal_ec[i]);
@@ -819,8 +821,13 @@
 		super->s_journal_seg[i] = segno;
 		super->s_journal_ec[i] = ec;
 		logfs_set_segment_reserved(sb, segno);
+		err = btree_insert32(head, segno, (void *)1, GFP_KERNEL);
+		BUG_ON(err); /* mempool should prevent this */
+		err = logfs_erase_segment(sb, segno, 1);
+		BUG_ON(err); /* FIXME: remount-ro would be nicer */
 	}
 	/* Manually move journal_area */
+	freeseg(sb, area->a_segno);
 	area->a_segno = super->s_journal_seg[0];
 	area->a_is_open = 0;
 	area->a_used_bytes = 0;
diff --git a/fs/logfs/logfs.h b/fs/logfs/logfs.h
index 1297794..b84b0ee 100644
--- a/fs/logfs/logfs.h
+++ b/fs/logfs/logfs.h
@@ -587,6 +587,7 @@
 int logfs_init_mapping(struct super_block *sb);
 void logfs_sync_area(struct logfs_area *area);
 void logfs_sync_segments(struct super_block *sb);
+void freeseg(struct super_block *sb, u32 segno);
 
 /* area handling */
 int logfs_init_areas(struct super_block *sb);
diff --git a/fs/logfs/readwrite.c b/fs/logfs/readwrite.c
index 7a23b3e..c3a3a68 100644
--- a/fs/logfs/readwrite.c
+++ b/fs/logfs/readwrite.c
@@ -1594,7 +1594,6 @@
 	return ret;
 }
 
-/* Rewrite cannot mark the inode dirty but has to write it immediatly. */
 int logfs_rewrite_block(struct inode *inode, u64 bix, u64 ofs,
 		gc_level_t gc_level, long flags)
 {
@@ -1611,6 +1610,18 @@
 		if (level != 0)
 			alloc_indirect_block(inode, page, 0);
 		err = logfs_write_buf(inode, page, flags);
+		if (!err && shrink_level(gc_level) == 0) {
+			/* Rewrite cannot mark the inode dirty but has to
+			 * write it immediately.
+			 * Q: Can't we just create an alias for the inode
+			 * instead?  And if not, why not?
+			 */
+			if (inode->i_ino == LOGFS_INO_MASTER)
+				logfs_write_anchor(inode->i_sb);
+			else {
+				err = __logfs_write_inode(inode, flags);
+			}
+		}
 	}
 	logfs_put_write_page(page);
 	return err;
diff --git a/fs/logfs/segment.c b/fs/logfs/segment.c
index 1a14f99..0ecd8f0 100644
--- a/fs/logfs/segment.c
+++ b/fs/logfs/segment.c
@@ -93,50 +93,58 @@
 	} while (len);
 }
 
-/*
- * bdev_writeseg will write full pages.  Memset the tail to prevent data leaks.
- */
-static void pad_wbuf(struct logfs_area *area, int final)
+static void pad_partial_page(struct logfs_area *area)
 {
 	struct super_block *sb = area->a_sb;
-	struct logfs_super *super = logfs_super(sb);
 	struct page *page;
 	u64 ofs = dev_ofs(sb, area->a_segno, area->a_used_bytes);
 	pgoff_t index = ofs >> PAGE_SHIFT;
 	long offset = ofs & (PAGE_SIZE-1);
 	u32 len = PAGE_SIZE - offset;
 
-	if (len == PAGE_SIZE) {
-		/* The math in this function can surely use some love */
-		len = 0;
-	}
-	if (len) {
-		BUG_ON(area->a_used_bytes >= super->s_segsize);
-
-		page = get_mapping_page(area->a_sb, index, 0);
+	if (len % PAGE_SIZE) {
+		page = get_mapping_page(sb, index, 0);
 		BUG_ON(!page); /* FIXME: reserve a pool */
 		memset(page_address(page) + offset, 0xff, len);
 		SetPagePrivate(page);
 		page_cache_release(page);
 	}
+}
 
-	if (!final)
-		return;
+static void pad_full_pages(struct logfs_area *area)
+{
+	struct super_block *sb = area->a_sb;
+	struct logfs_super *super = logfs_super(sb);
+	u64 ofs = dev_ofs(sb, area->a_segno, area->a_used_bytes);
+	u32 len = super->s_segsize - area->a_used_bytes;
+	pgoff_t index = PAGE_CACHE_ALIGN(ofs) >> PAGE_CACHE_SHIFT;
+	pgoff_t no_indizes = len >> PAGE_CACHE_SHIFT;
+	struct page *page;
 
-	area->a_used_bytes += len;
-	for ( ; area->a_used_bytes < super->s_segsize;
-			area->a_used_bytes += PAGE_SIZE) {
-		/* Memset another page */
-		index++;
-		page = get_mapping_page(area->a_sb, index, 0);
+	while (no_indizes) {
+		page = get_mapping_page(sb, index, 0);
 		BUG_ON(!page); /* FIXME: reserve a pool */
-		memset(page_address(page), 0xff, PAGE_SIZE);
+		SetPageUptodate(page);
+		memset(page_address(page), 0xff, PAGE_CACHE_SIZE);
 		SetPagePrivate(page);
 		page_cache_release(page);
+		index++;
+		no_indizes--;
 	}
 }
 
 /*
+ * bdev_writeseg will write full pages.  Memset the tail to prevent data leaks.
+ * Also make sure we allocate (and memset) all pages for final writeout.
+ */
+static void pad_wbuf(struct logfs_area *area, int final)
+{
+	pad_partial_page(area);
+	if (final)
+		pad_full_pages(area);
+}
+
+/*
  * We have to be careful with the alias tree.  Since lookup is done by bix,
  * it needs to be normalized, so 14, 15, 16, etc. all match when dealing with
  * indirect blocks.  So always use it through accessor functions.
@@ -683,7 +691,7 @@
 	return 0;
 }
 
-static void freeseg(struct super_block *sb, u32 segno)
+void freeseg(struct super_block *sb, u32 segno)
 {
 	struct logfs_super *super = logfs_super(sb);
 	struct address_space *mapping = super->s_mapping_inode->i_mapping;
diff --git a/fs/logfs/super.c b/fs/logfs/super.c
index c66beab..9d856c4 100644
--- a/fs/logfs/super.c
+++ b/fs/logfs/super.c
@@ -277,7 +277,7 @@
 	}
 	if (valid0 && valid1 && ds_cmp(ds0, ds1)) {
 		printk(KERN_INFO"Superblocks don't match - fixing.\n");
-		return write_one_sb(sb, super->s_devops->find_last_sb);
+		return logfs_write_sb(sb);
 	}
 	/* If neither is valid now, something's wrong.  Didn't we properly
 	 * check them before?!? */
@@ -289,6 +289,10 @@
 {
 	int err;
 
+	err = logfs_open_segfile(sb);
+	if (err)
+		return err;
+
 	/* Repair any broken superblock copies */
 	err = logfs_recover_sb(sb);
 	if (err)
@@ -299,10 +303,6 @@
 	if (err)
 		return err;
 
-	err = logfs_open_segfile(sb);
-	if (err)
-		return err;
-
 	/* Do one GC pass before any data gets dirtied */
 	logfs_gc_pass(sb);
 
@@ -328,7 +328,7 @@
 
 	sb->s_root = d_alloc_root(rootdir);
 	if (!sb->s_root)
-		goto fail;
+		goto fail2;
 
 	super->s_erase_page = alloc_pages(GFP_KERNEL, 0);
 	if (!super->s_erase_page)
@@ -572,8 +572,7 @@
 	return 0;
 
 err1:
-	up_write(&sb->s_umount);
-	deactivate_super(sb);
+	deactivate_locked_super(sb);
 	return err;
 err0:
 	kfree(super);
diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
index 0501974..8ccf0f8 100644
--- a/fs/ocfs2/acl.c
+++ b/fs/ocfs2/acl.c
@@ -30,6 +30,8 @@
 #include "alloc.h"
 #include "dlmglue.h"
 #include "file.h"
+#include "inode.h"
+#include "journal.h"
 #include "ocfs2_fs.h"
 
 #include "xattr.h"
@@ -166,6 +168,60 @@
 }
 
 /*
+ * Helper function to set i_mode in memory and on disk. Some call paths
+ * will not have di_bh or a journal handle to pass, in which case it
+ * will create its own.
+ */
+static int ocfs2_acl_set_mode(struct inode *inode, struct buffer_head *di_bh,
+			      handle_t *handle, umode_t new_mode)
+{
+	int ret, commit_handle = 0;
+	struct ocfs2_dinode *di;
+
+	if (di_bh == NULL) {
+		ret = ocfs2_read_inode_block(inode, &di_bh);
+		if (ret) {
+			mlog_errno(ret);
+			goto out;
+		}
+	} else
+		get_bh(di_bh);
+
+	if (handle == NULL) {
+		handle = ocfs2_start_trans(OCFS2_SB(inode->i_sb),
+					   OCFS2_INODE_UPDATE_CREDITS);
+		if (IS_ERR(handle)) {
+			ret = PTR_ERR(handle);
+			mlog_errno(ret);
+			goto out_brelse;
+		}
+
+		commit_handle = 1;
+	}
+
+	di = (struct ocfs2_dinode *)di_bh->b_data;
+	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
+				      OCFS2_JOURNAL_ACCESS_WRITE);
+	if (ret) {
+		mlog_errno(ret);
+		goto out_commit;
+	}
+
+	inode->i_mode = new_mode;
+	di->i_mode = cpu_to_le16(inode->i_mode);
+
+	ocfs2_journal_dirty(handle, di_bh);
+
+out_commit:
+	if (commit_handle)
+		ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
+out_brelse:
+	brelse(di_bh);
+out:
+	return ret;
+}
+
+/*
  * Set the access or default ACL of an inode.
  */
 static int ocfs2_set_acl(handle_t *handle,
@@ -193,9 +249,14 @@
 			if (ret < 0)
 				return ret;
 			else {
-				inode->i_mode = mode;
 				if (ret == 0)
 					acl = NULL;
+
+				ret = ocfs2_acl_set_mode(inode, di_bh,
+							 handle, mode);
+				if (ret)
+					return ret;
+
 			}
 		}
 		break;
@@ -283,6 +344,7 @@
 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 	struct posix_acl *acl = NULL;
 	int ret = 0;
+	mode_t mode;
 
 	if (!S_ISLNK(inode->i_mode)) {
 		if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) {
@@ -291,12 +353,17 @@
 			if (IS_ERR(acl))
 				return PTR_ERR(acl);
 		}
-		if (!acl)
-			inode->i_mode &= ~current_umask();
+		if (!acl) {
+			mode = inode->i_mode & ~current_umask();
+			ret = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
+			if (ret) {
+				mlog_errno(ret);
+				goto cleanup;
+			}
+		}
 	}
 	if ((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) && acl) {
 		struct posix_acl *clone;
-		mode_t mode;
 
 		if (S_ISDIR(inode->i_mode)) {
 			ret = ocfs2_set_acl(handle, inode, di_bh,
@@ -313,7 +380,7 @@
 		mode = inode->i_mode;
 		ret = posix_acl_create_masq(clone, &mode);
 		if (ret >= 0) {
-			inode->i_mode = mode;
+			ret = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
 			if (ret > 0) {
 				ret = ocfs2_set_acl(handle, inode,
 						    di_bh, ACL_TYPE_ACCESS,
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index a659606..9289b43 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -1875,7 +1875,6 @@
 ok:
 		spin_unlock(&res->spinlock);
 	}
-	spin_unlock(&dlm->spinlock);
 
 	// mlog(0, "woo!  got an assert_master from node %u!\n",
 	// 	     assert->node_idx);
@@ -1926,7 +1925,6 @@
 		/* master is known, detach if not already detached.
 		 * ensures that only one assert_master call will happen
 		 * on this mle. */
-		spin_lock(&dlm->spinlock);
 		spin_lock(&dlm->master_lock);
 
 		rr = atomic_read(&mle->mle_refs.refcount);
@@ -1959,7 +1957,6 @@
 			__dlm_put_mle(mle);
 		}
 		spin_unlock(&dlm->master_lock);
-		spin_unlock(&dlm->spinlock);
 	} else if (res) {
 		if (res->owner != assert->node_idx) {
 			mlog(0, "assert_master from %u, but current "
@@ -1967,6 +1964,7 @@
 			     res->owner, namelen, name);
 		}
 	}
+	spin_unlock(&dlm->spinlock);
 
 done:
 	ret = 0;
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index 278a223..ab20790 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -891,6 +891,21 @@
 	/* Do some basic inode verification... */
 	di = (struct ocfs2_dinode *) di_bh->b_data;
 	if (!(di->i_flags & cpu_to_le32(OCFS2_ORPHANED_FL))) {
+		/*
+		 * Inodes in the orphan dir must have ORPHANED_FL.  The only
+		 * inodes that come back out of the orphan dir are reflink
+		 * targets. A reflink target may be moved out of the orphan
+		 * dir between the time we scan the directory and the time we
+		 * process it. This would lead to HAS_REFCOUNT_FL being set but
+		 * ORPHANED_FL not.
+		 */
+		if (di->i_dyn_features & cpu_to_le16(OCFS2_HAS_REFCOUNT_FL)) {
+			mlog(0, "Reflinked inode %llu is no longer orphaned.  "
+			     "it shouldn't be deleted\n",
+			     (unsigned long long)oi->ip_blkno);
+			goto bail;
+		}
+
 		/* for lack of a better error? */
 		status = -EEXIST;
 		mlog(ML_ERROR,
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
index ca992d9..c983715 100644
--- a/fs/ocfs2/localalloc.c
+++ b/fs/ocfs2/localalloc.c
@@ -872,8 +872,10 @@
 			     (unsigned long long)la_start_blk,
 			     (unsigned long long)blkno);
 
-			status = ocfs2_free_clusters(handle, main_bm_inode,
-						     main_bm_bh, blkno, count);
+			status = ocfs2_release_clusters(handle,
+							main_bm_inode,
+							main_bm_bh, blkno,
+							count);
 			if (status < 0) {
 				mlog_errno(status);
 				goto bail;
@@ -984,8 +986,7 @@
 	}
 
 retry_enospc:
-	(*ac)->ac_bits_wanted = osb->local_alloc_bits;
-
+	(*ac)->ac_bits_wanted = osb->local_alloc_default_bits;
 	status = ocfs2_reserve_cluster_bitmap_bits(osb, *ac);
 	if (status == -ENOSPC) {
 		if (ocfs2_recalc_la_window(osb, OCFS2_LA_EVENT_ENOSPC) ==
@@ -1061,6 +1062,7 @@
 		    OCFS2_LA_DISABLED)
 			goto bail;
 
+		ac->ac_bits_wanted = osb->local_alloc_default_bits;
 		status = ocfs2_claim_clusters(osb, handle, ac,
 					      osb->local_alloc_bits,
 					      &cluster_off,
diff --git a/fs/ocfs2/locks.c b/fs/ocfs2/locks.c
index 544ac62..b5cb3ed 100644
--- a/fs/ocfs2/locks.c
+++ b/fs/ocfs2/locks.c
@@ -133,7 +133,7 @@
 
 	if (!(fl->fl_flags & FL_POSIX))
 		return -ENOLCK;
-	if (__mandatory_lock(inode))
+	if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
 		return -ENOLCK;
 
 	return ocfs2_plock(osb->cconn, OCFS2_I(inode)->ip_blkno, file, cmd, fl);
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index d9cd4e3..b1eb50a 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -84,7 +84,7 @@
 static int ocfs2_orphan_add(struct ocfs2_super *osb,
 			    handle_t *handle,
 			    struct inode *inode,
-			    struct ocfs2_dinode *fe,
+			    struct buffer_head *fe_bh,
 			    char *name,
 			    struct ocfs2_dir_lookup_result *lookup,
 			    struct inode *orphan_dir_inode);
@@ -879,7 +879,7 @@
 	fe = (struct ocfs2_dinode *) fe_bh->b_data;
 
 	if (inode_is_unlinkable(inode)) {
-		status = ocfs2_orphan_add(osb, handle, inode, fe, orphan_name,
+		status = ocfs2_orphan_add(osb, handle, inode, fe_bh, orphan_name,
 					  &orphan_insert, orphan_dir);
 		if (status < 0) {
 			mlog_errno(status);
@@ -1300,7 +1300,7 @@
 		if (S_ISDIR(new_inode->i_mode) ||
 		    (ocfs2_read_links_count(newfe) == 1)) {
 			status = ocfs2_orphan_add(osb, handle, new_inode,
-						  newfe, orphan_name,
+						  newfe_bh, orphan_name,
 						  &orphan_insert, orphan_dir);
 			if (status < 0) {
 				mlog_errno(status);
@@ -1911,7 +1911,7 @@
 static int ocfs2_orphan_add(struct ocfs2_super *osb,
 			    handle_t *handle,
 			    struct inode *inode,
-			    struct ocfs2_dinode *fe,
+			    struct buffer_head *fe_bh,
 			    char *name,
 			    struct ocfs2_dir_lookup_result *lookup,
 			    struct inode *orphan_dir_inode)
@@ -1919,6 +1919,7 @@
 	struct buffer_head *orphan_dir_bh = NULL;
 	int status = 0;
 	struct ocfs2_dinode *orphan_fe;
+	struct ocfs2_dinode *fe = (struct ocfs2_dinode *) fe_bh->b_data;
 
 	mlog_entry("(inode->i_ino = %lu)\n", inode->i_ino);
 
@@ -1959,6 +1960,21 @@
 		goto leave;
 	}
 
+	/*
+	 * We're going to journal the change of i_flags and i_orphaned_slot.
+	 * It's safe anyway, though some callers may duplicate the journaling.
+	 * Journaling within the func just makes the logic look more
+	 * straightforward.
+	 */
+	status = ocfs2_journal_access_di(handle,
+					 INODE_CACHE(inode),
+					 fe_bh,
+					 OCFS2_JOURNAL_ACCESS_WRITE);
+	if (status < 0) {
+		mlog_errno(status);
+		goto leave;
+	}
+
 	le32_add_cpu(&fe->i_flags, OCFS2_ORPHANED_FL);
 
 	/* Record which orphan dir our inode now resides
@@ -1966,6 +1982,8 @@
 	 * dir to lock. */
 	fe->i_orphaned_slot = cpu_to_le16(osb->slot_num);
 
+	ocfs2_journal_dirty(handle, fe_bh);
+
 	mlog(0, "Inode %llu orphaned in slot %d\n",
 	     (unsigned long long)OCFS2_I(inode)->ip_blkno, osb->slot_num);
 
@@ -2123,7 +2141,7 @@
 	}
 
 	di = (struct ocfs2_dinode *)new_di_bh->b_data;
-	status = ocfs2_orphan_add(osb, handle, inode, di, orphan_name,
+	status = ocfs2_orphan_add(osb, handle, inode, new_di_bh, orphan_name,
 				  &orphan_insert, orphan_dir);
 	if (status < 0) {
 		mlog_errno(status);
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 1238b49..adf5e2e 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -763,8 +763,18 @@
 	return megs << (20 - OCFS2_SB(sb)->s_clustersize_bits);
 }
 
-#define ocfs2_set_bit ext2_set_bit
-#define ocfs2_clear_bit ext2_clear_bit
+static inline void _ocfs2_set_bit(unsigned int bit, unsigned long *bitmap)
+{
+	ext2_set_bit(bit, bitmap);
+}
+#define ocfs2_set_bit(bit, addr) _ocfs2_set_bit((bit), (unsigned long *)(addr))
+
+static inline void _ocfs2_clear_bit(unsigned int bit, unsigned long *bitmap)
+{
+	ext2_clear_bit(bit, bitmap);
+}
+#define ocfs2_clear_bit(bit, addr) _ocfs2_clear_bit((bit), (unsigned long *)(addr))
+
 #define ocfs2_test_bit ext2_test_bit
 #define ocfs2_find_next_zero_bit ext2_find_next_zero_bit
 #define ocfs2_find_next_bit ext2_find_next_bit
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 9e96921..29405f2 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -4075,6 +4075,7 @@
 	OCFS2_I(t_inode)->ip_dyn_features = OCFS2_I(s_inode)->ip_dyn_features;
 	spin_unlock(&OCFS2_I(t_inode)->ip_lock);
 	i_size_write(t_inode, size);
+	t_inode->i_blocks = s_inode->i_blocks;
 
 	di->i_xattr_inline_size = s_di->i_xattr_inline_size;
 	di->i_clusters = s_di->i_clusters;
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index c3c60bc..19ba00f 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -95,13 +95,6 @@
 					     struct buffer_head *group_bh,
 					     unsigned int bit_off,
 					     unsigned int num_bits);
-static inline int ocfs2_block_group_clear_bits(handle_t *handle,
-					       struct inode *alloc_inode,
-					       struct ocfs2_group_desc *bg,
-					       struct buffer_head *group_bh,
-					       unsigned int bit_off,
-					       unsigned int num_bits);
-
 static int ocfs2_relink_block_group(handle_t *handle,
 				    struct inode *alloc_inode,
 				    struct buffer_head *fe_bh,
@@ -152,7 +145,7 @@
 
 #define do_error(fmt, ...)						\
 	do{								\
-		if (clean_error)					\
+		if (resize)					\
 			mlog(ML_ERROR, fmt "\n", ##__VA_ARGS__);	\
 		else							\
 			ocfs2_error(sb, fmt, ##__VA_ARGS__);		\
@@ -160,7 +153,7 @@
 
 static int ocfs2_validate_gd_self(struct super_block *sb,
 				  struct buffer_head *bh,
-				  int clean_error)
+				  int resize)
 {
 	struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data;
 
@@ -211,7 +204,7 @@
 static int ocfs2_validate_gd_parent(struct super_block *sb,
 				    struct ocfs2_dinode *di,
 				    struct buffer_head *bh,
-				    int clean_error)
+				    int resize)
 {
 	unsigned int max_bits;
 	struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data;
@@ -233,8 +226,11 @@
 		return -EINVAL;
 	}
 
-	if (le16_to_cpu(gd->bg_chain) >=
-	    le16_to_cpu(di->id2.i_chain.cl_next_free_rec)) {
+	/* During resize we may hit the case bg_chain == cl_next_free_rec. */
+	if ((le16_to_cpu(gd->bg_chain) >
+	     le16_to_cpu(di->id2.i_chain.cl_next_free_rec)) ||
+	    ((le16_to_cpu(gd->bg_chain) ==
+	     le16_to_cpu(di->id2.i_chain.cl_next_free_rec)) && !resize)) {
 		do_error("Group descriptor #%llu has bad chain %u",
 			 (unsigned long long)bh->b_blocknr,
 			 le16_to_cpu(gd->bg_chain));
@@ -1975,18 +1971,18 @@
 				      bits_wanted, cluster_start, num_clusters);
 }
 
-static inline int ocfs2_block_group_clear_bits(handle_t *handle,
-					       struct inode *alloc_inode,
-					       struct ocfs2_group_desc *bg,
-					       struct buffer_head *group_bh,
-					       unsigned int bit_off,
-					       unsigned int num_bits)
+static int ocfs2_block_group_clear_bits(handle_t *handle,
+					struct inode *alloc_inode,
+					struct ocfs2_group_desc *bg,
+					struct buffer_head *group_bh,
+					unsigned int bit_off,
+					unsigned int num_bits,
+					void (*undo_fn)(unsigned int bit,
+							unsigned long *bmap))
 {
 	int status;
 	unsigned int tmp;
-	int journal_type = OCFS2_JOURNAL_ACCESS_WRITE;
 	struct ocfs2_group_desc *undo_bg = NULL;
-	int cluster_bitmap = 0;
 
 	mlog_entry_void();
 
@@ -1996,20 +1992,18 @@
 
 	mlog(0, "off = %u, num = %u\n", bit_off, num_bits);
 
-	if (ocfs2_is_cluster_bitmap(alloc_inode))
-		journal_type = OCFS2_JOURNAL_ACCESS_UNDO;
-
+	BUG_ON(undo_fn && !ocfs2_is_cluster_bitmap(alloc_inode));
 	status = ocfs2_journal_access_gd(handle, INODE_CACHE(alloc_inode),
-					 group_bh, journal_type);
+					 group_bh,
+					 undo_fn ?
+					 OCFS2_JOURNAL_ACCESS_UNDO :
+					 OCFS2_JOURNAL_ACCESS_WRITE);
 	if (status < 0) {
 		mlog_errno(status);
 		goto bail;
 	}
 
-	if (ocfs2_is_cluster_bitmap(alloc_inode))
-		cluster_bitmap = 1;
-
-	if (cluster_bitmap) {
+	if (undo_fn) {
 		jbd_lock_bh_state(group_bh);
 		undo_bg = (struct ocfs2_group_desc *)
 					bh2jh(group_bh)->b_committed_data;
@@ -2020,13 +2014,13 @@
 	while(tmp--) {
 		ocfs2_clear_bit((bit_off + tmp),
 				(unsigned long *) bg->bg_bitmap);
-		if (cluster_bitmap)
-			ocfs2_set_bit(bit_off + tmp,
-				      (unsigned long *) undo_bg->bg_bitmap);
+		if (undo_fn)
+			undo_fn(bit_off + tmp,
+				(unsigned long *) undo_bg->bg_bitmap);
 	}
 	le16_add_cpu(&bg->bg_free_bits_count, num_bits);
 
-	if (cluster_bitmap)
+	if (undo_fn)
 		jbd_unlock_bh_state(group_bh);
 
 	status = ocfs2_journal_dirty(handle, group_bh);
@@ -2039,12 +2033,14 @@
 /*
  * expects the suballoc inode to already be locked.
  */
-int ocfs2_free_suballoc_bits(handle_t *handle,
-			     struct inode *alloc_inode,
-			     struct buffer_head *alloc_bh,
-			     unsigned int start_bit,
-			     u64 bg_blkno,
-			     unsigned int count)
+static int _ocfs2_free_suballoc_bits(handle_t *handle,
+				     struct inode *alloc_inode,
+				     struct buffer_head *alloc_bh,
+				     unsigned int start_bit,
+				     u64 bg_blkno,
+				     unsigned int count,
+				     void (*undo_fn)(unsigned int bit,
+						     unsigned long *bitmap))
 {
 	int status = 0;
 	u32 tmp_used;
@@ -2079,7 +2075,7 @@
 
 	status = ocfs2_block_group_clear_bits(handle, alloc_inode,
 					      group, group_bh,
-					      start_bit, count);
+					      start_bit, count, undo_fn);
 	if (status < 0) {
 		mlog_errno(status);
 		goto bail;
@@ -2110,6 +2106,17 @@
 	return status;
 }
 
+int ocfs2_free_suballoc_bits(handle_t *handle,
+			     struct inode *alloc_inode,
+			     struct buffer_head *alloc_bh,
+			     unsigned int start_bit,
+			     u64 bg_blkno,
+			     unsigned int count)
+{
+	return _ocfs2_free_suballoc_bits(handle, alloc_inode, alloc_bh,
+					 start_bit, bg_blkno, count, NULL);
+}
+
 int ocfs2_free_dinode(handle_t *handle,
 		      struct inode *inode_alloc_inode,
 		      struct buffer_head *inode_alloc_bh,
@@ -2123,11 +2130,13 @@
 					inode_alloc_bh, bit, bg_blkno, 1);
 }
 
-int ocfs2_free_clusters(handle_t *handle,
-		       struct inode *bitmap_inode,
-		       struct buffer_head *bitmap_bh,
-		       u64 start_blk,
-		       unsigned int num_clusters)
+static int _ocfs2_free_clusters(handle_t *handle,
+				struct inode *bitmap_inode,
+				struct buffer_head *bitmap_bh,
+				u64 start_blk,
+				unsigned int num_clusters,
+				void (*undo_fn)(unsigned int bit,
+						unsigned long *bitmap))
 {
 	int status;
 	u16 bg_start_bit;
@@ -2154,9 +2163,9 @@
 	mlog(0, "bg_blkno = %llu, bg_start_bit = %u\n",
 	     (unsigned long long)bg_blkno, bg_start_bit);
 
-	status = ocfs2_free_suballoc_bits(handle, bitmap_inode, bitmap_bh,
-					  bg_start_bit, bg_blkno,
-					  num_clusters);
+	status = _ocfs2_free_suballoc_bits(handle, bitmap_inode, bitmap_bh,
+					   bg_start_bit, bg_blkno,
+					   num_clusters, undo_fn);
 	if (status < 0) {
 		mlog_errno(status);
 		goto out;
@@ -2170,6 +2179,32 @@
 	return status;
 }
 
+int ocfs2_free_clusters(handle_t *handle,
+			struct inode *bitmap_inode,
+			struct buffer_head *bitmap_bh,
+			u64 start_blk,
+			unsigned int num_clusters)
+{
+	return _ocfs2_free_clusters(handle, bitmap_inode, bitmap_bh,
+				    start_blk, num_clusters,
+				    _ocfs2_set_bit);
+}
+
+/*
+ * Give never-used clusters back to the global bitmap.  We don't need
+ * to protect these bits in the undo buffer.
+ */
+int ocfs2_release_clusters(handle_t *handle,
+			   struct inode *bitmap_inode,
+			   struct buffer_head *bitmap_bh,
+			   u64 start_blk,
+			   unsigned int num_clusters)
+{
+	return _ocfs2_free_clusters(handle, bitmap_inode, bitmap_bh,
+				    start_blk, num_clusters,
+				    _ocfs2_clear_bit);
+}
+
 static inline void ocfs2_debug_bg(struct ocfs2_group_desc *bg)
 {
 	printk("Block Group:\n");
diff --git a/fs/ocfs2/suballoc.h b/fs/ocfs2/suballoc.h
index fa60723..e0f46df 100644
--- a/fs/ocfs2/suballoc.h
+++ b/fs/ocfs2/suballoc.h
@@ -127,6 +127,11 @@
 			struct buffer_head *bitmap_bh,
 			u64 start_blk,
 			unsigned int num_clusters);
+int ocfs2_release_clusters(handle_t *handle,
+			   struct inode *bitmap_inode,
+			   struct buffer_head *bitmap_bh,
+			   u64 start_blk,
+			   unsigned int num_clusters);
 
 static inline u64 ocfs2_which_suballoc_group(u64 block, unsigned int bit)
 {
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index d1b0d38..3e77730 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -1622,7 +1622,7 @@
 	/* Now tell xh->xh_entries about it */
 	for (i = 0; i < count; i++) {
 		offset = le16_to_cpu(xh->xh_entries[i].xe_name_offset);
-		if (offset < namevalue_offset)
+		if (offset <= namevalue_offset)
 			le16_add_cpu(&xh->xh_entries[i].xe_name_offset,
 				     namevalue_size);
 	}
@@ -6528,13 +6528,11 @@
 					  int indexed)
 {
 	int ret;
-	struct ocfs2_alloc_context *meta_ac;
 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
-	struct ocfs2_xattr_set_ctxt ctxt = {
-		.meta_ac = meta_ac,
-	};
+	struct ocfs2_xattr_set_ctxt ctxt;
 
-	ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &meta_ac);
+	memset(&ctxt, 0, sizeof(ctxt));
+	ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &ctxt.meta_ac);
 	if (ret < 0) {
 		mlog_errno(ret);
 		return ret;
@@ -6556,7 +6554,7 @@
 
 	ocfs2_commit_trans(osb, ctxt.handle);
 out:
-	ocfs2_free_alloc_context(meta_ac);
+	ocfs2_free_alloc_context(ctxt.meta_ac);
 	return ret;
 }
 
diff --git a/fs/proc/base.c b/fs/proc/base.c
index a731084..b1f6e62 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -442,12 +442,13 @@
 unsigned long badness(struct task_struct *p, unsigned long uptime);
 static int proc_oom_score(struct task_struct *task, char *buffer)
 {
-	unsigned long points;
+	unsigned long points = 0;
 	struct timespec uptime;
 
 	do_posix_clock_monotonic_gettime(&uptime);
 	read_lock(&tasklist_lock);
-	points = badness(task->group_leader, uptime.tv_sec);
+	if (pid_alive(task))
+		points = badness(task, uptime.tv_sec);
 	read_unlock(&tasklist_lock);
 	return sprintf(buffer, "%lu\n", points);
 }
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 183f8ff..0962739 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -406,6 +406,7 @@
 
 	memset(&mss, 0, sizeof mss);
 	mss.vma = vma;
+	/* mmap_sem is held in m_start */
 	if (vma->vm_mm && !is_vm_hugetlb_page(vma))
 		walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
 
@@ -552,7 +553,8 @@
 };
 
 struct pagemapread {
-	u64 __user *out, *end;
+	int pos, len;
+	u64 *buffer;
 };
 
 #define PM_ENTRY_BYTES      sizeof(u64)
@@ -575,10 +577,8 @@
 static int add_to_pagemap(unsigned long addr, u64 pfn,
 			  struct pagemapread *pm)
 {
-	if (put_user(pfn, pm->out))
-		return -EFAULT;
-	pm->out++;
-	if (pm->out >= pm->end)
+	pm->buffer[pm->pos++] = pfn;
+	if (pm->pos >= pm->len)
 		return PM_END_OF_BUFFER;
 	return 0;
 }
@@ -720,21 +720,20 @@
  * determine which areas of memory are actually mapped and llseek to
  * skip over unmapped regions.
  */
+#define PAGEMAP_WALK_SIZE	(PMD_SIZE)
 static ssize_t pagemap_read(struct file *file, char __user *buf,
 			    size_t count, loff_t *ppos)
 {
 	struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
-	struct page **pages, *page;
-	unsigned long uaddr, uend;
 	struct mm_struct *mm;
 	struct pagemapread pm;
-	int pagecount;
 	int ret = -ESRCH;
 	struct mm_walk pagemap_walk = {};
 	unsigned long src;
 	unsigned long svpfn;
 	unsigned long start_vaddr;
 	unsigned long end_vaddr;
+	int copied = 0;
 
 	if (!task)
 		goto out;
@@ -757,35 +756,12 @@
 	if (!mm)
 		goto out_task;
 
-
-	uaddr = (unsigned long)buf & PAGE_MASK;
-	uend = (unsigned long)(buf + count);
-	pagecount = (PAGE_ALIGN(uend) - uaddr) / PAGE_SIZE;
-	ret = 0;
-	if (pagecount == 0)
-		goto out_mm;
-	pages = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
+	pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
+	pm.buffer = kmalloc(pm.len, GFP_TEMPORARY);
 	ret = -ENOMEM;
-	if (!pages)
+	if (!pm.buffer)
 		goto out_mm;
 
-	down_read(&current->mm->mmap_sem);
-	ret = get_user_pages(current, current->mm, uaddr, pagecount,
-			     1, 0, pages, NULL);
-	up_read(&current->mm->mmap_sem);
-
-	if (ret < 0)
-		goto out_free;
-
-	if (ret != pagecount) {
-		pagecount = ret;
-		ret = -EFAULT;
-		goto out_pages;
-	}
-
-	pm.out = (u64 __user *)buf;
-	pm.end = (u64 __user *)(buf + count);
-
 	pagemap_walk.pmd_entry = pagemap_pte_range;
 	pagemap_walk.pte_hole = pagemap_pte_hole;
 	pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
@@ -807,23 +783,36 @@
 	 * user buffer is tracked in "pm", and the walk
 	 * will stop when we hit the end of the buffer.
 	 */
-	ret = walk_page_range(start_vaddr, end_vaddr, &pagemap_walk);
-	if (ret == PM_END_OF_BUFFER)
-		ret = 0;
-	/* don't need mmap_sem for these, but this looks cleaner */
-	*ppos += (char __user *)pm.out - buf;
-	if (!ret)
-		ret = (char __user *)pm.out - buf;
+	ret = 0;
+	while (count && (start_vaddr < end_vaddr)) {
+		int len;
+		unsigned long end;
 
-out_pages:
-	for (; pagecount; pagecount--) {
-		page = pages[pagecount-1];
-		if (!PageReserved(page))
-			SetPageDirty(page);
-		page_cache_release(page);
+		pm.pos = 0;
+		end = start_vaddr + PAGEMAP_WALK_SIZE;
+		/* overflow ? */
+		if (end < start_vaddr || end > end_vaddr)
+			end = end_vaddr;
+		down_read(&mm->mmap_sem);
+		ret = walk_page_range(start_vaddr, end, &pagemap_walk);
+		up_read(&mm->mmap_sem);
+		start_vaddr = end;
+
+		len = min(count, PM_ENTRY_BYTES * pm.pos);
+		if (copy_to_user(buf, pm.buffer, len) < 0) {
+			ret = -EFAULT;
+			goto out_free;
+		}
+		copied += len;
+		buf += len;
+		count -= len;
 	}
+	*ppos += copied;
+	if (!ret || ret == PM_END_OF_BUFFER)
+		ret = copied;
+
 out_free:
-	kfree(pages);
+	kfree(pm.buffer);
 out_mm:
 	mmput(mm);
 out_task:
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 04bf5d7..ab19051 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -1618,10 +1618,8 @@
 	save_mount_options(s, data);
 
 	sbi = kzalloc(sizeof(struct reiserfs_sb_info), GFP_KERNEL);
-	if (!sbi) {
-		errval = -ENOMEM;
-		goto error_alloc;
-	}
+	if (!sbi)
+		return -ENOMEM;
 	s->s_fs_info = sbi;
 	/* Set default values for options: non-aggressive tails, RO on errors */
 	REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_SMALLTAIL);
@@ -1878,12 +1876,12 @@
 	return (0);
 
 error:
-	reiserfs_write_unlock(s);
-error_alloc:
 	if (jinit_done) {	/* kill the commit thread, free journal ram */
 		journal_release_error(NULL, s);
 	}
 
+	reiserfs_write_unlock(s);
+
 	reiserfs_free_bitmap_cache(s);
 	if (SB_BUFFER_WITH_SB(s))
 		brelse(SB_BUFFER_WITH_SB(s));
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 4a3c4e4..de2f82e 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -1545,39 +1545,7 @@
 {
 }
 
-
-static __inline__ void *drm_calloc_large(size_t nmemb, size_t size)
-{
-	if (size != 0 && nmemb > ULONG_MAX / size)
-		return NULL;
-
-	if (size * nmemb <= PAGE_SIZE)
-	    return kcalloc(nmemb, size, GFP_KERNEL);
-
-	return __vmalloc(size * nmemb,
-			 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
-}
-
-/* Modeled after cairo's malloc_ab, it's like calloc but without the zeroing. */
-static __inline__ void *drm_malloc_ab(size_t nmemb, size_t size)
-{
-	if (size != 0 && nmemb > ULONG_MAX / size)
-		return NULL;
-
-	if (size * nmemb <= PAGE_SIZE)
-	    return kmalloc(nmemb * size, GFP_KERNEL);
-
-	return __vmalloc(size * nmemb,
-			 GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
-}
-
-static __inline void drm_free_large(void *ptr)
-{
-	if (!is_vmalloc_addr(ptr))
-		return kfree(ptr);
-
-	vfree(ptr);
-}
+#include "drm_mem_util.h"
 /*@}*/
 
 #endif				/* __KERNEL__ */
diff --git a/include/drm/drm_mem_util.h b/include/drm/drm_mem_util.h
new file mode 100644
index 0000000..6bd325f
--- /dev/null
+++ b/include/drm/drm_mem_util.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *     Jesse Barnes <jbarnes@virtuousgeek.org>
+ *
+ */
+#ifndef _DRM_MEM_UTIL_H_
+#define _DRM_MEM_UTIL_H_
+
+#include <linux/vmalloc.h>
+
+static __inline__ void *drm_calloc_large(size_t nmemb, size_t size)
+{
+	if (size != 0 && nmemb > ULONG_MAX / size)
+		return NULL;
+
+	if (size * nmemb <= PAGE_SIZE)
+	    return kcalloc(nmemb, size, GFP_KERNEL);
+
+	return __vmalloc(size * nmemb,
+			 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
+}
+
+/* Modeled after cairo's malloc_ab, it's like calloc but without the zeroing. */
+static __inline__ void *drm_malloc_ab(size_t nmemb, size_t size)
+{
+	if (size != 0 && nmemb > ULONG_MAX / size)
+		return NULL;
+
+	if (size * nmemb <= PAGE_SIZE)
+	    return kmalloc(nmemb * size, GFP_KERNEL);
+
+	return __vmalloc(size * nmemb,
+			 GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
+}
+
+static __inline void drm_free_large(void *ptr)
+{
+	if (!is_vmalloc_addr(ptr))
+		return kfree(ptr);
+
+	vfree(ptr);
+}
+
+#endif
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 676104b..04a6ebc 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -410,6 +410,7 @@
 	{0x1002, 0x9712, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x9713, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x9714, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x9715, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0, 0, 0}
 
 #define r128_PCI_IDS \
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index e3f1b4a..e929c27 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -115,7 +115,6 @@
 	struct ttm_backend_func *func;
 };
 
-#define TTM_PAGE_FLAG_VMALLOC         (1 << 0)
 #define TTM_PAGE_FLAG_USER            (1 << 1)
 #define TTM_PAGE_FLAG_USER_DIRTY      (1 << 2)
 #define TTM_PAGE_FLAG_WRITE           (1 << 3)
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index 6816be6..8b10386 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -14,6 +14,9 @@
 #ifndef ASMARM_AMBA_H
 #define ASMARM_AMBA_H
 
+#include <linux/device.h>
+#include <linux/resource.h>
+
 #define AMBA_NR_IRQS	2
 
 struct amba_device {
diff --git a/include/linux/amba/pl061.h b/include/linux/amba/pl061.h
index b4fbd98..5ddd9ad 100644
--- a/include/linux/amba/pl061.h
+++ b/include/linux/amba/pl061.h
@@ -1,3 +1,5 @@
+#include <linux/types.h>
+
 /* platform data for the PL061 GPIO driver */
 
 struct pl061_platform_data {
diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
index cac84b0..5f494b4 100644
--- a/include/linux/ext3_fs.h
+++ b/include/linux/ext3_fs.h
@@ -565,17 +565,17 @@
 
 static inline int ext3_test_inode_state(struct inode *inode, int bit)
 {
-	return test_bit(bit, &EXT3_I(inode)->i_state);
+	return test_bit(bit, &EXT3_I(inode)->i_state_flags);
 }
 
 static inline void ext3_set_inode_state(struct inode *inode, int bit)
 {
-	set_bit(bit, &EXT3_I(inode)->i_state);
+	set_bit(bit, &EXT3_I(inode)->i_state_flags);
 }
 
 static inline void ext3_clear_inode_state(struct inode *inode, int bit)
 {
-	clear_bit(bit, &EXT3_I(inode)->i_state);
+	clear_bit(bit, &EXT3_I(inode)->i_state_flags);
 }
 #else
 /* Assume that user mode programs are passing in an ext3fs superblock, not
diff --git a/include/linux/ext3_fs_i.h b/include/linux/ext3_fs_i.h
index 7679acd..f42c098 100644
--- a/include/linux/ext3_fs_i.h
+++ b/include/linux/ext3_fs_i.h
@@ -87,7 +87,7 @@
 	 * near to their parent directory's inode.
 	 */
 	__u32	i_block_group;
-	unsigned long	i_state;	/* Dynamic state flags for ext3 */
+	unsigned long	i_state_flags;	/* Dynamic state flags for ext3 */
 
 	/* block reservation info */
 	struct ext3_block_alloc_info *i_block_alloc_info;
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index 5a361f8..da7e52b 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -64,9 +64,12 @@
 extern void cancel_freezing(struct task_struct *p);
 
 #ifdef CONFIG_CGROUP_FREEZER
-extern int cgroup_frozen(struct task_struct *task);
+extern int cgroup_freezing_or_frozen(struct task_struct *task);
 #else /* !CONFIG_CGROUP_FREEZER */
-static inline int cgroup_frozen(struct task_struct *task) { return 0; }
+static inline int cgroup_freezing_or_frozen(struct task_struct *task)
+{
+	return 0;
+}
 #endif /* !CONFIG_CGROUP_FREEZER */
 
 /*
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
index 7be0c6f..c57db27 100644
--- a/include/linux/fscache-cache.h
+++ b/include/linux/fscache-cache.h
@@ -105,7 +105,7 @@
 	/* operation releaser */
 	fscache_operation_release_t release;
 
-#ifdef CONFIG_SLOW_WORK_PROC
+#ifdef CONFIG_SLOW_WORK_DEBUG
 	const char *name;		/* operation name */
 	const char *state;		/* operation state */
 #define fscache_set_op_name(OP, N)	do { (OP)->name  = (N); } while(0)
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 7b3aae2..354cc56 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -255,6 +255,7 @@
 #define MSG_ERRQUEUE	0x2000	/* Fetch message from error queue */
 #define MSG_NOSIGNAL	0x4000	/* Do not generate SIGPIPE */
 #define MSG_MORE	0x8000	/* Sender will send more */
+#define MSG_WAITFORONE	0x10000	/* recvmmsg(): block until 1+ packets avail */
 
 #define MSG_EOF         MSG_FIN
 
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index 59e9ef6..eb3f34d 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -47,17 +47,20 @@
 			    struct freezer, css);
 }
 
-int cgroup_frozen(struct task_struct *task)
+int cgroup_freezing_or_frozen(struct task_struct *task)
 {
 	struct freezer *freezer;
 	enum freezer_state state;
 
 	task_lock(task);
 	freezer = task_freezer(task);
-	state = freezer->state;
+	if (!freezer->css.cgroup->parent)
+		state = CGROUP_THAWED; /* root cgroup can't be frozen */
+	else
+		state = freezer->state;
 	task_unlock(task);
 
-	return state == CGROUP_FROZEN;
+	return (state == CGROUP_FREEZING) || (state == CGROUP_FROZEN);
 }
 
 /*
diff --git a/kernel/cred.c b/kernel/cred.c
index 1ed8ca1..1b1129d 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -364,7 +364,7 @@
 
 	new = kmem_cache_alloc(cred_jar, GFP_ATOMIC);
 	if (!new)
-		return NULL;
+		goto free_tgcred;
 
 	kdebug("prepare_usermodehelper_creds() alloc %p", new);
 
@@ -397,6 +397,10 @@
 
 error:
 	put_cred(new);
+free_tgcred:
+#ifdef CONFIG_KEYS
+	kfree(tgcred);
+#endif
 	return NULL;
 }
 
diff --git a/kernel/early_res.c b/kernel/early_res.c
index 3cb2c66..31aa933 100644
--- a/kernel/early_res.c
+++ b/kernel/early_res.c
@@ -333,6 +333,12 @@
 	struct early_res *r;
 	int i;
 
+	if (start == end)
+		return;
+
+	if (WARN_ONCE(start > end, "  wrong range [%#llx, %#llx]\n", start, end))
+		return;
+
 try_next:
 	i = find_overlapped_early(start, end);
 	if (i >= max_early_res)
diff --git a/kernel/kgdb.c b/kernel/kgdb.c
index 761fdd2..11f3515 100644
--- a/kernel/kgdb.c
+++ b/kernel/kgdb.c
@@ -69,9 +69,16 @@
 	struct pt_regs		*linux_regs;
 };
 
+/* Exception state values */
+#define DCPU_WANT_MASTER 0x1 /* Waiting to become a master kgdb cpu */
+#define DCPU_NEXT_MASTER 0x2 /* Transition from one master cpu to another */
+#define DCPU_IS_SLAVE    0x4 /* Slave cpu entering an exception */
+#define DCPU_SSTEP       0x8 /* CPU is single stepping */
+
 static struct debuggerinfo_struct {
 	void			*debuggerinfo;
 	struct task_struct	*task;
+	int 			exception_state;
 } kgdb_info[NR_CPUS];
 
 /**
@@ -391,27 +398,22 @@
 
 /*
  * Copy the binary array pointed to by buf into mem.  Fix $, #, and
- * 0x7d escaped with 0x7d.  Return a pointer to the character after
- * the last byte written.
+ * 0x7d escaped with 0x7d. Return -EFAULT on failure or 0 on success.
+ * The input buf is overwritten with the result to write to mem.
  */
 static int kgdb_ebin2mem(char *buf, char *mem, int count)
 {
-	int err = 0;
-	char c;
+	int size = 0;
+	char *c = buf;
 
 	while (count-- > 0) {
-		c = *buf++;
-		if (c == 0x7d)
-			c = *buf++ ^ 0x20;
-
-		err = probe_kernel_write(mem, &c, 1);
-		if (err)
-			break;
-
-		mem++;
+		c[size] = *buf++;
+		if (c[size] == 0x7d)
+			c[size] = *buf++ ^ 0x20;
+		size++;
 	}
 
-	return err;
+	return probe_kernel_write(mem, c, size);
 }
 
 /*
@@ -563,49 +565,6 @@
 }
 
 /*
- * CPU debug state control:
- */
-
-#ifdef CONFIG_SMP
-static void kgdb_wait(struct pt_regs *regs)
-{
-	unsigned long flags;
-	int cpu;
-
-	local_irq_save(flags);
-	cpu = raw_smp_processor_id();
-	kgdb_info[cpu].debuggerinfo = regs;
-	kgdb_info[cpu].task = current;
-	/*
-	 * Make sure the above info reaches the primary CPU before
-	 * our cpu_in_kgdb[] flag setting does:
-	 */
-	smp_wmb();
-	atomic_set(&cpu_in_kgdb[cpu], 1);
-
-	/* Disable any cpu specific hw breakpoints */
-	kgdb_disable_hw_debug(regs);
-
-	/* Wait till primary CPU is done with debugging */
-	while (atomic_read(&passive_cpu_wait[cpu]))
-		cpu_relax();
-
-	kgdb_info[cpu].debuggerinfo = NULL;
-	kgdb_info[cpu].task = NULL;
-
-	/* fix up hardware debug registers on local cpu */
-	if (arch_kgdb_ops.correct_hw_break)
-		arch_kgdb_ops.correct_hw_break();
-
-	/* Signal the primary CPU that we are done: */
-	atomic_set(&cpu_in_kgdb[cpu], 0);
-	touch_softlockup_watchdog_sync();
-	clocksource_touch_watchdog();
-	local_irq_restore(flags);
-}
-#endif
-
-/*
  * Some architectures need cache flushes when we set/clear a
  * breakpoint:
  */
@@ -1400,34 +1359,13 @@
 	return 1;
 }
 
-/*
- * kgdb_handle_exception() - main entry point from a kernel exception
- *
- * Locking hierarchy:
- *	interface locks, if any (begin_session)
- *	kgdb lock (kgdb_active)
- */
-int
-kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
+static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs)
 {
-	struct kgdb_state kgdb_var;
-	struct kgdb_state *ks = &kgdb_var;
 	unsigned long flags;
 	int sstep_tries = 100;
 	int error = 0;
 	int i, cpu;
-
-	ks->cpu			= raw_smp_processor_id();
-	ks->ex_vector		= evector;
-	ks->signo		= signo;
-	ks->ex_vector		= evector;
-	ks->err_code		= ecode;
-	ks->kgdb_usethreadid	= 0;
-	ks->linux_regs		= regs;
-
-	if (kgdb_reenter_check(ks))
-		return 0; /* Ouch, double exception ! */
-
+	int trace_on = 0;
 acquirelock:
 	/*
 	 * Interrupts will be restored by the 'trap return' code, except when
@@ -1435,13 +1373,43 @@
 	 */
 	local_irq_save(flags);
 
-	cpu = raw_smp_processor_id();
+	cpu = ks->cpu;
+	kgdb_info[cpu].debuggerinfo = regs;
+	kgdb_info[cpu].task = current;
+	/*
+	 * Make sure the above info reaches the primary CPU before
+	 * our cpu_in_kgdb[] flag setting does:
+	 */
+	atomic_inc(&cpu_in_kgdb[cpu]);
 
 	/*
-	 * Acquire the kgdb_active lock:
+	 * CPU will loop if it is a slave or request to become a kgdb
+	 * master cpu and acquire the kgdb_active lock:
 	 */
-	while (atomic_cmpxchg(&kgdb_active, -1, cpu) != -1)
+	while (1) {
+		if (kgdb_info[cpu].exception_state & DCPU_WANT_MASTER) {
+			if (atomic_cmpxchg(&kgdb_active, -1, cpu) == cpu)
+				break;
+		} else if (kgdb_info[cpu].exception_state & DCPU_IS_SLAVE) {
+			if (!atomic_read(&passive_cpu_wait[cpu]))
+				goto return_normal;
+		} else {
+return_normal:
+			/* Return to normal operation by executing any
+			 * hw breakpoint fixup.
+			 */
+			if (arch_kgdb_ops.correct_hw_break)
+				arch_kgdb_ops.correct_hw_break();
+			if (trace_on)
+				tracing_on();
+			atomic_dec(&cpu_in_kgdb[cpu]);
+			touch_softlockup_watchdog_sync();
+			clocksource_touch_watchdog();
+			local_irq_restore(flags);
+			return 0;
+		}
 		cpu_relax();
+	}
 
 	/*
 	 * For single stepping, try to only enter on the processor
@@ -1475,9 +1443,6 @@
 	if (kgdb_io_ops->pre_exception)
 		kgdb_io_ops->pre_exception();
 
-	kgdb_info[ks->cpu].debuggerinfo = ks->linux_regs;
-	kgdb_info[ks->cpu].task = current;
-
 	kgdb_disable_hw_debug(ks->linux_regs);
 
 	/*
@@ -1486,15 +1451,9 @@
 	 */
 	if (!kgdb_single_step) {
 		for (i = 0; i < NR_CPUS; i++)
-			atomic_set(&passive_cpu_wait[i], 1);
+			atomic_inc(&passive_cpu_wait[i]);
 	}
 
-	/*
-	 * spin_lock code is good enough as a barrier so we don't
-	 * need one here:
-	 */
-	atomic_set(&cpu_in_kgdb[ks->cpu], 1);
-
 #ifdef CONFIG_SMP
 	/* Signal the other CPUs to enter kgdb_wait() */
 	if ((!kgdb_single_step) && kgdb_do_roundup)
@@ -1518,6 +1477,9 @@
 	kgdb_single_step = 0;
 	kgdb_contthread = current;
 	exception_level = 0;
+	trace_on = tracing_is_on();
+	if (trace_on)
+		tracing_off();
 
 	/* Talk to debugger with gdbserial protocol */
 	error = gdb_serial_stub(ks);
@@ -1526,13 +1488,11 @@
 	if (kgdb_io_ops->post_exception)
 		kgdb_io_ops->post_exception();
 
-	kgdb_info[ks->cpu].debuggerinfo = NULL;
-	kgdb_info[ks->cpu].task = NULL;
-	atomic_set(&cpu_in_kgdb[ks->cpu], 0);
+	atomic_dec(&cpu_in_kgdb[ks->cpu]);
 
 	if (!kgdb_single_step) {
 		for (i = NR_CPUS-1; i >= 0; i--)
-			atomic_set(&passive_cpu_wait[i], 0);
+			atomic_dec(&passive_cpu_wait[i]);
 		/*
 		 * Wait till all the CPUs have quit
 		 * from the debugger.
@@ -1551,6 +1511,8 @@
 		else
 			kgdb_sstep_pid = 0;
 	}
+	if (trace_on)
+		tracing_on();
 	/* Free kgdb_active */
 	atomic_set(&kgdb_active, -1);
 	touch_softlockup_watchdog_sync();
@@ -1560,13 +1522,52 @@
 	return error;
 }
 
+/*
+ * kgdb_handle_exception() - main entry point from a kernel exception
+ *
+ * Locking hierarchy:
+ *	interface locks, if any (begin_session)
+ *	kgdb lock (kgdb_active)
+ */
+int
+kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
+{
+	struct kgdb_state kgdb_var;
+	struct kgdb_state *ks = &kgdb_var;
+	int ret;
+
+	ks->cpu			= raw_smp_processor_id();
+	ks->ex_vector		= evector;
+	ks->signo		= signo;
+	ks->ex_vector		= evector;
+	ks->err_code		= ecode;
+	ks->kgdb_usethreadid	= 0;
+	ks->linux_regs		= regs;
+
+	if (kgdb_reenter_check(ks))
+		return 0; /* Ouch, double exception ! */
+	kgdb_info[ks->cpu].exception_state |= DCPU_WANT_MASTER;
+	ret = kgdb_cpu_enter(ks, regs);
+	kgdb_info[ks->cpu].exception_state &= ~DCPU_WANT_MASTER;
+	return ret;
+}
+
 int kgdb_nmicallback(int cpu, void *regs)
 {
 #ifdef CONFIG_SMP
+	struct kgdb_state kgdb_var;
+	struct kgdb_state *ks = &kgdb_var;
+
+	memset(ks, 0, sizeof(struct kgdb_state));
+	ks->cpu			= cpu;
+	ks->linux_regs		= regs;
+
 	if (!atomic_read(&cpu_in_kgdb[cpu]) &&
-			atomic_read(&kgdb_active) != cpu &&
-			atomic_read(&cpu_in_kgdb[atomic_read(&kgdb_active)])) {
-		kgdb_wait((struct pt_regs *)regs);
+	    atomic_read(&kgdb_active) != -1 &&
+	    atomic_read(&kgdb_active) != cpu) {
+		kgdb_info[cpu].exception_state |= DCPU_IS_SLAVE;
+		kgdb_cpu_enter(ks, regs);
+		kgdb_info[cpu].exception_state &= ~DCPU_IS_SLAVE;
 		return 0;
 	}
 #endif
@@ -1742,11 +1743,11 @@
  */
 void kgdb_breakpoint(void)
 {
-	atomic_set(&kgdb_setting_breakpoint, 1);
+	atomic_inc(&kgdb_setting_breakpoint);
 	wmb(); /* Sync point before breakpoint */
 	arch_kgdb_breakpoint();
 	wmb(); /* Sync point after breakpoint */
-	atomic_set(&kgdb_setting_breakpoint, 0);
+	atomic_dec(&kgdb_setting_breakpoint);
 }
 EXPORT_SYMBOL_GPL(kgdb_breakpoint);
 
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 5ade1bd..71ae290 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -88,12 +88,11 @@
 		printk(KERN_ERR "Freezing of tasks failed after %d.%02d seconds "
 				"(%d tasks refusing to freeze):\n",
 				elapsed_csecs / 100, elapsed_csecs % 100, todo);
-		show_state();
 		read_lock(&tasklist_lock);
 		do_each_thread(g, p) {
 			task_lock(p);
 			if (freezing(p) && !freezer_should_skip(p))
-				printk(KERN_ERR " %s\n", p->comm);
+				sched_show_task(p);
 			cancel_freezing(p);
 			task_unlock(p);
 		} while_each_thread(g, p);
@@ -145,7 +144,7 @@
 		if (nosig_only && should_send_signal(p))
 			continue;
 
-		if (cgroup_frozen(p))
+		if (cgroup_freezing_or_frozen(p))
 			continue;
 
 		thaw_process(p);
diff --git a/kernel/slow-work.c b/kernel/slow-work.c
index 7494bbf..7d3f4fa 100644
--- a/kernel/slow-work.c
+++ b/kernel/slow-work.c
@@ -637,7 +637,7 @@
 			goto cancelled;
 
 		/* the timer holds a reference whilst it is pending */
-		ret = work->ops->get_ref(work);
+		ret = slow_work_get_ref(work);
 		if (ret < 0)
 			goto cant_get_ref;
 
diff --git a/kernel/slow-work.h b/kernel/slow-work.h
index 321f3c5..a29ebd1 100644
--- a/kernel/slow-work.h
+++ b/kernel/slow-work.h
@@ -43,28 +43,28 @@
  */
 static inline void slow_work_set_thread_pid(int id, pid_t pid)
 {
-#ifdef CONFIG_SLOW_WORK_PROC
+#ifdef CONFIG_SLOW_WORK_DEBUG
 	slow_work_pids[id] = pid;
 #endif
 }
 
 static inline void slow_work_mark_time(struct slow_work *work)
 {
-#ifdef CONFIG_SLOW_WORK_PROC
+#ifdef CONFIG_SLOW_WORK_DEBUG
 	work->mark = CURRENT_TIME;
 #endif
 }
 
 static inline void slow_work_begin_exec(int id, struct slow_work *work)
 {
-#ifdef CONFIG_SLOW_WORK_PROC
+#ifdef CONFIG_SLOW_WORK_DEBUG
 	slow_work_execs[id] = work;
 #endif
 }
 
 static inline void slow_work_end_exec(int id, struct slow_work *work)
 {
-#ifdef CONFIG_SLOW_WORK_PROC
+#ifdef CONFIG_SLOW_WORK_DEBUG
 	write_lock(&slow_work_execs_lock);
 	slow_work_execs[id] = NULL;
 	write_unlock(&slow_work_execs_lock);
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 51ca946..3feb2b3 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1194,7 +1194,7 @@
 		hlist_for_each_entry_rcu(dev, node, head, index_hlist) {
 			if (idx < s_idx)
 				goto cont;
-			if (idx > s_idx)
+			if (h > s_h || idx > s_idx)
 				s_ip_idx = 0;
 			in_dev = __in_dev_get_rcu(dev);
 			if (!in_dev)
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 0b9d03c..d0a6092 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1616,17 +1616,20 @@
 	int ct;
 	struct rtnexthop *nhp;
 	struct net *net = mfc_net(c);
-	struct net_device *dev = net->ipv4.vif_table[c->mfc_parent].dev;
 	u8 *b = skb_tail_pointer(skb);
 	struct rtattr *mp_head;
 
-	if (dev)
-		RTA_PUT(skb, RTA_IIF, 4, &dev->ifindex);
+	/* If cache is unresolved, don't try to parse IIF and OIF */
+	if (c->mfc_parent > MAXVIFS)
+		return -ENOENT;
+
+	if (VIF_EXISTS(net, c->mfc_parent))
+		RTA_PUT(skb, RTA_IIF, 4, &net->ipv4.vif_table[c->mfc_parent].dev->ifindex);
 
 	mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));
 
 	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
-		if (c->mfc_un.res.ttls[ct] < 255) {
+		if (VIF_EXISTS(net, ct) && c->mfc_un.res.ttls[ct] < 255) {
 			if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
 				goto rtattr_failure;
 			nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 54fd68c..d413b57 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1097,7 +1097,7 @@
 }
 
 static int rt_intern_hash(unsigned hash, struct rtable *rt,
-			  struct rtable **rp, struct sk_buff *skb)
+			  struct rtable **rp, struct sk_buff *skb, int ifindex)
 {
 	struct rtable	*rth, **rthp;
 	unsigned long	now;
@@ -1212,11 +1212,16 @@
 		    slow_chain_length(rt_hash_table[hash].chain) > rt_chain_length_max) {
 			struct net *net = dev_net(rt->u.dst.dev);
 			int num = ++net->ipv4.current_rt_cache_rebuild_count;
-			if (!rt_caching(dev_net(rt->u.dst.dev))) {
+			if (!rt_caching(net)) {
 				printk(KERN_WARNING "%s: %d rebuilds is over limit, route caching disabled\n",
 					rt->u.dst.dev->name, num);
 			}
-			rt_emergency_hash_rebuild(dev_net(rt->u.dst.dev));
+			rt_emergency_hash_rebuild(net);
+			spin_unlock_bh(rt_hash_lock_addr(hash));
+
+			hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src,
+					ifindex, rt_genid(net));
+			goto restart;
 		}
 	}
 
@@ -1477,7 +1482,7 @@
 							&netevent);
 
 				rt_del(hash, rth);
-				if (!rt_intern_hash(hash, rt, &rt, NULL))
+				if (!rt_intern_hash(hash, rt, &rt, NULL, rt->fl.oif))
 					ip_rt_put(rt);
 				goto do_next;
 			}
@@ -1931,7 +1936,7 @@
 
 	in_dev_put(in_dev);
 	hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev)));
-	return rt_intern_hash(hash, rth, NULL, skb);
+	return rt_intern_hash(hash, rth, NULL, skb, dev->ifindex);
 
 e_nobufs:
 	in_dev_put(in_dev);
@@ -2098,7 +2103,7 @@
 	/* put it into the cache */
 	hash = rt_hash(daddr, saddr, fl->iif,
 		       rt_genid(dev_net(rth->u.dst.dev)));
-	return rt_intern_hash(hash, rth, NULL, skb);
+	return rt_intern_hash(hash, rth, NULL, skb, fl->iif);
 }
 
 /*
@@ -2255,7 +2260,7 @@
 	}
 	rth->rt_type	= res.type;
 	hash = rt_hash(daddr, saddr, fl.iif, rt_genid(net));
-	err = rt_intern_hash(hash, rth, NULL, skb);
+	err = rt_intern_hash(hash, rth, NULL, skb, fl.iif);
 	goto done;
 
 no_route:
@@ -2502,7 +2507,7 @@
 	if (err == 0) {
 		hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif,
 			       rt_genid(dev_net(dev_out)));
-		err = rt_intern_hash(hash, rth, rp, NULL);
+		err = rt_intern_hash(hash, rth, rp, NULL, oldflp->oif);
 	}
 
 	return err;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 3381b43..7e567ae 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3610,7 +3610,7 @@
 		hlist_for_each_entry_rcu(dev, node, head, index_hlist) {
 			if (idx < s_idx)
 				goto cont;
-			if (idx > s_idx)
+			if (h > s_h || idx > s_idx)
 				s_ip_idx = 0;
 			ip_idx = 0;
 			if ((idev = __in6_dev_get(dev)) == NULL)
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 23e4ac0..27acfb5 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -1695,17 +1695,20 @@
 	int ct;
 	struct rtnexthop *nhp;
 	struct net *net = mfc6_net(c);
-	struct net_device *dev = net->ipv6.vif6_table[c->mf6c_parent].dev;
 	u8 *b = skb_tail_pointer(skb);
 	struct rtattr *mp_head;
 
-	if (dev)
-		RTA_PUT(skb, RTA_IIF, 4, &dev->ifindex);
+	/* If cache is unresolved, don't try to parse IIF and OIF */
+	if (c->mf6c_parent > MAXMIFS)
+		return -ENOENT;
+
+	if (MIF_EXISTS(net, c->mf6c_parent))
+		RTA_PUT(skb, RTA_IIF, 4, &net->ipv6.vif6_table[c->mf6c_parent].dev->ifindex);
 
 	mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));
 
 	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
-		if (c->mfc_un.res.ttls[ct] < 255) {
+		if (MIF_EXISTS(net, ct) && c->mfc_un.res.ttls[ct] < 255) {
 			if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
 				goto rtattr_failure;
 			nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 7fcb0e5..0d7713c 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -890,12 +890,17 @@
 	struct rt6_info *rt = (struct rt6_info *) dst;
 
 	if (rt) {
-		if (rt->rt6i_flags & RTF_CACHE)
-			ip6_del_rt(rt);
-		else
+		if (rt->rt6i_flags & RTF_CACHE) {
+			if (rt6_check_expired(rt)) {
+				ip6_del_rt(rt);
+				dst = NULL;
+			}
+		} else {
 			dst_release(dst);
+			dst = NULL;
+		}
 	}
-	return NULL;
+	return dst;
 }
 
 static void ip6_link_failure(struct sk_buff *skb)
diff --git a/net/socket.c b/net/socket.c
index 769c386..f55ffe9 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2135,6 +2135,10 @@
 			break;
 		++datagrams;
 
+		/* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
+		if (flags & MSG_WAITFORONE)
+			flags |= MSG_DONTWAIT;
+
 		if (timeout) {
 			ktime_get_ts(timeout);
 			*timeout = timespec_sub(end_time, *timeout);
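
[Editor's note: the following is an illustrative userspace sketch, not part of the merged patch. It shows how the MSG_WAITFORONE behaviour added to __sys_recvmmsg() above looks from the caller's side: block for the first datagram, then collect only what is already queued. The port number and buffer sizes are arbitrary.]

/* Build with: gcc -D_GNU_SOURCE waitforone.c -o waitforone */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <sys/socket.h>

#define VLEN  8
#define BUFSZ 1500

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in sin = {
		.sin_family      = AF_INET,
		.sin_addr.s_addr = htonl(INADDR_ANY),
		.sin_port        = htons(5555),	/* arbitrary test port */
	};
	struct mmsghdr msgs[VLEN];
	struct iovec   iovecs[VLEN];
	char           bufs[VLEN][BUFSZ];
	int i, n;

	if (fd < 0 || bind(fd, (struct sockaddr *)&sin, sizeof(sin)) < 0) {
		perror("socket/bind");
		return 1;
	}

	memset(msgs, 0, sizeof(msgs));
	for (i = 0; i < VLEN; i++) {
		iovecs[i].iov_base         = bufs[i];
		iovecs[i].iov_len          = BUFSZ;
		msgs[i].msg_hdr.msg_iov    = &iovecs[i];
		msgs[i].msg_hdr.msg_iovlen = 1;
	}

	/*
	 * MSG_WAITFORONE: block until the first datagram arrives; after
	 * that the kernel ORs in MSG_DONTWAIT, so the remaining slots are
	 * filled only from datagrams already sitting in the receive queue.
	 */
	n = recvmmsg(fd, msgs, VLEN, MSG_WAITFORONE, NULL);
	if (n < 0) {
		perror("recvmmsg");
		return 1;
	}
	for (i = 0; i < n; i++)
		printf("datagram %d: %u bytes\n", i, msgs[i].msg_len);
	return 0;
}

[End of illustrative sketch; the diff continues below.]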
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index b546ac2..a2ff861 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -148,6 +148,9 @@
 
 #define xrun_debug(substream, mask) \
 			((substream)->pstr->xrun_debug & (mask))
+#else
+#define xrun_debug(substream, mask)	0
+#endif
 
 #define dump_stack_on_xrun(substream) do {			\
 		if (xrun_debug(substream, XRUN_DEBUG_STACK))	\
@@ -169,6 +172,7 @@
 	}
 }
 
+#ifdef CONFIG_SND_PCM_XRUN_DEBUG
 #define hw_ptr_error(substream, fmt, args...)				\
 	do {								\
 		if (xrun_debug(substream, XRUN_DEBUG_BASIC)) {		\
@@ -255,8 +259,6 @@
 
 #else /* ! CONFIG_SND_PCM_XRUN_DEBUG */
 
-#define xrun_debug(substream, mask)	0
-#define xrun(substream)			do { } while (0)
 #define hw_ptr_error(substream, fmt, args...) do { } while (0)
 #define xrun_log(substream, pos)	do { } while (0)
 #define xrun_log_show(substream)	do { } while (0)
diff --git a/sound/pci/ac97/ac97_patch.c b/sound/pci/ac97/ac97_patch.c
index 1caf5e3..e68c98e 100644
--- a/sound/pci/ac97/ac97_patch.c
+++ b/sound/pci/ac97/ac97_patch.c
@@ -1852,12 +1852,14 @@
 	0x10140523, /* Thinkpad R40 */
 	0x10140534, /* Thinkpad X31 */
 	0x10140537, /* Thinkpad T41p */
+	0x1014053e, /* Thinkpad R40e */
 	0x10140554, /* Thinkpad T42p/R50p */
 	0x10140567, /* Thinkpad T43p 2668-G7U */
 	0x10140581, /* Thinkpad X41-2527 */
 	0x10280160, /* Dell Dimension 2400 */
 	0x104380b0, /* Asus A7V8X-MX */
 	0x11790241, /* Toshiba Satellite A-15 S127 */
+	0x1179ff10, /* Toshiba P500 */
 	0x144dc01a, /* Samsung NP-X20C004/SEG */
 	0 /* end */
 };
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 8b29156..4bb9067 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2269,6 +2269,7 @@
 	SND_PCI_QUIRK(0x103c, 0x306d, "HP dv3", POS_FIX_LPIB),
 	SND_PCI_QUIRK(0x1106, 0x3288, "ASUS M2V-MX SE", POS_FIX_LPIB),
 	SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB),
+	SND_PCI_QUIRK(0x1458, 0xa022, "ga-ma770-ud3", POS_FIX_LPIB),
 	SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB),
 	SND_PCI_QUIRK(0x1565, 0x820f, "Biostar Microtech", POS_FIX_LPIB),
 	SND_PCI_QUIRK(0x8086, 0xd601, "eMachines T5212", POS_FIX_LPIB),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 053d53d..9a23444 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -10043,8 +10043,11 @@
 	alc_set_pin_output(codec, nid, pin_type);
 	if (spec->multiout.dac_nids[dac_idx] == 0x25)
 		idx = 4;
-	else
+	else {
+		if (spec->multiout.num_dacs >= dac_idx)
+			return;
 		idx = spec->multiout.dac_nids[dac_idx] - 2;
+	}
 	snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_CONNECT_SEL, idx);
 
 }